Diffstat (limited to 'drivers/bluetooth/btintel_pcie.c')
-rw-r--r-- | drivers/bluetooth/btintel_pcie.c | 582
1 file changed, 567 insertions, 15 deletions
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 091ffe3e1495..c1e69fcc9c4f 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -36,6 +36,7 @@
 /* Intel Bluetooth PCIe device id table */
 static const struct pci_device_id btintel_pcie_table[] = {
 	{ BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
+	{ BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
@@ -48,6 +49,19 @@ MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
 #define BTINTEL_PCIE_HCI_EVT_PKT	0x00000004
 #define BTINTEL_PCIE_HCI_ISO_PKT	0x00000005
 
+#define BTINTEL_PCIE_MAGIC_NUM			0xA5A5A5A5
+
+#define BTINTEL_PCIE_BLZR_HWEXP_SIZE		1024
+#define BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR	0xB00A7C00
+
+#define BTINTEL_PCIE_SCP_HWEXP_SIZE		4096
+#define BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR		0xB030F800
+
+#define BTINTEL_PCIE_MAGIC_NUM			0xA5A5A5A5
+
+#define BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER	0x17A2
+#define BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT		0x1E61
+
 /* Alive interrupt context */
 enum {
 	BTINTEL_PCIE_ROM,
@@ -59,6 +73,83 @@ enum {
 	BTINTEL_PCIE_D3
 };
 
+/* Structure for dbgc fragment buffer
+ * @buf_addr_lsb: LSB of the buffer's physical address
+ * @buf_addr_msb: MSB of the buffer's physical address
+ * @buf_size: Total size of the buffer
+ */
+struct btintel_pcie_dbgc_ctxt_buf {
+	u32	buf_addr_lsb;
+	u32	buf_addr_msb;
+	u32	buf_size;
+};
+
+/* Structure for dbgc fragment
+ * @magic_num: 0XA5A5A5A5
+ * @ver: For Driver-FW compatibility
+ * @total_size: Total size of the payload debug info
+ * @num_buf: Num of allocated debug bufs
+ * @bufs: All buffer's addresses and sizes
+ */
+struct btintel_pcie_dbgc_ctxt {
+	u32	magic_num;
+	u32	ver;
+	u32	total_size;
+	u32	num_buf;
+	struct btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT];
+};
+
+/* This function initializes the memory for DBGC buffers and formats the
+ * DBGC fragment which consists of header info and DBGC buffer's LSB, MSB and
+ * size as the payload
+ */
+static int btintel_pcie_setup_dbgc(struct btintel_pcie_data *data)
+{
+	struct btintel_pcie_dbgc_ctxt db_frag;
+	struct data_buf *buf;
+	int i;
+
+	data->dbgc.count = BTINTEL_PCIE_DBGC_BUFFER_COUNT;
+	data->dbgc.bufs = devm_kcalloc(&data->pdev->dev, data->dbgc.count,
+				       sizeof(*buf), GFP_KERNEL);
+	if (!data->dbgc.bufs)
+		return -ENOMEM;
+
+	data->dbgc.buf_v_addr = dmam_alloc_coherent(&data->pdev->dev,
+						    data->dbgc.count *
+						    BTINTEL_PCIE_DBGC_BUFFER_SIZE,
+						    &data->dbgc.buf_p_addr,
+						    GFP_KERNEL | __GFP_NOWARN);
+	if (!data->dbgc.buf_v_addr)
+		return -ENOMEM;
+
+	data->dbgc.frag_v_addr = dmam_alloc_coherent(&data->pdev->dev,
+						     sizeof(struct btintel_pcie_dbgc_ctxt),
+						     &data->dbgc.frag_p_addr,
+						     GFP_KERNEL | __GFP_NOWARN);
+	if (!data->dbgc.frag_v_addr)
+		return -ENOMEM;
+
+	data->dbgc.frag_size = sizeof(struct btintel_pcie_dbgc_ctxt);
+
+	db_frag.magic_num = BTINTEL_PCIE_MAGIC_NUM;
+	db_frag.ver = BTINTEL_PCIE_DBGC_FRAG_VERSION;
+	db_frag.total_size = BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE;
+	db_frag.num_buf = BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT;
+
+	for (i = 0; i < data->dbgc.count; i++) {
+		buf = &data->dbgc.bufs[i];
+		buf->data_p_addr = data->dbgc.buf_p_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+		buf->data = data->dbgc.buf_v_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+		db_frag.bufs[i].buf_addr_lsb = lower_32_bits(buf->data_p_addr);
+		db_frag.bufs[i].buf_addr_msb = upper_32_bits(buf->data_p_addr);
+		db_frag.bufs[i].buf_size = BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+	}
+
+	memcpy(data->dbgc.frag_v_addr, &db_frag, sizeof(db_frag));
+
+	return 0;
+}
+
 static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
 				     u16 queue_num)
 {
@@ -273,6 +364,271 @@ static int btintel_pcie_reset_bt(struct btintel_pcie_data *data)
 	return reg == 0 ? 0 : -ENODEV;
 }
 
+static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
+{
+	u32 reg;
+
+	/* Set MAC_INIT bit to start primary bootloader */
+	reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+	reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
+		 BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
+		 BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
+	reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
+		BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
+	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+}
+
+static int btintel_pcie_add_dmp_data(struct hci_dev *hdev, const void *data, int size)
+{
+	struct sk_buff *skb;
+	int err;
+
+	skb = alloc_skb(size, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put_data(skb, data, size);
+	err = hci_devcd_append(hdev, skb);
+	if (err) {
+		bt_dev_err(hdev, "Failed to append data in the coredump");
+		return err;
+	}
+
+	return 0;
+}
+
+static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
+{
+	u32 reg;
+	int retry = 15;
+
+	reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+	reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
+	reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
+	if ((reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS) == 0)
+		reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
+
+	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+
+	do {
+		reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+		if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS)
+			return 0;
+		/* Need delay here for Target Access hardware to settle down */
+		usleep_range(1000, 1200);
+
+	} while (--retry > 0);
+
+	return -ETIME;
+}
+
+static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
+{
+	u32 reg;
+
+	reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+	if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ)
+		reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
+
+	if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS)
+		reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
+
+	if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ)
+		reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
+
+	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+}
+
+static void btintel_pcie_copy_tlv(struct sk_buff *skb, enum btintel_pcie_tlv_type type,
+				  void *data, int size)
+{
+	struct intel_tlv *tlv;
+
+	tlv = skb_put(skb, sizeof(*tlv) + size);
+	tlv->type = type;
+	tlv->len = size;
+	memcpy(tlv->val, data, tlv->len);
+}
+
+static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
+{
+	u32 offset, prev_size, wr_ptr_status, dump_size, i;
+	struct btintel_pcie_dbgc *dbgc = &data->dbgc;
+	u8 buf_idx, dump_time_len, fw_build;
+	struct hci_dev *hdev = data->hdev;
+	struct intel_tlv *tlv;
+	struct timespec64 now;
+	struct sk_buff *skb;
+	struct tm tm_now;
+	char buf[256];
+	u16 hdr_len;
+	int ret;
+
+	wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
+	offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
+
+	buf_idx = BTINTEL_PCIE_DBGC_DBG_BUF_IDX(wr_ptr_status);
+	if (buf_idx > dbgc->count) {
+		bt_dev_warn(hdev, "Buffer index is invalid");
+		return -EINVAL;
+	}
+
+	prev_size = buf_idx * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+	if (prev_size + offset >= prev_size)
+		data->dmp_hdr.write_ptr = prev_size + offset;
+	else
+		return -EINVAL;
+
+	ktime_get_real_ts64(&now);
+	time64_to_tm(now.tv_sec, 0, &tm_now);
+	dump_time_len = snprintf(buf, sizeof(buf), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
+				 tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
+				 tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
+
+	fw_build = snprintf(buf + dump_time_len, sizeof(buf) - dump_time_len,
+			    "Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
+			    2000 + (data->dmp_hdr.fw_timestamp >> 8),
+			    data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
+			    data->dmp_hdr.fw_build_num);
+
+	hdr_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
+		  sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
+		  sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
+		  sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
+		  sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
+		  sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
+		  sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
+		  sizeof(*tlv) + dump_time_len +
+		  sizeof(*tlv) + fw_build;
+
+	dump_size = hdr_len + sizeof(hdr_len);
+
+	skb = alloc_skb(dump_size, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	/* Add debug buffers data length to dump size */
+	dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
+
+	ret = hci_devcd_init(hdev, dump_size);
+	if (ret) {
+		bt_dev_err(hdev, "Failed to init devcoredump, err %d", ret);
+		kfree_skb(skb);
+		return ret;
+	}
+
+	skb_put_data(skb, &hdr_len, sizeof(hdr_len));
+
+	btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
+			      sizeof(data->dmp_hdr.cnvi_bt));
+
+	btintel_pcie_copy_tlv(skb, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
+			      sizeof(data->dmp_hdr.write_ptr));
+
+	data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
+							 BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
+
+	btintel_pcie_copy_tlv(skb, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
+			      sizeof(data->dmp_hdr.wrap_ctr));
+
+	btintel_pcie_copy_tlv(skb, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
+			      sizeof(data->dmp_hdr.trigger_reason));
+
+	btintel_pcie_copy_tlv(skb, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
+			      sizeof(data->dmp_hdr.fw_git_sha1));
+
+	btintel_pcie_copy_tlv(skb, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
+			      sizeof(data->dmp_hdr.cnvr_top));
+
+	btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
+			      sizeof(data->dmp_hdr.cnvi_top));
+
+	btintel_pcie_copy_tlv(skb, BTINTEL_DUMP_TIME, buf, dump_time_len);
+
+	btintel_pcie_copy_tlv(skb, BTINTEL_FW_BUILD, buf + dump_time_len, fw_build);
+
+	ret = hci_devcd_append(hdev, skb);
+	if (ret)
+		goto exit_err;
+
+	for (i = 0; i < dbgc->count; i++) {
+		ret = btintel_pcie_add_dmp_data(hdev, dbgc->bufs[i].data,
+						BTINTEL_PCIE_DBGC_BUFFER_SIZE);
+		if (ret)
+			break;
+	}
+
+exit_err:
+	hci_devcd_complete(hdev);
+	return ret;
+}
+
+static void btintel_pcie_dump_traces(struct hci_dev *hdev)
+{
+	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+	int ret = 0;
+
+	ret = btintel_pcie_get_mac_access(data);
+	if (ret) {
+		bt_dev_err(hdev, "Failed to get mac access: (%d)", ret);
+		return;
+	}
+
+	ret = btintel_pcie_read_dram_buffers(data);
+
+	btintel_pcie_release_mac_access(data);
+
+	if (ret)
+		bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
+}
+
+static void btintel_pcie_dump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+	u16 len = skb->len;
+	u16 *hdrlen_ptr;
+	char buf[80];
+
+	hdrlen_ptr = skb_put_zero(skb, sizeof(len));
+
+	snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n",
+		 INTEL_HW_VARIANT(data->dmp_hdr.cnvi_bt));
+	skb_put_data(skb, buf, strlen(buf));
+
+	snprintf(buf, sizeof(buf), "Firmware Build Number: %u\n",
+		 data->dmp_hdr.fw_build_num);
+	skb_put_data(skb, buf, strlen(buf));
+
+	snprintf(buf, sizeof(buf), "Driver: %s\n", data->dmp_hdr.driver_name);
+	skb_put_data(skb, buf, strlen(buf));
+
+	snprintf(buf, sizeof(buf), "Vendor: Intel\n");
+	skb_put_data(skb, buf, strlen(buf));
+
+	*hdrlen_ptr = skb->len - len;
+}
+
+static void btintel_pcie_dump_notify(struct hci_dev *hdev, int state)
+{
+	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+
+	switch (state) {
+	case HCI_DEVCOREDUMP_IDLE:
+		data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
+		break;
+	case HCI_DEVCOREDUMP_ACTIVE:
+		data->dmp_hdr.state = HCI_DEVCOREDUMP_ACTIVE;
+		break;
+	case HCI_DEVCOREDUMP_TIMEOUT:
+	case HCI_DEVCOREDUMP_ABORT:
+	case HCI_DEVCOREDUMP_DONE:
+		data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
+		break;
+	}
+}
+
 /* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
  * BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and wait for MSI-X with
  * BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
@@ -329,20 +685,6 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
 	return 0;
 }
 
-/* BIT(0) - ROM, BIT(1) - IML and BIT(3) - OP
- * Sometimes during firmware image switching from ROM to IML or IML to OP image,
- * the previous image bit is not cleared by firmware when alive interrupt is
- * received. Driver needs to take care of these sticky bits when deciding the
- * current image running on controller.
- * Ex: 0x10 and 0x11 - both represents that controller is running IML
- */
-static inline bool btintel_pcie_in_rom(struct btintel_pcie_data *data)
-{
-	return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM &&
-	       !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) &&
-	       !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
-}
-
 static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
 {
 	return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
@@ -393,6 +735,27 @@ static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
 	}
 }
 
+static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
+					void *buf, u32 dev_addr, int len)
+{
+	int err;
+	u32 *val = buf;
+
+	/* Get device mac access */
+	err = btintel_pcie_get_mac_access(data);
+	if (err) {
+		bt_dev_err(data->hdev, "Failed to get mac access %d", err);
+		return err;
+	}
+
+	for (; len > 0; len -= 4, dev_addr += 4, val++)
+		*val = btintel_pcie_rd_dev_mem(data, dev_addr);
+
+	btintel_pcie_release_mac_access(data);
+
+	return 0;
+}
+
 /* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
  * BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
  */
@@ -714,6 +1077,126 @@ exit_error:
 	return ret;
 }
 
+static void btintel_pcie_read_hwexp(struct btintel_pcie_data *data)
+{
+	int len, err, offset, pending;
+	struct sk_buff *skb;
+	u8 *buf, prefix[64];
+	u32 addr, val;
+	u16 pkt_len;
+
+	struct tlv {
+		u8	type;
+		__le16	len;
+		u8	val[];
+	} __packed;
+
+	struct tlv *tlv;
+
+	switch (data->dmp_hdr.cnvi_top & 0xfff) {
+	case BTINTEL_CNVI_BLAZARI:
+	case BTINTEL_CNVI_BLAZARIW:
+		/* only from step B0 onwards */
+		if (INTEL_CNVX_TOP_STEP(data->dmp_hdr.cnvi_top) != 0x01)
+			return;
+		len = BTINTEL_PCIE_BLZR_HWEXP_SIZE; /* exception data length */
+		addr = BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR;
+		break;
+	case BTINTEL_CNVI_SCP:
+		len = BTINTEL_PCIE_SCP_HWEXP_SIZE;
+		addr = BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR;
+		break;
+	default:
+		bt_dev_err(data->hdev, "Unsupported cnvi 0x%8.8x", data->dmp_hdr.cnvi_top);
+		return;
+	}
+
+	buf = kzalloc(len, GFP_KERNEL);
+	if (!buf)
+		goto exit_on_error;
+
+	btintel_pcie_mac_init(data);
+
+	err = btintel_pcie_read_device_mem(data, buf, addr, len);
+	if (err)
+		goto exit_on_error;
+
+	val = get_unaligned_le32(buf);
+	if (val != BTINTEL_PCIE_MAGIC_NUM) {
+		bt_dev_err(data->hdev, "Invalid exception dump signature: 0x%8.8x",
+			   val);
+		goto exit_on_error;
+	}
+
+	snprintf(prefix, sizeof(prefix), "Bluetooth: %s: ", bt_dev_name(data->hdev));
+
+	offset = 4;
+	do {
+		pending = len - offset;
+		if (pending < sizeof(*tlv))
+			break;
+		tlv = (struct tlv *)(buf + offset);
+
+		/* If type == 0, then there are no more TLVs to be parsed */
+		if (!tlv->type) {
+			bt_dev_dbg(data->hdev, "Invalid TLV type 0");
+			break;
+		}
+		pkt_len = le16_to_cpu(tlv->len);
+		offset += sizeof(*tlv);
+		pending = len - offset;
+		if (pkt_len > pending)
+			break;
+
+		offset += pkt_len;
+
+		/* Only TLVs of type == 1 are HCI events, no need to process other
+		 * TLVs
+		 */
+		if (tlv->type != 1)
+			continue;
+
+		bt_dev_dbg(data->hdev, "TLV packet length: %u", pkt_len);
+		if (pkt_len > HCI_MAX_EVENT_SIZE)
+			break;
+		skb = bt_skb_alloc(pkt_len, GFP_KERNEL);
+		if (!skb)
+			goto exit_on_error;
+		hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+		skb_put_data(skb, tlv->val, pkt_len);
+
+		/* copy Intel specific pcie packet type */
+		val = BTINTEL_PCIE_HCI_EVT_PKT;
+		memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &val,
+		       BTINTEL_PCIE_HCI_TYPE_LEN);
+
+		print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_OFFSET, 16, 1,
+			       tlv->val, pkt_len, false);
+
+		btintel_pcie_recv_frame(data, skb);
+	} while (offset < len);
+
+exit_on_error:
+	kfree(buf);
+}
+
+static void btintel_pcie_msix_hw_exp_handler(struct btintel_pcie_data *data)
+{
+	bt_dev_err(data->hdev, "Received hw exception interrupt");
+
+	if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
+		return;
+
+	if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags))
+		return;
+
+	/* Trigger device core dump when there is HW exception */
+	if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
+		data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
+
+	queue_work(data->workqueue, &data->rx_work);
+}
+
 static void btintel_pcie_rx_work(struct work_struct *work)
 {
 	struct btintel_pcie_data *data = container_of(work,
@@ -722,6 +1205,23 @@ static void btintel_pcie_rx_work(struct work_struct *work)
 	int err;
 	struct hci_dev *hdev = data->hdev;
 
+	if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
+		/* Unlike USB products, the controller will not send a hardware
+		 * exception event on exception.
+		 * Instead, the controller writes the hardware event to device
+		 * memory along with optional debug events, raises the MSI-X
+		 * interrupt and halts. The driver shall read the exception
+		 * event from device memory and pass it to the stack for
+		 * further processing.
+		 */
+		btintel_pcie_read_hwexp(data);
+		clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
+	}
+
+	if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
+		btintel_pcie_dump_traces(data->hdev);
+		clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
+	}
+
 	/* Process the sk_buf in queue and send to the HCI layer */
 	while ((skb = skb_dequeue(&data->rx_skb_q))) {
 		err = btintel_pcie_recv_frame(data, skb);
@@ -840,6 +1340,10 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
 		return IRQ_NONE;
 	}
 
+	/* This interrupt is raised when there is a hardware exception */
+	if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
+		btintel_pcie_msix_hw_exp_handler(data);
+
 	/* This interrupt is triggered by the firmware after updating
 	 * boot_stage register and image_response register
 	 */
@@ -920,7 +1424,8 @@ struct btintel_pcie_causes_list {
 static struct btintel_pcie_causes_list causes_list[] = {
 	{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0,	BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK,	0x00 },
 	{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1,	BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK,	0x01 },
-	{ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0,	BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK,	0x20 },
+	{ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0,	BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK,	0x20 },
+	{ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK,	0x23 },
 };
 
 /* This function configures the interrupt masks for both HW_INT_CAUSES and
@@ -1007,6 +1512,11 @@ static void btintel_pcie_init_ci(struct btintel_pcie_data *data,
 	ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
 	ci->num_urbdq1 = data->rxq.count;
 	ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
+
+	ci->dbg_output_mode = 0x01;
+	ci->dbgc_addr = data->dbgc.frag_p_addr;
+	ci->dbgc_size = data->dbgc.frag_size;
+	ci->dbg_preset = 0x00;
 }
 
 static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data,
@@ -1219,6 +1729,11 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
 	/* Setup Index Array */
 	btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia);
 
+	/* Setup data buffers for dbgc */
+	err = btintel_pcie_setup_dbgc(data);
+	if (err)
+		goto exit_error_txq;
+
 	/* Setup Context Information */
 	p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
 	v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
@@ -1392,6 +1907,7 @@ static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
 
 static int btintel_pcie_setup_internal(struct hci_dev *hdev)
 {
+	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
 	const u8 param[1] = { 0xFF };
 	struct intel_version_tlv ver_tlv;
 	struct sk_buff *skb;
@@ -1449,6 +1965,7 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
 	 */
 	switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
 	case 0x1e:	/* BzrI */
+	case 0x1f:	/* ScP */
 		/* Display version information of TLV type */
 		btintel_version_info_tlv(hdev, &ver_tlv);
 
@@ -1474,6 +1991,23 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
 		break;
 	}
 
+	data->dmp_hdr.cnvi_top = ver_tlv.cnvi_top;
+	data->dmp_hdr.cnvr_top = ver_tlv.cnvr_top;
+	data->dmp_hdr.fw_timestamp = ver_tlv.timestamp;
+	data->dmp_hdr.fw_build_type = ver_tlv.build_type;
+	data->dmp_hdr.fw_build_num = ver_tlv.build_num;
+	data->dmp_hdr.cnvi_bt = ver_tlv.cnvi_bt;
+
+	if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
+		data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;
+
+	err = hci_devcd_register(hdev, btintel_pcie_dump_traces, btintel_pcie_dump_hdr,
+				 btintel_pcie_dump_notify);
+	if (err) {
+		bt_dev_err(hdev, "Failed to register coredump (%d)", err);
+		goto exit_error;
+	}
+
 	btintel_print_fseq_info(hdev);
 exit_error:
 	kfree_skb(skb);
@@ -1538,6 +2072,7 @@ static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
 		goto exit_error;
 	}
 
+	data->dmp_hdr.driver_name = KBUILD_MODNAME;
 	return 0;
 
 exit_error:
@@ -1650,11 +2185,28 @@ static void btintel_pcie_remove(struct pci_dev *pdev)
 	pci_set_drvdata(pdev, NULL);
 }
 
+#ifdef CONFIG_DEV_COREDUMP
+static void btintel_pcie_coredump(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct btintel_pcie_data *data = pci_get_drvdata(pdev);
+
+	if (test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
+		return;
+
+	data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER;
+	queue_work(data->workqueue, &data->rx_work);
+}
+#endif
+
 static struct pci_driver btintel_pcie_driver = {
 	.name = KBUILD_MODNAME,
 	.id_table = btintel_pcie_table,
 	.probe = btintel_pcie_probe,
 	.remove = btintel_pcie_remove,
+#ifdef CONFIG_DEV_COREDUMP
+	.driver.coredump = btintel_pcie_coredump
+#endif
 };
 module_pci_driver(btintel_pcie_driver);
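For reference, a minimal userspace sketch of how the user-triggered path added above can be exercised, assuming the standard driver-core "coredump" sysfs attribute (created when .driver.coredump is set) and the devcoredump class device; the PCI address 0000:00:14.3 and the devcd0 index are placeholders, not values taken from this patch.

/* Hypothetical userspace sketch: write to the PCI device's "coredump"
 * attribute to invoke btintel_pcie_coredump(), then read the resulting
 * blob from the devcoredump class device. Paths are illustrative only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Any write triggers the driver's .coredump callback */
	fd = open("/sys/bus/pci/devices/0000:00:14.3/coredump", O_WRONLY);
	if (fd < 0)
		return 1;
	(void)write(fd, "1", 1);
	close(fd);

	/* The dump appears shortly afterwards as /sys/class/devcoredump/devcdN/data */
	sleep(1);
	fd = open("/sys/class/devcoredump/devcd0/data", O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}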
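btintel_pcie_read_dram_buffers() prefixes the debug-buffer payload with a u16 header length followed by packed type/length/value records built by btintel_pcie_copy_tlv(). A rough parser sketch for that header region follows, assuming the struct intel_tlv layout of one byte of type and one byte of length, and a little-endian host; the exact offset of this region inside the final devcoredump file depends on the hci_devcd framing that precedes it, so treat this as illustrative only.

/* Sketch: walk the TLV header emitted by btintel_pcie_read_dram_buffers().
 * Assumes records of the form { u8 type; u8 len; u8 val[len]; } preceded
 * by a u16 giving the total length of the TLV records.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void walk_dump_hdr(const uint8_t *p, size_t avail)
{
	uint16_t hdr_len;
	size_t off = sizeof(hdr_len);
	size_t end;

	if (avail < sizeof(hdr_len))
		return;
	memcpy(&hdr_len, p, sizeof(hdr_len));	/* assumes little-endian host */
	end = (size_t)hdr_len + sizeof(hdr_len);
	if (end > avail)
		end = avail;

	while (off + 2 <= end) {
		uint8_t type = p[off];
		uint8_t len = p[off + 1];

		if (off + 2 + len > end)
			break;
		printf("TLV type %u, len %u\n", type, len);
		off += 2 + len;
	}
}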