Diffstat (limited to 'drivers/block/mtip32xx/mtip32xx.c')
-rw-r--r-- | drivers/block/mtip32xx/mtip32xx.c | 226
1 file changed, 80 insertions, 146 deletions
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index a7daa8acbab3..88e8440e75c3 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -168,41 +168,6 @@ static bool mtip_check_surprise_removal(struct pci_dev *pdev)
 	return false; /* device present */
 }
 
-/* we have to use runtime tag to setup command header */
-static void mtip_init_cmd_header(struct request *rq)
-{
-	struct driver_data *dd = rq->q->queuedata;
-	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
-
-	/* Point the command headers at the command tables. */
-	cmd->command_header = dd->port->command_list +
-				(sizeof(struct mtip_cmd_hdr) * rq->tag);
-	cmd->command_header_dma = dd->port->command_list_dma +
-				(sizeof(struct mtip_cmd_hdr) * rq->tag);
-
-	if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
-		cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16);
-
-	cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
-}
-
-static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
-{
-	struct request *rq;
-
-	if (mtip_check_surprise_removal(dd->pdev))
-		return NULL;
-
-	rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
-	if (IS_ERR(rq))
-		return NULL;
-
-	/* Internal cmd isn't submitted via .queue_rq */
-	mtip_init_cmd_header(rq);
-
-	return blk_mq_rq_to_pdu(rq);
-}
-
 static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
 					  unsigned int tag)
 {
@@ -1023,13 +988,14 @@ static int mtip_exec_internal_command(struct mtip_port *port,
 		return -EFAULT;
 	}
 
-	int_cmd = mtip_get_int_command(dd);
-	if (!int_cmd) {
+	if (mtip_check_surprise_removal(dd->pdev))
+		return -EFAULT;
+
+	rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
+	if (IS_ERR(rq)) {
 		dbg_printk(MTIP_DRV_NAME
 			"Unable to allocate tag for PIO cmd\n");
 		return -EFAULT;
 	}
-	rq = blk_mq_rq_from_pdu(int_cmd);
-	rq->special = &icmd;
 
 	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
@@ -1050,6 +1016,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
 	}
 
 	/* Copy the command to the command table */
+	int_cmd = blk_mq_rq_to_pdu(rq);
+	int_cmd->icmd = &icmd;
 	memcpy(int_cmd->command, fis, fis_len*4);
 
 	rq->timeout = timeout;
@@ -1423,23 +1391,19 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
  * @dd		pointer to driver_data structure
  * @lba		starting lba
  * @len		# of 512b sectors to trim
- *
- * return value
- *	-ENOMEM		Out of dma memory
- *	-EINVAL		Invalid parameters passed in, trim not supported
- *	-EIO		Error submitting trim request to hw
  */
-static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
-			unsigned int len)
+static blk_status_t mtip_send_trim(struct driver_data *dd, unsigned int lba,
+		unsigned int len)
 {
-	int i, rv = 0;
 	u64 tlba, tlen, sect_left;
 	struct mtip_trim_entry *buf;
 	dma_addr_t dma_addr;
 	struct host_to_dev_fis fis;
+	blk_status_t ret = BLK_STS_OK;
+	int i;
 
 	if (!len || dd->trim_supp == false)
-		return -EINVAL;
+		return BLK_STS_IOERR;
 
 	/* Trim request too big */
 	WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES));
@@ -1454,7 +1418,7 @@ static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
 	buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
 								GFP_KERNEL);
 	if (!buf)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;
 	memset(buf, 0, ATA_SECT_SIZE);
 
 	for (i = 0, sect_left = len, tlba = lba;
@@ -1463,8 +1427,8 @@ static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
 		tlen = (sect_left >=
 			MTIP_MAX_TRIM_ENTRY_LEN ? MTIP_MAX_TRIM_ENTRY_LEN : sect_left);
-		buf[i].lba = __force_bit2int cpu_to_le32(tlba);
-		buf[i].range = __force_bit2int cpu_to_le16(tlen);
+		buf[i].lba = cpu_to_le32(tlba);
+		buf[i].range = cpu_to_le16(tlen);
 		tlba += tlen;
 		sect_left -= tlen;
 	}
@@ -1486,10 +1450,10 @@ static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
 					ATA_SECT_SIZE,
 					0,
 					MTIP_TRIM_TIMEOUT_MS) < 0)
-		rv = -EIO;
+		ret = BLK_STS_IOERR;
 
 	dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
-	return rv;
+	return ret;
 }
 
 /*
@@ -1585,23 +1549,20 @@ static inline void fill_command_sg(struct driver_data *dd,
 	int n;
 	unsigned int dma_len;
 	struct mtip_cmd_sg *command_sg;
-	struct scatterlist *sg = command->sg;
+	struct scatterlist *sg;
 
 	command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;
 
-	for (n = 0; n < nents; n++) {
+	for_each_sg(command->sg, sg, nents, n) {
 		dma_len = sg_dma_len(sg);
 		if (dma_len > 0x400000)
 			dev_err(&dd->pdev->dev,
 				"DMA segment length truncated\n");
-		command_sg->info = __force_bit2int
-			cpu_to_le32((dma_len-1) & 0x3FFFFF);
-		command_sg->dba = __force_bit2int
-			cpu_to_le32(sg_dma_address(sg));
-		command_sg->dba_upper = __force_bit2int
+		command_sg->info = cpu_to_le32((dma_len-1) & 0x3FFFFF);
+		command_sg->dba = cpu_to_le32(sg_dma_address(sg));
+		command_sg->dba_upper =
 			cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
 		command_sg++;
-		sg++;
 	}
 }
@@ -2171,7 +2132,6 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
  * @dd       Pointer to the driver data structure.
  * @start    First sector to read.
  * @nsect    Number of sectors to read.
- * @nents    Number of entries in scatter list for the read command.
  * @tag      The tag of this read command.
  * @callback Pointer to the function that should be called
  *           when the read completes.
@@ -2183,16 +2143,20 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
  *	None
  */
 static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
-			      struct mtip_cmd *command, int nents,
+			      struct mtip_cmd *command,
 			      struct blk_mq_hw_ctx *hctx)
 {
+	struct mtip_cmd_hdr *hdr =
+		dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
 	struct host_to_dev_fis *fis;
 	struct mtip_port *port = dd->port;
 	int dma_dir = rq_data_dir(rq) == READ ?
 				DMA_FROM_DEVICE : DMA_TO_DEVICE;
 	u64 start = blk_rq_pos(rq);
 	unsigned int nsect = blk_rq_sectors(rq);
+	unsigned int nents;
 
 	/* Map the scatter list for DMA access */
+	nents = blk_rq_map_sg(hctx->queue, rq, command->sg);
 	nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
 
 	prefetch(&port->flags);
@@ -2233,10 +2197,11 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
 		fis->device |= 1 << 7;
 
 	/* Populate the command header */
-	command->command_header->opts =
-			__force_bit2int cpu_to_le32(
-				(nents << 16) | 5 | AHCI_CMD_PREFETCH);
-	command->command_header->byte_count = 0;
+	hdr->ctba = cpu_to_le32(command->command_dma & 0xFFFFFFFF);
+	if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
+		hdr->ctbau = cpu_to_le32((command->command_dma >> 16) >> 16);
+	hdr->opts = cpu_to_le32((nents << 16) | 5 | AHCI_CMD_PREFETCH);
+	hdr->byte_count = 0;
 
 	command->direction = dma_dir;
@@ -2715,12 +2680,12 @@ static void mtip_softirq_done_fn(struct request *rq)
 							cmd->direction);
 
 	if (unlikely(cmd->unaligned))
-		up(&dd->port->cmd_slot_unal);
+		atomic_inc(&dd->port->cmd_slot_unal);
 
 	blk_mq_end_request(rq, cmd->status);
 }
 
-static void mtip_abort_cmd(struct request *req, void *data, bool reserved)
+static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
 {
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
 	struct driver_data *dd = data;
@@ -2730,14 +2695,16 @@ static void mtip_abort_cmd(struct request *req, void *data, bool reserved)
 	clear_bit(req->tag, dd->port->cmds_to_issue);
 	cmd->status = BLK_STS_IOERR;
 	mtip_softirq_done_fn(req);
+	return true;
 }
 
-static void mtip_queue_cmd(struct request *req, void *data, bool reserved)
+static bool mtip_queue_cmd(struct request *req, void *data, bool reserved)
 {
 	struct driver_data *dd = data;
 
 	set_bit(req->tag, dd->port->cmds_to_issue);
 	blk_abort_request(req);
+	return true;
 }
 
 /*
@@ -2803,10 +2770,7 @@ restart_eh:
 
 		blk_mq_quiesce_queue(dd->queue);
 
-		spin_lock(dd->queue->queue_lock);
-		blk_mq_tagset_busy_iter(&dd->tags,
-					mtip_queue_cmd, dd);
-		spin_unlock(dd->queue->queue_lock);
+		blk_mq_tagset_busy_iter(&dd->tags, mtip_queue_cmd, dd);
 
 		set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
@@ -3026,7 +2990,7 @@ static int mtip_hw_init(struct driver_data *dd)
 	else
 		dd->unal_qdepth = 0;
 
-	sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth);
+	atomic_set(&dd->port->cmd_slot_unal, dd->unal_qdepth);
 
 	/* Spinlock to prevent concurrent issue */
 	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
@@ -3531,58 +3495,24 @@ static inline bool is_se_active(struct driver_data *dd)
 	return false;
 }
 
-/*
- * Block layer make request function.
- *
- * This function is called by the kernel to process a BIO for
- * the P320 device.
- *
- * @queue Pointer to the request queue. Unused other than to obtain
- *        the driver data structure.
- * @rq    Pointer to the request.
- *
- */
-static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static inline bool is_stopped(struct driver_data *dd, struct request *rq)
 {
-	struct driver_data *dd = hctx->queue->queuedata;
-	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
-	unsigned int nents;
-
-	if (is_se_active(dd))
-		return -ENODATA;
-
-	if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
-		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
-							&dd->dd_flag))) {
-			return -ENXIO;
-		}
-		if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) {
-			return -ENODATA;
-		}
-		if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT,
-							&dd->dd_flag) &&
-				rq_data_dir(rq))) {
-			return -ENODATA;
-		}
-		if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
-			test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
-			return -ENODATA;
-	}
-
-	if (req_op(rq) == REQ_OP_DISCARD) {
-		int err;
-
-		err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
-		blk_mq_end_request(rq, err ? BLK_STS_IOERR : BLK_STS_OK);
-		return 0;
-	}
+	if (likely(!(dd->dd_flag & MTIP_DDF_STOP_IO)))
+		return false;
 
-	/* Create the scatter list for this request. */
-	nents = blk_rq_map_sg(hctx->queue, rq, cmd->sg);
+	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
+		return true;
+	if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+		return true;
+	if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag) &&
+	    rq_data_dir(rq))
+		return true;
+	if (test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
+		return true;
+	if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
+		return true;
 
-	/* Issue the read/write. */
-	mtip_hw_submit_io(dd, rq, cmd, nents, hctx);
-	return 0;
+	return false;
 }
 
 static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
@@ -3603,7 +3533,7 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
 			cmd->unaligned = 1;
 	}
 
-	if (cmd->unaligned && down_trylock(&dd->port->cmd_slot_unal))
+	if (cmd->unaligned && atomic_dec_if_positive(&dd->port->cmd_slot_unal) >= 0)
 		return true;
 
 	return false;
@@ -3613,32 +3543,33 @@ static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
 					 struct request *rq)
 {
 	struct driver_data *dd = hctx->queue->queuedata;
-	struct mtip_int_cmd *icmd = rq->special;
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+	struct mtip_int_cmd *icmd = cmd->icmd;
+	struct mtip_cmd_hdr *hdr =
+		dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
 	struct mtip_cmd_sg *command_sg;
 
 	if (mtip_commands_active(dd->port))
-		return BLK_STS_RESOURCE;
+		return BLK_STS_DEV_RESOURCE;
 
+	hdr->ctba = cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
+	if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
+		hdr->ctbau = cpu_to_le32((cmd->command_dma >> 16) >> 16);
 	/* Populate the SG list */
-	cmd->command_header->opts =
-		__force_bit2int cpu_to_le32(icmd->opts | icmd->fis_len);
+	hdr->opts = cpu_to_le32(icmd->opts | icmd->fis_len);
 	if (icmd->buf_len) {
 		command_sg = cmd->command + AHCI_CMD_TBL_HDR_SZ;
 
-		command_sg->info =
-			__force_bit2int cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF);
-		command_sg->dba =
-			__force_bit2int cpu_to_le32(icmd->buffer & 0xFFFFFFFF);
+		command_sg->info = cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF);
+		command_sg->dba = cpu_to_le32(icmd->buffer & 0xFFFFFFFF);
 		command_sg->dba_upper =
-			__force_bit2int cpu_to_le32((icmd->buffer >> 16) >> 16);
+			cpu_to_le32((icmd->buffer >> 16) >> 16);
 
-		cmd->command_header->opts |=
-			__force_bit2int cpu_to_le32((1 << 16));
+		hdr->opts |= cpu_to_le32((1 << 16));
 	}
 
 	/* Populate the command header */
-	cmd->command_header->byte_count = 0;
+	hdr->byte_count = 0;
 
 	blk_mq_start_request(rq);
 	mtip_issue_non_ncq_command(dd->port, rq->tag);
@@ -3648,23 +3579,25 @@ static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
 static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
 			 const struct blk_mq_queue_data *bd)
 {
+	struct driver_data *dd = hctx->queue->queuedata;
 	struct request *rq = bd->rq;
-	int ret;
-
-	mtip_init_cmd_header(rq);
+	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
 	if (blk_rq_is_passthrough(rq))
 		return mtip_issue_reserved_cmd(hctx, rq);
 
 	if (unlikely(mtip_check_unal_depth(hctx, rq)))
-		return BLK_STS_RESOURCE;
+		return BLK_STS_DEV_RESOURCE;
+
+	if (is_se_active(dd) || is_stopped(dd, rq))
+		return BLK_STS_IOERR;
 
 	blk_mq_start_request(rq);
 
-	ret = mtip_submit_request(hctx, rq);
-	if (likely(!ret))
-		return BLK_STS_OK;
-	return BLK_STS_IOERR;
+	if (req_op(rq) == REQ_OP_DISCARD)
+		return mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
+	mtip_hw_submit_io(dd, rq, cmd, hctx);
+	return BLK_STS_OK;
 }
 
 static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
@@ -3920,12 +3853,13 @@ protocol_init_error:
 	return rv;
 }
 
-static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
+static bool mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
 {
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
 	cmd->status = BLK_STS_IOERR;
 	blk_mq_complete_request(rq);
+	return true;
 }
 
 /*
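The diff above replaces the cmd_slot_unal counting semaphore with an atomic_t, pairing atomic_dec_if_positive() in mtip_check_unal_depth() with atomic_inc() in mtip_softirq_done_fn(). The following is a standalone C11 sketch of that decrement-if-positive slot throttle; it is an illustration, not code from the driver or the kernel, and the helper and function names are made up for this example.

/*
 * Standalone sketch (illustrative only): a "decrement if positive" slot
 * throttle built on C11 atomics, mirroring the kernel's
 * atomic_dec_if_positive()/atomic_inc() pairing used in the patch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int slots;		/* plays the role of port->cmd_slot_unal */

/*
 * Mirror of atomic_dec_if_positive(): returns the old value minus one and
 * only performs the decrement when the result would still be >= 0.
 */
static int dec_if_positive(atomic_int *v)
{
	int c = atomic_load(v);
	int dec;

	do {
		dec = c - 1;
		if (dec < 0)
			break;		/* no slot left, do not decrement */
	} while (!atomic_compare_exchange_weak(v, &c, dec));

	return dec;
}

static bool get_unaligned_slot(void)
{
	return dec_if_positive(&slots) >= 0;	/* >= 0 means a slot was taken */
}

static void put_unaligned_slot(void)
{
	atomic_fetch_add(&slots, 1);		/* the atomic_inc() on completion */
}

int main(void)
{
	atomic_store(&slots, 2);		/* like unal_qdepth = 2 */

	for (int i = 0; i < 4; i++)
		printf("request %d: slot %s\n", i,
		       get_unaligned_slot() ? "acquired" : "unavailable");

	put_unaligned_slot();
	printf("after release: slot %s\n",
	       get_unaligned_slot() ? "acquired" : "unavailable");
	return 0;
}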
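The new per-request header setup (hdr->ctba / hdr->ctbau above) also keeps the existing idiom of splitting a 64-bit bus address with (addr >> 16) >> 16. Writing the upper half as two 16-bit shifts keeps the expression well defined even on configurations where dma_addr_t is a 32-bit type, where a single shift by 32 would be undefined behaviour. A small standalone illustration, again not driver code:

/*
 * Standalone illustration of the ctba/ctbau split: a 64-bit DMA address is
 * stored as two 32-bit header fields, with the upper half taken as
 * (addr >> 16) >> 16.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t command_dma = 0x0000001234abc000ULL;	/* example bus address */

	uint32_t ctba  = (uint32_t)(command_dma & 0xFFFFFFFF);		/* lower 32 bits */
	uint32_t ctbau = (uint32_t)((command_dma >> 16) >> 16);		/* upper 32 bits */

	printf("ctba  = 0x%08" PRIx32 "\n", ctba);
	printf("ctbau = 0x%08" PRIx32 "\n", ctbau);
	return 0;
}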