Diffstat (limited to 'drivers/scsi/smartpqi/smartpqi_init.c')
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 496
1 file changed, 366 insertions(+), 130 deletions(-)
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 24c7cb285dca..7fd5a8c813dc 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.26-030"
+#define DRIVER_VERSION "2.1.30-031"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 26
-#define DRIVER_REVISION 30
+#define DRIVER_RELEASE 30
+#define DRIVER_REVISION 31
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -92,9 +92,9 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
-static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
-static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
+static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size);
+static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor);
+static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
@@ -1508,6 +1508,12 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto error;
+ device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);
+ if (!device->raid_io_stats) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
device->raid_map = raid_map;
return 0;
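
The hunk above introduces per-CPU I/O statistics: alloc_percpu() gives each CPU its own zero-initialized copy of struct pqi_raid_io_stats, so hot-path increments never contend on a shared cache line or need a lock. A minimal sketch of the pattern, with illustrative names (the real struct pqi_raid_io_stats lives in smartpqi.h):

```c
#include <linux/percpu.h>

/* Illustrative stand-in for struct pqi_raid_io_stats. */
struct demo_io_stats {
	u64 raid_bypass_cnt;
	u64 write_stream_cnt;
};

static struct demo_io_stats __percpu *demo_stats;

static int demo_alloc(void)
{
	demo_stats = alloc_percpu(struct demo_io_stats); /* zeroed on every CPU */
	return demo_stats ? 0 : -ENOMEM;
}

static void demo_io_hot_path(void)
{
	/* Lock-free: touches only this CPU's copy. */
	this_cpu_inc(demo_stats->raid_bypass_cnt);
}

static void demo_free(void)
{
	free_percpu(demo_stats); /* NULL-safe, like kfree() */
}
```

The driver itself increments through per_cpu_ptr(..., smp_processor_id()), as later hunks show; this_cpu_inc() is the generic preemption-safe form of the same idea.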
@@ -2099,6 +2105,10 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
/* To prevent this from being freed later. */
new_device->raid_map = NULL;
}
+ if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) {
+ existing_device->raid_io_stats = new_device->raid_io_stats;
+ new_device->raid_io_stats = NULL;
+ }
existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
}
@@ -2121,6 +2131,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
if (device) {
+ free_percpu(device->raid_io_stats);
kfree(device->raid_map);
kfree(device);
}
@@ -2292,17 +2303,23 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
* queue depth, device size.
*/
list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ /*
+ * Check for queue depth change.
+ */
if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
device->advertised_queue_depth = device->queue_depth;
scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
- if (pqi_volume_rescan_needed(device)) {
- device->rescan = false;
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- scsi_rescan_device(device->sdev);
- } else {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- }
+ }
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ /*
+ * Check for changes in the device, such as size.
+ */
+ if (pqi_volume_rescan_needed(device)) {
+ device->rescan = false;
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ scsi_rescan_device(device->sdev);
+ } else {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
}
@@ -2354,14 +2371,6 @@ static inline void pqi_mask_device(u8 *scsi3addr)
scsi3addr[3] |= 0xc0;
}
-static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
-{
- if (pqi_is_logical_device(device))
- return false;
-
- return (device->path_map & (device->path_map - 1)) != 0;
-}
-
static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
@@ -3244,6 +3253,20 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
sense_data_length);
}
+ if (pqi_cmd_priv(scmd)->this_residual &&
+ !pqi_is_logical_device(scmd->device->hostdata) &&
+ scsi_status == SAM_STAT_CHECK_CONDITION &&
+ host_byte == DID_OK &&
+ sense_data_length &&
+ scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) &&
+ sshdr.sense_key == ILLEGAL_REQUEST &&
+ sshdr.asc == 0x26 &&
+ sshdr.ascq == 0x0) {
+ host_byte = DID_NO_CONNECT;
+ pqi_take_device_offline(scmd->device, "AIO");
+ scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1);
+ }
+
scmd->result = scsi_status;
set_host_byte(scmd, host_byte);
}
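
The new error check above decodes raw sense data before acting on it: scsi_normalize_sense() parses both fixed and descriptor sense formats into a scsi_sense_hdr, and ASC 0x26/ASCQ 0x00 is INVALID FIELD IN PARAMETER LIST. A sketch of that decode step in isolation (helper name is hypothetical):

```c
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

static bool demo_sense_is_invalid_field(const u8 *sense, int sense_len)
{
	struct scsi_sense_hdr sshdr;

	/* Handles both fixed and descriptor sense formats. */
	return scsi_normalize_sense(sense, sense_len, &sshdr) &&
	       sshdr.sense_key == ILLEGAL_REQUEST &&
	       sshdr.asc == 0x26 && sshdr.ascq == 0x0;
}
```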
@@ -3258,14 +3281,12 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
int residual_count;
int xfer_count;
bool device_offline;
- struct pqi_scsi_dev *device;
scmd = io_request->scmd;
error_info = io_request->error_info;
host_byte = DID_OK;
sense_data_length = 0;
device_offline = false;
- device = scmd->device->hostdata;
switch (error_info->service_response) {
case PQI_AIO_SERV_RESPONSE_COMPLETE:
@@ -3290,14 +3311,8 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
break;
case PQI_AIO_STATUS_AIO_PATH_DISABLED:
pqi_aio_path_disabled(io_request);
- if (pqi_is_multipath_device(device)) {
- pqi_device_remove_start(device);
- host_byte = DID_NO_CONNECT;
- scsi_status = SAM_STAT_CHECK_CONDITION;
- } else {
- scsi_status = SAM_STAT_GOOD;
- io_request->status = -EAGAIN;
- }
+ scsi_status = SAM_STAT_GOOD;
+ io_request->status = -EAGAIN;
break;
case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
case PQI_AIO_STATUS_INVALID_DEVICE:
@@ -3625,7 +3640,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
ctrl_info->pqi_mode_enabled = false;
pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
- pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
pqi_ctrl_ofa_done(ctrl_info);
dev_info(&ctrl_info->pci_dev->dev,
"Online Firmware Activation: %s\n",
@@ -3636,7 +3651,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
"Online Firmware Activation ABORTED\n");
if (ctrl_info->soft_reset_handshake_supported)
pqi_clear_soft_reset_status(ctrl_info);
- pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
pqi_ctrl_ofa_done(ctrl_info);
pqi_ofa_ctrl_unquiesce(ctrl_info);
break;
@@ -3646,7 +3661,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
dev_err(&ctrl_info->pci_dev->dev,
"unexpected Online Firmware Activation reset status: 0x%x\n",
reset_status);
- pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
pqi_ctrl_ofa_done(ctrl_info);
pqi_ofa_ctrl_unquiesce(ctrl_info);
pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
@@ -3661,8 +3676,8 @@ static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
pqi_ctrl_ofa_start(ctrl_info);
- pqi_ofa_setup_host_buffer(ctrl_info);
- pqi_ofa_host_memory_update(ctrl_info);
+ pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested);
+ pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE);
}
static void pqi_ofa_quiesce_worker(struct work_struct *work)
@@ -3702,7 +3717,7 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
dev_info(&ctrl_info->pci_dev->dev,
"received Online Firmware Activation cancel request: reason: %u\n",
ctrl_info->ofa_cancel_reason);
- pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
pqi_ctrl_ofa_done(ctrl_info);
break;
default:
@@ -5933,7 +5948,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
int rc;
struct pqi_scsi_dev *device;
struct pqi_stream_data *pqi_stream_data;
- struct pqi_scsi_dev_raid_map_data rmd;
+ struct pqi_scsi_dev_raid_map_data rmd = { 0 };
if (!ctrl_info->enable_stream_detection)
return false;
@@ -5975,6 +5990,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
pqi_stream_data->next_lba = rmd.first_block +
rmd.block_cnt;
pqi_stream_data->last_accessed = jiffies;
+ per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;
return true;
}
@@ -6025,7 +6041,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
ctrl_info = shost_to_hba(shost);
- if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
+ if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) {
set_host_byte(scmd, DID_NO_CONNECT);
pqi_scsi_done(scmd);
return 0;
@@ -6053,7 +6069,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
raid_bypassed = true;
- device->raid_bypass_cnt++;
+ per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++;
}
}
if (!raid_bypassed)
@@ -6190,14 +6206,12 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
continue;
scsi_device = scmd->device->hostdata;
- if (scsi_device != device)
- continue;
-
- if ((u8)scmd->device->lun != lun)
- continue;
list_del(&io_request->request_list_entry);
- set_host_byte(scmd, DID_RESET);
+ if (scsi_device == device && (u8)scmd->device->lun == lun)
+ set_host_byte(scmd, DID_RESET);
+ else
+ set_host_byte(scmd, DID_REQUEUE);
pqi_free_io_request(io_request);
scsi_dma_unmap(scmd);
pqi_scsi_done(scmd);
@@ -7350,7 +7364,8 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
struct scsi_device *sdev;
struct pqi_scsi_dev *device;
unsigned long flags;
- unsigned int raid_bypass_cnt;
+ u64 raid_bypass_cnt;
+ int cpu;
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
@@ -7366,11 +7381,17 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
return -ENODEV;
}
- raid_bypass_cnt = device->raid_bypass_cnt;
+ raid_bypass_cnt = 0;
+
+ if (device->raid_io_stats) {
+ for_each_online_cpu(cpu) {
+ raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt;
+ }
+ }
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
+ return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt);
}
static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
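
Reading a per-CPU counter back is a fold across CPUs, as in the updated show routine above. A sketch assuming the demo_io_stats layout from the earlier snippet; counts accumulated on CPUs that are offline at read time are skipped, which is acceptable for a diagnostic counter:

```c
#include <linux/cpumask.h>
#include <linux/percpu.h>

static u64 demo_sum_bypass(struct demo_io_stats __percpu *stats)
{
	u64 sum = 0;
	int cpu;

	if (!stats)	/* device may predate stats allocation */
		return 0;

	for_each_online_cpu(cpu)
		sum += per_cpu_ptr(stats, cpu)->raid_bypass_cnt;

	return sum;
}
```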
@@ -7452,6 +7473,43 @@ static ssize_t pqi_numa_node_show(struct device *dev,
return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
}
+static ssize_t pqi_write_stream_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ u64 write_stream_cnt;
+ int cpu;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ write_stream_cnt = 0;
+
+ if (device->raid_io_stats) {
+ for_each_online_cpu(cpu) {
+ write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt;
+ }
+ }
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt);
+}
+
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
@@ -7462,6 +7520,7 @@ static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
+static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL);
static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_lunid.attr,
@@ -7473,6 +7532,7 @@ static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_raid_bypass_cnt.attr,
&dev_attr_sas_ncq_prio_enable.attr,
&dev_attr_numa_node.attr,
+ &dev_attr_write_stream_cnt.attr,
NULL
};
@@ -7863,6 +7923,9 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
break;
+ case PQI_FIRMWARE_FEATURE_CTRL_LOGGING:
+ ctrl_info->ctrl_logging_supported = firmware_feature->enabled;
+ break;
}
pqi_firmware_feature_status(ctrl_info, firmware_feature);
@@ -7968,6 +8031,11 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
.feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
.feature_status = pqi_ctrl_update_feature_flags,
},
+ {
+ .feature_name = "Controller Data Logging",
+ .feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
};
static void pqi_process_firmware_features(
@@ -8070,6 +8138,7 @@ static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
ctrl_info->firmware_triage_supported = false;
ctrl_info->rpl_extended_format_4_5_supported = false;
ctrl_info->multi_lun_device_supported = false;
+ ctrl_info->ctrl_logging_supported = false;
}
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
@@ -8210,6 +8279,9 @@ static void pqi_perform_lockup_action(void)
}
}
+#define PQI_CTRL_LOG_TOTAL_SIZE (4 * 1024 * 1024)
+#define PQI_CTRL_LOG_MIN_SIZE (PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS)
+
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
int rc;
@@ -8221,6 +8293,12 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
}
+ if (sis_is_ctrl_logging_supported(ctrl_info)) {
+ sis_notify_kdump(ctrl_info);
+ rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
+ if (rc)
+ return rc;
+ }
sis_soft_reset(ctrl_info);
ssleep(PQI_POST_RESET_DELAY_SECS);
} else {
@@ -8402,6 +8480,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
+ if (ctrl_info->ctrl_logging_supported && !reset_devices) {
+ pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
+ pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
+ }
+
rc = pqi_get_ctrl_product_details(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -8586,8 +8669,22 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
return rc;
}
- if (pqi_ofa_in_progress(ctrl_info))
+ if (pqi_ofa_in_progress(ctrl_info)) {
pqi_ctrl_unblock_scan(ctrl_info);
+ if (ctrl_info->ctrl_logging_supported) {
+ if (!ctrl_info->ctrl_log_memory.host_memory)
+ pqi_host_setup_buffer(ctrl_info,
+ &ctrl_info->ctrl_log_memory,
+ PQI_CTRL_LOG_TOTAL_SIZE,
+ PQI_CTRL_LOG_MIN_SIZE);
+ pqi_host_memory_update(ctrl_info,
+ &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
+ } else {
+ if (ctrl_info->ctrl_log_memory.host_memory)
+ pqi_host_free_buffer(ctrl_info,
+ &ctrl_info->ctrl_log_memory);
+ }
+ }
pqi_scan_scsi_devices(ctrl_info);
@@ -8777,6 +8874,7 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
pqi_fail_all_outstanding_requests(ctrl_info);
ctrl_info->pqi_mode_enabled = false;
}
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory);
pqi_unregister_scsi(ctrl_info);
if (ctrl_info->pqi_mode_enabled)
pqi_revert_to_sis_mode(ctrl_info);
@@ -8802,177 +8900,187 @@ static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
pqi_ctrl_unblock_scan(ctrl_info);
}
-static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
+{
+ ssleep(delay_secs);
+
+ return pqi_ctrl_init_resume(ctrl_info);
+}
+
+static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
+ u32 total_size, u32 chunk_size)
{
int i;
u32 sg_count;
struct device *dev;
- struct pqi_ofa_memory *ofap;
+ struct pqi_host_memory *host_memory;
struct pqi_sg_descriptor *mem_descriptor;
dma_addr_t dma_handle;
- ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
sg_count = DIV_ROUND_UP(total_size, chunk_size);
- if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
+ if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
goto out;
- ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
- if (!ctrl_info->pqi_ofa_chunk_virt_addr)
+ host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL);
+ if (!host_memory_descriptor->host_chunk_virt_address)
goto out;
dev = &ctrl_info->pci_dev->dev;
+ host_memory = host_memory_descriptor->host_memory;
for (i = 0; i < sg_count; i++) {
- ctrl_info->pqi_ofa_chunk_virt_addr[i] =
- dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
- if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
+ host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
+ if (!host_memory_descriptor->host_chunk_virt_address[i])
goto out_free_chunks;
- mem_descriptor = &ofap->sg_descriptor[i];
+ mem_descriptor = &host_memory->sg_descriptor[i];
put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
put_unaligned_le32(chunk_size, &mem_descriptor->length);
}
put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
- put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
- put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
+ put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors);
+ put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated);
return 0;
out_free_chunks:
while (--i >= 0) {
- mem_descriptor = &ofap->sg_descriptor[i];
+ mem_descriptor = &host_memory->sg_descriptor[i];
dma_free_coherent(dev, chunk_size,
- ctrl_info->pqi_ofa_chunk_virt_addr[i],
+ host_memory_descriptor->host_chunk_virt_address[i],
get_unaligned_le64(&mem_descriptor->address));
}
- kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
-
+ kfree(host_memory_descriptor->host_chunk_virt_address);
out:
return -ENOMEM;
}
-static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
+ u32 total_required_size, u32 min_required_size)
{
- u32 total_size;
u32 chunk_size;
u32 min_chunk_size;
- if (ctrl_info->ofa_bytes_requested == 0)
+ if (total_required_size == 0 || min_required_size == 0)
return 0;
- total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
- min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
+ total_required_size = PAGE_ALIGN(total_required_size);
+ min_required_size = PAGE_ALIGN(min_required_size);
+ min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS);
min_chunk_size = PAGE_ALIGN(min_chunk_size);
- for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
- if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
- return 0;
- chunk_size /= 2;
- chunk_size = PAGE_ALIGN(chunk_size);
+ while (total_required_size >= min_required_size) {
+ for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) {
+ if (pqi_host_alloc_mem(ctrl_info,
+ host_memory_descriptor, total_required_size,
+ chunk_size) == 0)
+ return 0;
+ chunk_size /= 2;
+ chunk_size = PAGE_ALIGN(chunk_size);
+ }
+ total_required_size /= 2;
+ total_required_size = PAGE_ALIGN(total_required_size);
}
return -ENOMEM;
}
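
pqi_host_alloc_buffer() now implements a two-level fallback: try the full size in one chunk, halve the chunk size until the DMA allocations succeed or chunks hit the per-SG-descriptor minimum, then halve the total itself, never dropping below the caller's minimum. A standalone sketch of that strategy, with try_alloc() as a stand-in for pqi_host_alloc_mem():

```c
#include <linux/math.h>	/* DIV_ROUND_UP */
#include <linux/mm.h>	/* PAGE_ALIGN */

static int demo_alloc_with_fallback(u32 total, u32 min_total, u32 max_chunks,
				    int (*try_alloc)(u32 total, u32 chunk))
{
	u32 chunk, min_chunk;

	total = PAGE_ALIGN(total);
	min_total = PAGE_ALIGN(min_total);
	min_chunk = PAGE_ALIGN(DIV_ROUND_UP(total, max_chunks));

	while (total >= min_total) {
		for (chunk = total; chunk >= min_chunk;
		     chunk = PAGE_ALIGN(chunk / 2)) {
			if (try_alloc(total, chunk) == 0)
				return 0;
			if (chunk == min_chunk)	/* PAGE_ALIGN(x / 2) can stall at PAGE_SIZE */
				break;
		}
		if (total == min_total)
			break;
		total = PAGE_ALIGN(total / 2);
	}

	return -ENOMEM;
}
```

With the controller-log defaults above (4 MiB total, minimum of total divided by PQI_HOST_MAX_SG_DESCRIPTORS), this degrades gracefully on fragmented systems instead of failing outright.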
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
+ u32 total_size, u32 min_size)
{
struct device *dev;
- struct pqi_ofa_memory *ofap;
+ struct pqi_host_memory *host_memory;
dev = &ctrl_info->pci_dev->dev;
- ofap = dma_alloc_coherent(dev, sizeof(*ofap),
- &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
- if (!ofap)
+ host_memory = dma_alloc_coherent(dev, sizeof(*host_memory),
+ &host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL);
+ if (!host_memory)
return;
- ctrl_info->pqi_ofa_mem_virt_addr = ofap;
+ host_memory_descriptor->host_memory = host_memory;
- if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
- dev_err(dev,
- "failed to allocate host buffer for Online Firmware Activation\n");
- dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
- ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+ if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor,
+ total_size, min_size) < 0) {
+ dev_err(dev, "failed to allocate firmware usable host buffer\n");
+ dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+ host_memory_descriptor->host_memory_dma_handle);
+ host_memory_descriptor->host_memory = NULL;
return;
}
-
- put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
- memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
}
-static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor)
{
unsigned int i;
struct device *dev;
- struct pqi_ofa_memory *ofap;
+ struct pqi_host_memory *host_memory;
struct pqi_sg_descriptor *mem_descriptor;
unsigned int num_memory_descriptors;
- ofap = ctrl_info->pqi_ofa_mem_virt_addr;
- if (!ofap)
+ host_memory = host_memory_descriptor->host_memory;
+ if (!host_memory)
return;
dev = &ctrl_info->pci_dev->dev;
- if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
+ if (get_unaligned_le32(&host_memory->bytes_allocated) == 0)
goto out;
- mem_descriptor = ofap->sg_descriptor;
- num_memory_descriptors =
- get_unaligned_le16(&ofap->num_memory_descriptors);
+ mem_descriptor = host_memory->sg_descriptor;
+ num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors);
for (i = 0; i < num_memory_descriptors; i++) {
dma_free_coherent(dev,
get_unaligned_le32(&mem_descriptor[i].length),
- ctrl_info->pqi_ofa_chunk_virt_addr[i],
+ host_memory_descriptor->host_chunk_virt_address[i],
get_unaligned_le64(&mem_descriptor[i].address));
}
- kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
+ kfree(host_memory_descriptor->host_chunk_virt_address);
out:
- dma_free_coherent(dev, sizeof(*ofap), ofap,
- ctrl_info->pqi_ofa_mem_dma_handle);
- ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+ dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+ host_memory_descriptor->host_memory_dma_handle);
+ host_memory_descriptor->host_memory = NULL;
}
-static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
+static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
+ u16 function_code)
{
u32 buffer_length;
struct pqi_vendor_general_request request;
- struct pqi_ofa_memory *ofap;
+ struct pqi_host_memory *host_memory;
memset(&request, 0, sizeof(request));
request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
- put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
- &request.header.iu_length);
- put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
- &request.function_code);
-
- ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
- if (ofap) {
- buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
- get_unaligned_le16(&ofap->num_memory_descriptors) *
- sizeof(struct pqi_sg_descriptor);
-
- put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
- &request.data.ofa_memory_allocation.buffer_address);
- put_unaligned_le32(buffer_length,
- &request.data.ofa_memory_allocation.buffer_length);
+ put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
+ put_unaligned_le16(function_code, &request.function_code);
+
+ host_memory = host_memory_descriptor->host_memory;
+
+ if (host_memory) {
+ buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor);
+ put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address);
+ put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length);
+
+ if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) {
+ put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version);
+ memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature));
+ } else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) {
+ put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version);
+ memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature));
+ }
}
return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}
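
The unified update routine above stamps the shared buffer with either the OFA or the controller-log signature and hands the firmware a little-endian descriptor list. Fields the controller reads are filled with the unaligned little-endian helpers rather than plain assignment, since the wire structs are packed and the host CPU may be big-endian. A small illustration of that access pattern (struct layout invented for the example):

```c
#include <linux/unaligned.h>	/* <asm/unaligned.h> on older kernels */

struct demo_wire_hdr {
	u8 signature[8];
	__le16 version;
	__le32 bytes_allocated;
} __packed;

static void demo_fill_hdr(struct demo_wire_hdr *hdr, u16 version, u32 bytes)
{
	put_unaligned_le16(version, &hdr->version);	/* CPU u16 -> LE16 */
	put_unaligned_le32(bytes, &hdr->bytes_allocated);
}
```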
-static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
-{
- ssleep(delay_secs);
-
- return pqi_ctrl_init_resume(ctrl_info);
-}
-
static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
.status = SAM_STAT_CHECK_CONDITION,
@@ -9446,6 +9554,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x0462)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0x1104)
},
{
@@ -9474,6 +9586,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1110)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0x8460)
},
{
@@ -9482,6 +9598,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x8462)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0xc460)
},
{
@@ -9590,6 +9710,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x00a1)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f3a, 0x0104)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x19e5, 0xd227)
},
{
@@ -10182,6 +10310,110 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02fe)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02ff)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x0300)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0045)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0046)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0047)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0048)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0051)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0052)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0053)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0054)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006d)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0070)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0071)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0072)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0086)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0087)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0088)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0089)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1e93, 0x1000)
},
{
@@ -10266,6 +10498,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x00a3)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
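
The new board IDs above follow the standard PCI match-table pattern: PCI_DEVICE_SUB() matches on vendor/device plus subsystem vendor/device, the table is scanned in order with first match winning, and the PCI_ANY_ID catch-all must stay last. A minimal sketch of such a table (IDs illustrative):

```c
#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_pci_ids[] = {
	/* Exact subsystem match: 0x9005 is PCI_VENDOR_ID_ADAPTEC2. */
	{ PCI_DEVICE_SUB(0x9005, 0x028f, 0x1ff9, 0x00a3) },
	/* Catch-all for any 0x9005:0x028f board; must come last. */
	{ PCI_DEVICE_SUB(0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, demo_pci_ids);
```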