Diffstat (limited to 'drivers/scsi')
63 files changed, 3450 insertions, 2188 deletions
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 8981701802ca..a777e5c412df 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -474,11 +474,11 @@ static void NCR5380_print_phase(struct Scsi_Host *instance) */ #ifndef USLEEP_SLEEP /* 20 ms (reasonable hard disk speed) */ -#define USLEEP_SLEEP (20*HZ/1000) +#define USLEEP_SLEEP msecs_to_jiffies(20) #endif /* 300 RPM (floppy speed) */ #ifndef USLEEP_POLL -#define USLEEP_POLL (200*HZ/1000) +#define USLEEP_POLL msecs_to_jiffies(200) #endif #ifndef USLEEP_WAITLONG /* RvC: (reasonable time to wait on select error) */ @@ -576,7 +576,7 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance, if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0)) trying_irqs |= mask; - timeout = jiffies + (250 * HZ / 1000); + timeout = jiffies + msecs_to_jiffies(250); probe_irq = NO_IRQ; /* @@ -634,7 +634,7 @@ static void prepare_info(struct Scsi_Host *instance) "sg_tablesize %d, this_id %d, " "flags { %s%s%s}, " #if defined(USLEEP_POLL) && defined(USLEEP_WAITLONG) - "USLEEP_POLL %d, USLEEP_WAITLONG %d, " + "USLEEP_POLL %lu, USLEEP_WAITLONG %lu, " #endif "options { %s} ", instance->hostt->name, instance->io_port, instance->n_io_port, @@ -1346,7 +1346,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) * selection. */ - timeout = jiffies + (250 * HZ / 1000); + timeout = jiffies + msecs_to_jiffies(250); /* * XXX very interesting - we're seeing a bounce where the BSY we diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index b32e77db0c48..9b3dd6ef6a0b 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -111,6 +111,41 @@ #define BYTE2(x) (unsigned char)((x) >> 16) #define BYTE3(x) (unsigned char)((x) >> 24) +/* MODE_SENSE data format */ +typedef struct { + struct { + u8 data_length; + u8 med_type; + u8 dev_par; + u8 bd_length; + } __attribute__((packed)) hd; + struct { + u8 dens_code; + u8 block_count[3]; + u8 reserved; + u8 block_length[3]; + } __attribute__((packed)) bd; + u8 mpc_buf[3]; +} __attribute__((packed)) aac_modep_data; + +/* MODE_SENSE_10 data format */ +typedef struct { + struct { + u8 data_length[2]; + u8 med_type; + u8 dev_par; + u8 rsrvd[2]; + u8 bd_length[2]; + } __attribute__((packed)) hd; + struct { + u8 dens_code; + u8 block_count[3]; + u8 reserved; + u8 block_length[3]; + } __attribute__((packed)) bd; + u8 mpc_buf[3]; +} __attribute__((packed)) aac_modep10_data; + /*------------------------------------------------------------------------------ * S T R U C T S / T Y P E D E F S *----------------------------------------------------------------------------*/ @@ -128,6 +163,48 @@ struct inquiry_data { u8 inqd_prl[4]; /* Product Revision Level */ }; +/* Added for VPD 0x83 */ +typedef struct { + u8 CodeSet:4; /* VPD_CODE_SET */ + u8 Reserved:4; + u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */ + u8 Reserved2:4; + u8 Reserved3; + u8 IdentifierLength; + u8 VendId[8]; + u8 ProductId[16]; + u8 SerialNumber[8]; /* SN in ASCII */ + +} TVPD_ID_Descriptor_Type_1; + +typedef struct { + u8 CodeSet:4; /* VPD_CODE_SET */ + u8 Reserved:4; + u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */ + u8 Reserved2:4; + u8 Reserved3; + u8 IdentifierLength; + struct TEU64Id { + u32 Serial; + /* The serial number supposed to be 40 bits, + * bit we only support 32, so make the last byte zero. 
*/ + u8 Reserved; + u8 VendId[3]; + } EU64Id; + +} TVPD_ID_Descriptor_Type_2; + +typedef struct { + u8 DeviceType:5; + u8 DeviceTypeQualifier:3; + u8 PageCode; + u8 Reserved; + u8 PageLength; + TVPD_ID_Descriptor_Type_1 IdDescriptorType1; + TVPD_ID_Descriptor_Type_2 IdDescriptorType2; + +} TVPD_Page83; + /* * M O D U L E G L O B A L S */ @@ -385,6 +462,11 @@ int aac_get_containers(struct aac_dev *dev) if (status >= 0) { dresp = (struct aac_get_container_count_resp *)fib_data(fibptr); maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); + if (fibptr->dev->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_SUPPORTED_240_VOLUMES) { + maximum_num_containers = + le32_to_cpu(dresp->MaxSimpleVolumes); + } aac_fib_complete(fibptr); } /* FIB should be freed only after getting the response from the F/W */ @@ -438,7 +520,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr) if ((le32_to_cpu(get_name_reply->status) == CT_OK) && (get_name_reply->data[0] != '\0')) { char *sp = get_name_reply->data; - sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0'; + sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0'; while (*sp == ' ') ++sp; if (*sp) { @@ -539,6 +621,14 @@ static void _aac_probe_container2(void * context, struct fib * fibptr) if ((le32_to_cpu(dresp->status) == ST_OK) && (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { + if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_VARIABLE_BLOCK_SIZE)) { + dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200; + fsa_dev_ptr->block_size = 0x200; + } else { + fsa_dev_ptr->block_size = + le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size); + } fsa_dev_ptr->valid = 1; /* sense_key holds the current state of the spin-up */ if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY)) @@ -571,7 +661,9 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) int status; dresp = (struct aac_mount *) fib_data(fibptr); - dresp->mnt[0].capacityhigh = 0; + if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_VARIABLE_BLOCK_SIZE)) + dresp->mnt[0].capacityhigh = 0; if ((le32_to_cpu(dresp->status) != ST_OK) || (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) { _aac_probe_container2(context, fibptr); @@ -586,7 +678,12 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) dinfo = (struct aac_query_mount *)fib_data(fibptr); - dinfo->command = cpu_to_le32(VM_NameServe64); + if (fibptr->dev->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_VARIABLE_BLOCK_SIZE) + dinfo->command = cpu_to_le32(VM_NameServeAllBlk); + else + dinfo->command = cpu_to_le32(VM_NameServe64); + dinfo->count = cpu_to_le32(scmd_id(scsicmd)); dinfo->type = cpu_to_le32(FT_FILESYS); @@ -621,7 +718,12 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru dinfo = (struct aac_query_mount *)fib_data(fibptr); - dinfo->command = cpu_to_le32(VM_NameServe); + if (fibptr->dev->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_VARIABLE_BLOCK_SIZE) + dinfo->command = cpu_to_le32(VM_NameServeAllBlk); + else + dinfo->command = cpu_to_le32(VM_NameServe); + dinfo->count = cpu_to_le32(scmd_id(scsicmd)); dinfo->type = cpu_to_le32(FT_FILESYS); scsicmd->SCp.ptr = (char *)callback; @@ -835,14 +937,88 @@ static void get_container_serial_callback(void *context, struct fib * fibptr) get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr); /* Failure is irrelevant, using default value 
instead */ if (le32_to_cpu(get_serial_reply->status) == CT_OK) { - char sp[13]; - /* EVPD bit set */ - sp[0] = INQD_PDT_DA; - sp[1] = scsicmd->cmnd[2]; - sp[2] = 0; - sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X", - le32_to_cpu(get_serial_reply->uid)); - scsi_sg_copy_from_buffer(scsicmd, sp, sizeof(sp)); + /*Check to see if it's for VPD 0x83 or 0x80 */ + if (scsicmd->cmnd[2] == 0x83) { + /* vpd page 0x83 - Device Identification Page */ + int i; + TVPD_Page83 VPDPage83Data; + + memset(((u8 *)&VPDPage83Data), 0, + sizeof(VPDPage83Data)); + + /* DIRECT_ACCESS_DEVIC */ + VPDPage83Data.DeviceType = 0; + /* DEVICE_CONNECTED */ + VPDPage83Data.DeviceTypeQualifier = 0; + /* VPD_DEVICE_IDENTIFIERS */ + VPDPage83Data.PageCode = 0x83; + VPDPage83Data.Reserved = 0; + VPDPage83Data.PageLength = + sizeof(VPDPage83Data.IdDescriptorType1) + + sizeof(VPDPage83Data.IdDescriptorType2); + + /* T10 Vendor Identifier Field Format */ + /* VpdCodeSetAscii */ + VPDPage83Data.IdDescriptorType1.CodeSet = 2; + /* VpdIdentifierTypeVendorId */ + VPDPage83Data.IdDescriptorType1.IdentifierType = 1; + VPDPage83Data.IdDescriptorType1.IdentifierLength = + sizeof(VPDPage83Data.IdDescriptorType1) - 4; + + /* "ADAPTEC " for adaptec */ + memcpy(VPDPage83Data.IdDescriptorType1.VendId, + "ADAPTEC ", + sizeof(VPDPage83Data.IdDescriptorType1.VendId)); + memcpy(VPDPage83Data.IdDescriptorType1.ProductId, + "ARRAY ", + sizeof( + VPDPage83Data.IdDescriptorType1.ProductId)); + + /* Convert to ascii based serial number. + * The LSB is the the end. + */ + for (i = 0; i < 8; i++) { + u8 temp = + (u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF); + if (temp > 0x9) { + VPDPage83Data.IdDescriptorType1.SerialNumber[i] = + 'A' + (temp - 0xA); + } else { + VPDPage83Data.IdDescriptorType1.SerialNumber[i] = + '0' + temp; + } + } + + /* VpdCodeSetBinary */ + VPDPage83Data.IdDescriptorType2.CodeSet = 1; + /* VpdIdentifierTypeEUI64 */ + VPDPage83Data.IdDescriptorType2.IdentifierType = 2; + VPDPage83Data.IdDescriptorType2.IdentifierLength = + sizeof(VPDPage83Data.IdDescriptorType2) - 4; + + VPDPage83Data.IdDescriptorType2.EU64Id.VendId[0] = 0xD0; + VPDPage83Data.IdDescriptorType2.EU64Id.VendId[1] = 0; + VPDPage83Data.IdDescriptorType2.EU64Id.VendId[2] = 0; + + VPDPage83Data.IdDescriptorType2.EU64Id.Serial = + get_serial_reply->uid; + VPDPage83Data.IdDescriptorType2.EU64Id.Reserved = 0; + + /* Move the inquiry data to the response buffer. 
*/ + scsi_sg_copy_from_buffer(scsicmd, &VPDPage83Data, + sizeof(VPDPage83Data)); + } else { + /* It must be for VPD 0x80 */ + char sp[13]; + /* EVPD bit set */ + sp[0] = INQD_PDT_DA; + sp[1] = scsicmd->cmnd[2]; + sp[2] = 0; + sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X", + le32_to_cpu(get_serial_reply->uid)); + scsi_sg_copy_from_buffer(scsicmd, sp, + sizeof(sp)); + } } scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; @@ -982,7 +1158,8 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3 memset(readcmd2, 0, sizeof(struct aac_raw_io2)); readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff)); readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); - readcmd2->byteCount = cpu_to_le32(count<<9); + readcmd2->byteCount = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); readcmd2->cid = cpu_to_le16(scmd_id(cmd)); readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ); ret = aac_build_sgraw2(cmd, readcmd2, @@ -997,7 +1174,8 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3 readcmd = (struct aac_raw_io *) fib_data(fib); readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); - readcmd->count = cpu_to_le32(count<<9); + readcmd->count = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); readcmd->cid = cpu_to_le16(scmd_id(cmd)); readcmd->flags = cpu_to_le16(RIO_TYPE_READ); readcmd->bpTotal = 0; @@ -1062,6 +1240,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 { u16 fibsize; struct aac_read *readcmd; + struct aac_dev *dev = fib->dev; long ret; aac_fib_init(fib); @@ -1069,7 +1248,8 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 readcmd->command = cpu_to_le32(VM_CtBlockRead); readcmd->cid = cpu_to_le32(scmd_id(cmd)); readcmd->block = cpu_to_le32((u32)(lba&0xffffffff)); - readcmd->count = cpu_to_le32(count * 512); + readcmd->count = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); ret = aac_build_sg(cmd, &readcmd->sg); if (ret < 0) @@ -1104,7 +1284,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u memset(writecmd2, 0, sizeof(struct aac_raw_io2)); writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff)); writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); - writecmd2->byteCount = cpu_to_le32(count<<9); + writecmd2->byteCount = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); writecmd2->cid = cpu_to_le16(scmd_id(cmd)); writecmd2->flags = (fua && ((aac_cache & 5) != 1) && (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ? @@ -1122,7 +1303,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u writecmd = (struct aac_raw_io *) fib_data(fib); writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); - writecmd->count = cpu_to_le32(count<<9); + writecmd->count = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); writecmd->cid = cpu_to_le16(scmd_id(cmd)); writecmd->flags = (fua && ((aac_cache & 5) != 1) && (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ? 
@@ -1190,6 +1372,7 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3 { u16 fibsize; struct aac_write *writecmd; + struct aac_dev *dev = fib->dev; long ret; aac_fib_init(fib); @@ -1197,7 +1380,8 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3 writecmd->command = cpu_to_le32(VM_CtBlockWrite); writecmd->cid = cpu_to_le32(scmd_id(cmd)); writecmd->block = cpu_to_le32((u32)(lba&0xffffffff)); - writecmd->count = cpu_to_le32(count * 512); + writecmd->count = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); writecmd->sg.count = cpu_to_le32(1); /* ->stable is not used - it did mean which type of write */ @@ -2246,9 +2430,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) INQD_PDT_PROC : INQD_PDT_DA; if (scsicmd->cmnd[2] == 0) { /* supported vital product data pages */ - arr[3] = 2; + arr[3] = 3; arr[4] = 0x0; arr[5] = 0x80; + arr[6] = 0x83; arr[1] = scsicmd->cmnd[2]; scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data)); @@ -2264,7 +2449,16 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) if (aac_wwn != 2) return aac_get_container_serial( scsicmd); - /* SLES 10 SP1 special */ + scsicmd->result = DID_OK << 16 | + COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; + } else if (scsicmd->cmnd[2] == 0x83) { + /* vpd page 0x83 - Device Identification Page */ + char *sno = (char *)&inq_data; + sno[3] = setinqserial(dev, &sno[4], + scmd_id(scsicmd)); + if (aac_wwn != 2) + return aac_get_container_serial( + scsicmd); scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; } else { @@ -2329,10 +2523,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) cp[5] = (capacity >> 16) & 0xff; cp[6] = (capacity >> 8) & 0xff; cp[7] = (capacity >> 0) & 0xff; - cp[8] = 0; - cp[9] = 0; - cp[10] = 2; - cp[11] = 0; + cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff; + cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff; + cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff; + cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff; cp[12] = 0; alloc_len = ((scsicmd->cmnd[10] << 24) @@ -2369,10 +2563,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) cp[1] = (capacity >> 16) & 0xff; cp[2] = (capacity >> 8) & 0xff; cp[3] = (capacity >> 0) & 0xff; - cp[4] = 0; - cp[5] = 0; - cp[6] = 2; - cp[7] = 0; + cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff; + cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff; + cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff; + cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff; scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp)); /* Do not cache partition table for arrays */ scsicmd->device->removable = 1; @@ -2385,30 +2579,79 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) case MODE_SENSE: { - char mode_buf[7]; int mode_buf_length = 4; + u32 capacity; + aac_modep_data mpd; + + if (fsa_dev_ptr[cid].size <= 0x100000000ULL) + capacity = fsa_dev_ptr[cid].size - 1; + else + capacity = (u32)-1; dprintk((KERN_DEBUG "MODE SENSE command.\n")); - mode_buf[0] = 3; /* Mode data length */ - mode_buf[1] = 0; /* Medium type - default */ - mode_buf[2] = 0; /* Device-specific param, - bit 8: 0/1 = write enabled/protected - bit 4: 0/1 = FUA enabled */ + memset((char *)&mpd, 0, sizeof(aac_modep_data)); + + /* Mode data length */ + mpd.hd.data_length = sizeof(mpd.hd) - 1; + /* Medium type - default */ + mpd.hd.med_type = 0; + /* Device-specific param, + bit 8: 0/1 = write enabled/protected + bit 4: 0/1 = FUA enabled */ + mpd.hd.dev_par = 0; + if (dev->raw_io_interface && ((aac_cache & 5) != 1)) - mode_buf[2] = 0x10; - mode_buf[3] = 0; /* 
Block descriptor length */ + mpd.hd.dev_par = 0x10; + if (scsicmd->cmnd[1] & 0x8) + mpd.hd.bd_length = 0; /* Block descriptor length */ + else { + mpd.hd.bd_length = sizeof(mpd.bd); + mpd.hd.data_length += mpd.hd.bd_length; + mpd.bd.block_length[0] = + (fsa_dev_ptr[cid].block_size >> 16) & 0xff; + mpd.bd.block_length[1] = + (fsa_dev_ptr[cid].block_size >> 8) & 0xff; + mpd.bd.block_length[2] = + fsa_dev_ptr[cid].block_size & 0xff; + + mpd.mpc_buf[0] = scsicmd->cmnd[2]; + if (scsicmd->cmnd[2] == 0x1C) { + /* page length */ + mpd.mpc_buf[1] = 0xa; + /* Mode data length */ + mpd.hd.data_length = 23; + } else { + /* Mode data length */ + mpd.hd.data_length = 15; + } + + if (capacity > 0xffffff) { + mpd.bd.block_count[0] = 0xff; + mpd.bd.block_count[1] = 0xff; + mpd.bd.block_count[2] = 0xff; + } else { + mpd.bd.block_count[0] = (capacity >> 16) & 0xff; + mpd.bd.block_count[1] = (capacity >> 8) & 0xff; + mpd.bd.block_count[2] = capacity & 0xff; + } + } if (((scsicmd->cmnd[2] & 0x3f) == 8) || ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) { - mode_buf[0] = 6; - mode_buf[4] = 8; - mode_buf[5] = 1; - mode_buf[6] = ((aac_cache & 6) == 2) + mpd.hd.data_length += 3; + mpd.mpc_buf[0] = 8; + mpd.mpc_buf[1] = 1; + mpd.mpc_buf[2] = ((aac_cache & 6) == 2) ? 0 : 0x04; /* WCE */ - mode_buf_length = 7; - if (mode_buf_length > scsicmd->cmnd[4]) - mode_buf_length = scsicmd->cmnd[4]; + mode_buf_length = sizeof(mpd); } - scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length); + + if (mode_buf_length > scsicmd->cmnd[4]) + mode_buf_length = scsicmd->cmnd[4]; + else + mode_buf_length = sizeof(mpd); + scsi_sg_copy_from_buffer(scsicmd, + (char *)&mpd, + mode_buf_length); scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; scsicmd->scsi_done(scsicmd); @@ -2416,34 +2659,77 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) } case MODE_SENSE_10: { - char mode_buf[11]; + u32 capacity; int mode_buf_length = 8; + aac_modep10_data mpd10; + + if (fsa_dev_ptr[cid].size <= 0x100000000ULL) + capacity = fsa_dev_ptr[cid].size - 1; + else + capacity = (u32)-1; dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n")); - mode_buf[0] = 0; /* Mode data length (MSB) */ - mode_buf[1] = 6; /* Mode data length (LSB) */ - mode_buf[2] = 0; /* Medium type - default */ - mode_buf[3] = 0; /* Device-specific param, - bit 8: 0/1 = write enabled/protected - bit 4: 0/1 = FUA enabled */ + memset((char *)&mpd10, 0, sizeof(aac_modep10_data)); + /* Mode data length (MSB) */ + mpd10.hd.data_length[0] = 0; + /* Mode data length (LSB) */ + mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1; + /* Medium type - default */ + mpd10.hd.med_type = 0; + /* Device-specific param, + bit 8: 0/1 = write enabled/protected + bit 4: 0/1 = FUA enabled */ + mpd10.hd.dev_par = 0; + if (dev->raw_io_interface && ((aac_cache & 5) != 1)) - mode_buf[3] = 0x10; - mode_buf[4] = 0; /* reserved */ - mode_buf[5] = 0; /* reserved */ - mode_buf[6] = 0; /* Block descriptor length (MSB) */ - mode_buf[7] = 0; /* Block descriptor length (LSB) */ + mpd10.hd.dev_par = 0x10; + mpd10.hd.rsrvd[0] = 0; /* reserved */ + mpd10.hd.rsrvd[1] = 0; /* reserved */ + if (scsicmd->cmnd[1] & 0x8) { + /* Block descriptor length (MSB) */ + mpd10.hd.bd_length[0] = 0; + /* Block descriptor length (LSB) */ + mpd10.hd.bd_length[1] = 0; + } else { + mpd10.hd.bd_length[0] = 0; + mpd10.hd.bd_length[1] = sizeof(mpd10.bd); + + mpd10.hd.data_length[1] += mpd10.hd.bd_length[1]; + + mpd10.bd.block_length[0] = + (fsa_dev_ptr[cid].block_size >> 16) & 0xff; + mpd10.bd.block_length[1] = + (fsa_dev_ptr[cid].block_size 
>> 8) & 0xff; + mpd10.bd.block_length[2] = + fsa_dev_ptr[cid].block_size & 0xff; + + if (capacity > 0xffffff) { + mpd10.bd.block_count[0] = 0xff; + mpd10.bd.block_count[1] = 0xff; + mpd10.bd.block_count[2] = 0xff; + } else { + mpd10.bd.block_count[0] = + (capacity >> 16) & 0xff; + mpd10.bd.block_count[1] = + (capacity >> 8) & 0xff; + mpd10.bd.block_count[2] = + capacity & 0xff; + } + } if (((scsicmd->cmnd[2] & 0x3f) == 8) || ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) { - mode_buf[1] = 9; - mode_buf[8] = 8; - mode_buf[9] = 1; - mode_buf[10] = ((aac_cache & 6) == 2) + mpd10.hd.data_length[1] += 3; + mpd10.mpc_buf[0] = 8; + mpd10.mpc_buf[1] = 1; + mpd10.mpc_buf[2] = ((aac_cache & 6) == 2) ? 0 : 0x04; /* WCE */ - mode_buf_length = 11; + mode_buf_length = sizeof(mpd10); if (mode_buf_length > scsicmd->cmnd[8]) mode_buf_length = scsicmd->cmnd[8]; } - scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length); + scsi_sg_copy_from_buffer(scsicmd, + (char *)&mpd10, + mode_buf_length); scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; scsicmd->scsi_done(scsicmd); diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index eaaf8705a5f4..40fe65c91b41 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -6,13 +6,63 @@ #define nblank(x) _nblank(x)[0] #include <linux/interrupt.h> +#include <linux/pci.h> /*------------------------------------------------------------------------------ * D E F I N E S *----------------------------------------------------------------------------*/ +#define AAC_MAX_MSIX 8 /* vectors */ +#define AAC_PCI_MSI_ENABLE 0x8000 + +enum { + AAC_ENABLE_INTERRUPT = 0x0, + AAC_DISABLE_INTERRUPT, + AAC_ENABLE_MSIX, + AAC_DISABLE_MSIX, + AAC_CLEAR_AIF_BIT, + AAC_CLEAR_SYNC_BIT, + AAC_ENABLE_INTX +}; + +#define AAC_INT_MODE_INTX (1<<0) +#define AAC_INT_MODE_MSI (1<<1) +#define AAC_INT_MODE_AIF (1<<2) +#define AAC_INT_MODE_SYNC (1<<3) + +#define AAC_INT_ENABLE_TYPE1_INTX 0xfffffffb +#define AAC_INT_ENABLE_TYPE1_MSIX 0xfffffffa +#define AAC_INT_DISABLE_ALL 0xffffffff + +/* Bit definitions in IOA->Host Interrupt Register */ +#define PMC_TRANSITION_TO_OPERATIONAL (1<<31) +#define PMC_IOARCB_TRANSFER_FAILED (1<<28) +#define PMC_IOA_UNIT_CHECK (1<<27) +#define PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE (1<<26) +#define PMC_CRITICAL_IOA_OP_IN_PROGRESS (1<<25) +#define PMC_IOARRIN_LOST (1<<4) +#define PMC_SYSTEM_BUS_MMIO_ERROR (1<<3) +#define PMC_IOA_PROCESSOR_IN_ERROR_STATE (1<<2) +#define PMC_HOST_RRQ_VALID (1<<1) +#define PMC_OPERATIONAL_STATUS (1<<31) +#define PMC_ALLOW_MSIX_VECTOR0 (1<<0) + +#define PMC_IOA_ERROR_INTERRUPTS (PMC_IOARCB_TRANSFER_FAILED | \ + PMC_IOA_UNIT_CHECK | \ + PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE | \ + PMC_IOARRIN_LOST | \ + PMC_SYSTEM_BUS_MMIO_ERROR | \ + PMC_IOA_PROCESSOR_IN_ERROR_STATE) + +#define PMC_ALL_INTERRUPT_BITS (PMC_IOA_ERROR_INTERRUPTS | \ + PMC_HOST_RRQ_VALID | \ + PMC_TRANSITION_TO_OPERATIONAL | \ + PMC_ALLOW_MSIX_VECTOR0) +#define PMC_GLOBAL_INT_BIT2 0x00000004 +#define PMC_GLOBAL_INT_BIT0 0x00000001 + #ifndef AAC_DRIVER_BUILD -# define AAC_DRIVER_BUILD 30300 +# define AAC_DRIVER_BUILD 40709 # define AAC_DRIVER_BRANCH "-ms" #endif #define MAXIMUM_NUM_CONTAINERS 32 @@ -36,6 +86,7 @@ #define CONTAINER_TO_ID(cont) (cont) #define CONTAINER_TO_LUN(cont) (0) +#define PMC_DEVICE_S6 0x28b #define PMC_DEVICE_S7 0x28c #define PMC_DEVICE_S8 0x28d #define PMC_DEVICE_S9 0x28f @@ -434,7 +485,7 @@ enum fib_xfer_state { struct aac_init { __le32 InitStructRevision; - __le32 MiniPortRevision; + __le32 Sa_MSIXVectors; 
__le32 fsrev; __le32 CommHeaderAddress; __le32 FastIoCommAreaAddress; @@ -582,7 +633,8 @@ struct aac_queue { spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */ struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */ /* only valid for command queues which receive entries from the adapter. */ - u32 numpending; /* Number of entries on outstanding queue. */ + /* Number of entries on outstanding queue. */ + atomic_t numpending; struct aac_dev * dev; /* Back pointer to adapter structure */ }; @@ -755,7 +807,8 @@ struct rkt_registers { struct src_mu_registers { /* PCI*| Name */ - __le32 reserved0[8]; /* 00h | Reserved */ + __le32 reserved0[6]; /* 00h | Reserved */ + __le32 IOAR[2]; /* 18h | IOA->host interrupt register */ __le32 IDR; /* 20h | Inbound Doorbell Register */ __le32 IISR; /* 24h | Inbound Int. Status Register */ __le32 reserved1[3]; /* 28h | Reserved */ @@ -767,17 +820,18 @@ struct src_mu_registers { __le32 OMR; /* bch | Outbound Message Register */ __le32 IQ_L; /* c0h | Inbound Queue (Low address) */ __le32 IQ_H; /* c4h | Inbound Queue (High address) */ + __le32 ODR_MSI; /* c8h | MSI register for sync./AIF */ }; struct src_registers { - struct src_mu_registers MUnit; /* 00h - c7h */ + struct src_mu_registers MUnit; /* 00h - cbh */ union { struct { - __le32 reserved1[130790]; /* c8h - 7fc5fh */ + __le32 reserved1[130789]; /* cch - 7fc5fh */ struct src_inbound IndexRegs; /* 7fc60h */ } tupelo; struct { - __le32 reserved1[974]; /* c8h - fffh */ + __le32 reserved1[973]; /* cch - fffh */ struct src_inbound IndexRegs; /* 1000h */ } denali; } u; @@ -857,6 +911,7 @@ struct fsa_dev_info { u8 deleted; char devname[8]; struct sense_data sense_data; + u32 block_size; }; struct fib { @@ -960,6 +1015,10 @@ struct aac_supplement_adapter_info #define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002) #define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004) #define AAC_OPTION_DOORBELL_RESET cpu_to_le32(0x00004000) +/* 4KB sector size */ +#define AAC_OPTION_VARIABLE_BLOCK_SIZE cpu_to_le32(0x00040000) +/* 240 simple volume support */ +#define AAC_OPTION_SUPPORTED_240_VOLUMES cpu_to_le32(0x10000000) #define AAC_SIS_VERSION_V3 3 #define AAC_SIS_SLOT_UNKNOWN 0xFF @@ -1026,6 +1085,11 @@ struct aac_bus_info_response { #define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30) #define AAC_OPT_NEW_COMM_TYPE4 cpu_to_le32(1<<31) +/* MSIX context */ +struct aac_msix_ctx { + int vector_no; + struct aac_dev *dev; +}; struct aac_dev { @@ -1081,8 +1145,10 @@ struct aac_dev * if AAC_COMM_MESSAGE_TYPE1 */ dma_addr_t host_rrq_pa; /* phys. address */ - u32 host_rrq_idx; /* index into rrq buffer */ - + /* index into rrq buffer */ + u32 host_rrq_idx[AAC_MAX_MSIX]; + atomic_t rrq_outstanding[AAC_MAX_MSIX]; + u32 fibs_pushed_no; struct pci_dev *pdev; /* Our PCI interface */ void * printfbuf; /* pointer to buffer used for printf's from the adapter */ void * comm_addr; /* Base address of Comm area */ @@ -1151,6 +1217,13 @@ struct aac_dev int sync_mode; struct fib *sync_fib; struct list_head sync_fib_list; + u32 doorbell_mask; + u32 max_msix; /* max. 
MSI-X vectors */ + u32 vector_cap; /* MSI-X vector capab.*/ + int msi_enabled; /* MSI/MSI-X enabled */ + struct msix_entry msixentry[AAC_MAX_MSIX]; + struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */ + u8 adapter_shutdown; }; #define aac_adapter_interrupt(dev) \ @@ -1589,6 +1662,7 @@ struct aac_srb_reply #define VM_CtHostWrite64 20 #define VM_DrvErrTblLog 21 #define VM_NameServe64 22 +#define VM_NameServeAllBlk 30 #define MAX_VMCOMMAND_NUM 23 /* used for sizing stats array - leave last */ @@ -1611,8 +1685,13 @@ struct aac_fsinfo { __le32 fsInodeDensity; }; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */ +struct aac_blockdevinfo { + __le32 block_size; +}; + union aac_contentinfo { - struct aac_fsinfo filesys; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */ + struct aac_fsinfo filesys; + struct aac_blockdevinfo bdevinfo; }; /* @@ -1677,6 +1756,7 @@ struct aac_get_container_count_resp { __le32 MaxContainers; __le32 ContainerSwitchEntries; __le32 MaxPartitions; + __le32 MaxSimpleVolumes; }; @@ -1951,6 +2031,8 @@ extern struct aac_common aac_config; #define AifEnEnclosureManagement 13 /* EM_DRIVE_* */ #define EM_DRIVE_INSERTION 31 #define EM_DRIVE_REMOVAL 32 +#define EM_SES_DRIVE_INSERTION 33 +#define EM_SES_DRIVE_REMOVAL 26 #define AifEnBatteryEvent 14 /* Change in Battery State */ #define AifEnAddContainer 15 /* A new array was created */ #define AifEnDeleteContainer 16 /* A container was deleted */ @@ -1983,6 +2065,9 @@ extern struct aac_common aac_config; /* PMC NEW COMM: Request the event data */ #define AifReqEvent 200 +/* RAW device deleted */ +#define AifRawDeviceRemove 203 + /* * Adapter Initiated FIB command structures. Start with the adapter * initiated FIBs that really come from the adapter, and get responded @@ -2025,6 +2110,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); int aac_fib_complete(struct fib * context); #define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data) struct aac_dev *aac_init_adapter(struct aac_dev *dev); +void aac_src_access_devreg(struct aac_dev *dev, int mode); int aac_get_config_status(struct aac_dev *dev, int commit_flag); int aac_get_containers(struct aac_dev *dev); int aac_scsi_cmd(struct scsi_cmnd *cmd); diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index fbcd48d0bfc3..54195a117f72 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -689,7 +689,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) kfree (usg); } srbcmd->count = cpu_to_le32(byte_count); - psg->count = cpu_to_le32(sg_indx+1); + if (user_srbcmd->sg.count) + psg->count = cpu_to_le32(sg_indx+1); + else + psg->count = 0; status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); } else { struct user_sgmap* upsg = &user_srbcmd->sg; @@ -775,7 +778,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) } } srbcmd->count = cpu_to_le32(byte_count); - psg->count = cpu_to_le32(sg_indx+1); + if (user_srbcmd->sg.count) + psg->count = cpu_to_le32(sg_indx+1); + else + psg->count = 0; status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); } if (status == -ERESTARTSYS) { diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 177b094c7792..45db84ad322f 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -43,6 +43,8 @@ #include "aacraid.h" +static void 
aac_define_int_mode(struct aac_dev *dev); + struct aac_common aac_config = { .irq_mod = 1 }; @@ -51,7 +53,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co { unsigned char *base; unsigned long size, align; - const unsigned long fibsize = 4096; + const unsigned long fibsize = dev->max_fib_size; const unsigned long printfbufsiz = 256; unsigned long host_rrq_size = 0; struct aac_init *init; @@ -91,7 +93,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION); if (dev->max_fib_size != sizeof(struct hw_fib)) init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4); - init->MiniPortRevision = cpu_to_le32(Sa_MINIPORT_REVISION); + init->Sa_MSIXVectors = cpu_to_le32(Sa_MINIPORT_REVISION); init->fsrev = cpu_to_le32(dev->fsrev); /* @@ -140,7 +142,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED); init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32)); init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff)); - init->MiniPortRevision = cpu_to_le32(0L); /* number of MSI-X */ + /* number of MSI-X */ + init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix); dprintk((KERN_WARNING"aacraid: New Comm Interface type2 enabled\n")); } @@ -179,7 +182,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize) { - q->numpending = 0; + atomic_set(&q->numpending, 0); q->dev = dev; init_waitqueue_head(&q->cmdready); INIT_LIST_HEAD(&q->cmdq); @@ -228,6 +231,12 @@ int aac_send_shutdown(struct aac_dev * dev) /* FIB should be freed only after getting the response from the F/W */ if (status != -ERESTARTSYS) aac_fib_free(fibctx); + dev->adapter_shutdown = 1; + if ((dev->pdev->device == PMC_DEVICE_S7 || + dev->pdev->device == PMC_DEVICE_S8 || + dev->pdev->device == PMC_DEVICE_S9) && + dev->msi_enabled) + aac_src_access_devreg(dev, AAC_ENABLE_INTX); return status; } @@ -350,8 +359,10 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) dev->raw_io_interface = dev->raw_io_64 = 0; if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, - 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) && + 0, 0, 0, 0, 0, 0, + status+0, status+1, status+2, status+3, NULL)) && (status[0] == 0x00000001)) { + dev->doorbell_mask = status[3]; if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64)) dev->raw_io_64 = 1; dev->sync_mode = aac_sync_mode; @@ -388,6 +399,9 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) } } } + dev->max_msix = 0; + dev->msi_enabled = 0; + dev->adapter_shutdown = 0; if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS, 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, status+3, status+4)) @@ -461,6 +475,11 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) if (host->can_queue > AAC_NUM_IO_FIB) host->can_queue = AAC_NUM_IO_FIB; + if (dev->pdev->device == PMC_DEVICE_S6 || + dev->pdev->device == PMC_DEVICE_S7 || + dev->pdev->device == PMC_DEVICE_S8 || + dev->pdev->device == PMC_DEVICE_S9) + aac_define_int_mode(dev); /* * Ok now init the communication subsystem */ @@ -489,4 +508,79 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) return dev; } - +static void aac_define_int_mode(struct aac_dev *dev) +{ + + int i, msi_count; + + msi_count = i = 0; + /* max. 
vectors from GET_COMM_PREFERRED_SETTINGS */ + if (dev->max_msix == 0 || + dev->pdev->device == PMC_DEVICE_S6 || + dev->sync_mode) { + dev->max_msix = 1; + dev->vector_cap = + dev->scsi_host_ptr->can_queue + + AAC_NUM_MGT_FIB; + return; + } + + msi_count = min(dev->max_msix, + (unsigned int)num_online_cpus()); + + dev->max_msix = msi_count; + + if (msi_count > AAC_MAX_MSIX) + msi_count = AAC_MAX_MSIX; + + for (i = 0; i < msi_count; i++) + dev->msixentry[i].entry = i; + + if (msi_count > 1 && + pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { + i = pci_enable_msix(dev->pdev, + dev->msixentry, + msi_count); + /* Check how many MSIX vectors are allocated */ + if (i >= 0) { + dev->msi_enabled = 1; + if (i) { + msi_count = i; + if (pci_enable_msix(dev->pdev, + dev->msixentry, + msi_count)) { + dev->msi_enabled = 0; + printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n", + dev->name, dev->id, i); + } + } + } else { + dev->msi_enabled = 0; + printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n", + dev->name, dev->id, i); + } + } + + if (!dev->msi_enabled) { + msi_count = 1; + i = pci_enable_msi(dev->pdev); + + if (!i) { + dev->msi_enabled = 1; + dev->msi = 1; + } else { + printk(KERN_ERR "%s%d: MSI not supported!! Will try INTx 0x%x.\n", + dev->name, dev->id, i); + } + } + + if (!dev->msi_enabled) + dev->max_msix = msi_count = 1; + else { + if (dev->max_msix > msi_count) + dev->max_msix = msi_count; + } + dev->vector_cap = + (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) / + msi_count; +} diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index cab190af6345..4da574925284 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -208,14 +208,10 @@ struct fib *aac_fib_alloc(struct aac_dev *dev) void aac_fib_free(struct fib *fibptr) { - unsigned long flags, flagsv; + unsigned long flags; - spin_lock_irqsave(&fibptr->event_lock, flagsv); - if (fibptr->done == 2) { - spin_unlock_irqrestore(&fibptr->event_lock, flagsv); + if (fibptr->done == 2) return; - } - spin_unlock_irqrestore(&fibptr->event_lock, flagsv); spin_lock_irqsave(&fibptr->dev->fib_lock, flags); if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) @@ -321,7 +317,7 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr /* Queue is full */ if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { printk(KERN_WARNING "Queue %d full, %u outstanding.\n", - qid, q->numpending); + qid, atomic_read(&q->numpending)); return 0; } else { *entry = q->base + *index; @@ -414,7 +410,6 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, struct aac_dev * dev = fibptr->dev; struct hw_fib * hw_fib = fibptr->hw_fib_va; unsigned long flags = 0; - unsigned long qflags; unsigned long mflags = 0; unsigned long sflags = 0; @@ -568,9 +563,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, int blink; if (time_is_before_eq_jiffies(timeout)) { struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue]; - spin_lock_irqsave(q->lock, qflags); - q->numpending--; - spin_unlock_irqrestore(q->lock, qflags); + atomic_dec(&q->numpending); if (wait == -1) { printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n" "Usually a result of a PCI interrupt routing problem;\n" @@ -775,7 +768,6 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) int aac_fib_complete(struct fib *fibptr) { - unsigned long flags; struct hw_fib * hw_fib = fibptr->hw_fib_va; /* @@ -798,12 +790,6 @@ int 
aac_fib_complete(struct fib *fibptr) * command is complete that we had sent to the adapter and this * cdb could be reused. */ - spin_lock_irqsave(&fibptr->event_lock, flags); - if (fibptr->done == 2) { - spin_unlock_irqrestore(&fibptr->event_lock, flags); - return 0; - } - spin_unlock_irqrestore(&fibptr->event_lock, flags); if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) @@ -868,7 +854,7 @@ void aac_printf(struct aac_dev *dev, u32 val) * dispatches it to the appropriate routine for handling. */ -#define AIF_SNIFF_TIMEOUT (30*HZ) +#define AIF_SNIFF_TIMEOUT (500*HZ) static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) { struct hw_fib * hw_fib = fibptr->hw_fib_va; @@ -897,6 +883,39 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) switch (le32_to_cpu(aifcmd->command)) { case AifCmdDriverNotify: switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) { + case AifRawDeviceRemove: + container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); + if ((container >> 28)) { + container = (u32)-1; + break; + } + channel = (container >> 24) & 0xF; + if (channel >= dev->maximum_num_channels) { + container = (u32)-1; + break; + } + id = container & 0xFFFF; + if (id >= dev->maximum_num_physicals) { + container = (u32)-1; + break; + } + lun = (container >> 16) & 0xFF; + container = (u32)-1; + channel = aac_phys_to_logical(channel); + device_config_needed = + (((__le32 *)aifcmd->data)[0] == + cpu_to_le32(AifRawDeviceRemove)) ? DELETE : ADD; + + if (device_config_needed == ADD) { + device = scsi_device_lookup( + dev->scsi_host_ptr, + channel, id, lun); + if (device) { + scsi_remove_device(device); + scsi_device_put(device); + } + } + break; /* * Morph or Expand complete */ @@ -1044,6 +1063,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) { case EM_DRIVE_INSERTION: case EM_DRIVE_REMOVAL: + case EM_SES_DRIVE_INSERTION: + case EM_SES_DRIVE_REMOVAL: container = le32_to_cpu( ((__le32 *)aifcmd->data)[2]); if ((container >> 28)) { @@ -1069,8 +1090,10 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) } channel = aac_phys_to_logical(channel); device_config_needed = - (((__le32 *)aifcmd->data)[3] - == cpu_to_le32(EM_DRIVE_INSERTION)) ? + ((((__le32 *)aifcmd->data)[3] + == cpu_to_le32(EM_DRIVE_INSERTION)) || + (((__le32 *)aifcmd->data)[3] + == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ? 
ADD : DELETE; break; } @@ -1247,12 +1270,13 @@ retry_next: static int _aac_reset_adapter(struct aac_dev *aac, int forced) { int index, quirks; - int retval; + int retval, i; struct Scsi_Host *host; struct scsi_device *dev; struct scsi_cmnd *command; struct scsi_cmnd *command_list; int jafo = 0; + int cpu; /* * Assumptions: @@ -1315,7 +1339,33 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced) aac->comm_phys = 0; kfree(aac->queues); aac->queues = NULL; - free_irq(aac->pdev->irq, aac); + cpu = cpumask_first(cpu_online_mask); + if (aac->pdev->device == PMC_DEVICE_S6 || + aac->pdev->device == PMC_DEVICE_S7 || + aac->pdev->device == PMC_DEVICE_S8 || + aac->pdev->device == PMC_DEVICE_S9) { + if (aac->max_msix > 1) { + for (i = 0; i < aac->max_msix; i++) { + if (irq_set_affinity_hint( + aac->msixentry[i].vector, + NULL)) { + printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n", + aac->name, + aac->id, + cpu); + } + cpu = cpumask_next(cpu, + cpu_online_mask); + free_irq(aac->msixentry[i].vector, + &(aac->aac_msix[i])); + } + pci_disable_msix(aac->pdev); + } else { + free_irq(aac->pdev->irq, &(aac->aac_msix[0])); + } + } else { + free_irq(aac->pdev->irq, aac); + } if (aac->msi) pci_disable_msi(aac->pdev); kfree(aac->fsa_dev); diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index d81b2810f0f7..da9d9936e995 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c @@ -84,7 +84,7 @@ unsigned int aac_response_normal(struct aac_queue * q) * continue. The caller has already been notified that * the fib timed out. */ - dev->queues->queue[AdapNormCmdQueue].numpending--; + atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) { spin_unlock_irqrestore(q->lock, flags); @@ -354,7 +354,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, * continue. The caller has already been notified that * the fib timed out. */ - dev->queues->queue[AdapNormCmdQueue].numpending--; + atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) { aac_fib_complete(fib); @@ -389,8 +389,13 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, * NOTE: we cannot touch the fib after this * call, because it may have been deallocated. */ - fib->flags &= FIB_CONTEXT_FLAG_FASTRESP; - fib->callback(fib->callback_data, fib); + if (likely(fib->callback && fib->callback_data)) { + fib->flags &= FIB_CONTEXT_FLAG_FASTRESP; + fib->callback(fib->callback_data, fib); + } else { + aac_fib_complete(fib); + aac_fib_free(fib); + } } else { unsigned long flagv; dprintk((KERN_INFO "event_wait up\n")); diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index fdcdf9f781bc..9eec02733c86 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -56,7 +56,7 @@ #include "aacraid.h" -#define AAC_DRIVER_VERSION "1.2-0" +#define AAC_DRIVER_VERSION "1.2-1" #ifndef AAC_DRIVER_BRANCH #define AAC_DRIVER_BRANCH "" #endif @@ -251,27 +251,15 @@ static struct aac_driver_ident aac_drivers[] = { * TODO: unify with aac_scsi_cmd(). 
*/ -static int aac_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +static int aac_queuecommand(struct Scsi_Host *shost, + struct scsi_cmnd *cmd) { - struct Scsi_Host *host = cmd->device->host; - struct aac_dev *dev = (struct aac_dev *)host->hostdata; - u32 count = 0; - cmd->scsi_done = done; - for (; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { - struct fib * fib = &dev->fibs[count]; - struct scsi_cmnd * command; - if (fib->hw_fib_va->header.XferState && - ((command = fib->callback_data)) && - (command == cmd) && - (cmd->SCp.phase == AAC_OWNER_FIRMWARE)) - return 0; /* Already owned by Adapter */ - } + int r = 0; cmd->SCp.phase = AAC_OWNER_LOWLEVEL; - return (aac_scsi_cmd(cmd) ? FAILED : 0); + r = (aac_scsi_cmd(cmd) ? FAILED : 0); + return r; } -static DEF_SCSI_QCMD(aac_queuecommand) - /** * aac_info - Returns the host adapter name * @shost: Scsi host to report on @@ -713,7 +701,9 @@ static long aac_cfg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; - if (!capable(CAP_SYS_RAWIO)) + struct aac_dev *aac; + aac = (struct aac_dev *)file->private_data; + if (!capable(CAP_SYS_RAWIO) || aac->adapter_shutdown) return -EPERM; mutex_lock(&aac_mutex); ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg); @@ -1082,6 +1072,9 @@ static struct scsi_host_template aac_driver_template = { static void __aac_shutdown(struct aac_dev * aac) { + int i; + int cpu; + if (aac->aif_thread) { int i; /* Clear out events first */ @@ -1095,9 +1088,37 @@ static void __aac_shutdown(struct aac_dev * aac) } aac_send_shutdown(aac); aac_adapter_disable_int(aac); - free_irq(aac->pdev->irq, aac); + cpu = cpumask_first(cpu_online_mask); + if (aac->pdev->device == PMC_DEVICE_S6 || + aac->pdev->device == PMC_DEVICE_S7 || + aac->pdev->device == PMC_DEVICE_S8 || + aac->pdev->device == PMC_DEVICE_S9) { + if (aac->max_msix > 1) { + for (i = 0; i < aac->max_msix; i++) { + if (irq_set_affinity_hint( + aac->msixentry[i].vector, + NULL)) { + printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n", + aac->name, + aac->id, + cpu); + } + cpu = cpumask_next(cpu, + cpu_online_mask); + free_irq(aac->msixentry[i].vector, + &(aac->aac_msix[i])); + } + } else { + free_irq(aac->pdev->irq, + &(aac->aac_msix[0])); + } + } else { + free_irq(aac->pdev->irq, aac); + } if (aac->msi) pci_disable_msi(aac->pdev); + else if (aac->max_msix > 1) + pci_disable_msix(aac->pdev); } static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index 5c6a8703f535..9570612b80ce 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c @@ -400,16 +400,13 @@ int aac_rx_deliver_producer(struct fib * fib) { struct aac_dev *dev = fib->dev; struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; - unsigned long qflags; u32 Index; unsigned long nointr = 0; - spin_lock_irqsave(q->lock, qflags); aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr); - q->numpending++; + atomic_inc(&q->numpending); *(q->headers.producer) = cpu_to_le32(Index + 1); - spin_unlock_irqrestore(q->lock, qflags); if (!(nointr & aac_config.irq_mod)) aac_adapter_notify(dev, AdapNormCmdQueue); @@ -426,15 +423,12 @@ static int aac_rx_deliver_message(struct fib * fib) { struct aac_dev *dev = fib->dev; struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; - unsigned long qflags; u32 Index; u64 addr; volatile void __iomem *device; unsigned long count = 10000000L; /* 50 seconds */ - 
spin_lock_irqsave(q->lock, qflags); - q->numpending++; - spin_unlock_irqrestore(q->lock, qflags); + atomic_inc(&q->numpending); for(;;) { Index = rx_readl(dev, MUnit.InboundQueue); if (unlikely(Index == 0xFFFFFFFFL)) @@ -442,9 +436,7 @@ static int aac_rx_deliver_message(struct fib * fib) if (likely(Index != 0xFFFFFFFFL)) break; if (--count == 0) { - spin_lock_irqsave(q->lock, qflags); - q->numpending--; - spin_unlock_irqrestore(q->lock, qflags); + atomic_dec(&q->numpending); return -ETIMEDOUT; } udelay(5); diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 9c65aed26212..4596e9dd757c 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -44,98 +44,128 @@ #include "aacraid.h" -static irqreturn_t aac_src_intr_message(int irq, void *dev_id) +static int aac_src_get_sync_status(struct aac_dev *dev); + +irqreturn_t aac_src_intr_message(int irq, void *dev_id) { - struct aac_dev *dev = dev_id; + struct aac_msix_ctx *ctx; + struct aac_dev *dev; unsigned long bellbits, bellbits_shifted; - int our_interrupt = 0; - int isFastResponse; + int vector_no; + int isFastResponse, mode; u32 index, handle; - bellbits = src_readl(dev, MUnit.ODR_R); - if (bellbits & PmDoorBellResponseSent) { - bellbits = PmDoorBellResponseSent; - /* handle async. status */ - src_writel(dev, MUnit.ODR_C, bellbits); - src_readl(dev, MUnit.ODR_C); - our_interrupt = 1; - index = dev->host_rrq_idx; - for (;;) { - isFastResponse = 0; - /* remove toggle bit (31) */ - handle = le32_to_cpu(dev->host_rrq[index]) & 0x7fffffff; - /* check fast response bit (30) */ - if (handle & 0x40000000) - isFastResponse = 1; - handle &= 0x0000ffff; - if (handle == 0) - break; - - aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL); - - dev->host_rrq[index++] = 0; - if (index == dev->scsi_host_ptr->can_queue + - AAC_NUM_MGT_FIB) - index = 0; - dev->host_rrq_idx = index; + ctx = (struct aac_msix_ctx *)dev_id; + dev = ctx->dev; + vector_no = ctx->vector_no; + + if (dev->msi_enabled) { + mode = AAC_INT_MODE_MSI; + if (vector_no == 0) { + bellbits = src_readl(dev, MUnit.ODR_MSI); + if (bellbits & 0x40000) + mode |= AAC_INT_MODE_AIF; + if (bellbits & 0x1000) + mode |= AAC_INT_MODE_SYNC; } } else { - bellbits_shifted = (bellbits >> SRC_ODR_SHIFT); - if (bellbits_shifted & DoorBellAifPending) { + mode = AAC_INT_MODE_INTX; + bellbits = src_readl(dev, MUnit.ODR_R); + if (bellbits & PmDoorBellResponseSent) { + bellbits = PmDoorBellResponseSent; + src_writel(dev, MUnit.ODR_C, bellbits); + src_readl(dev, MUnit.ODR_C); + } else { + bellbits_shifted = (bellbits >> SRC_ODR_SHIFT); src_writel(dev, MUnit.ODR_C, bellbits); src_readl(dev, MUnit.ODR_C); - our_interrupt = 1; - /* handle AIF */ - aac_intr_normal(dev, 0, 2, 0, NULL); - } else if (bellbits_shifted & OUTBOUNDDOORBELL_0) { - unsigned long sflags; - struct list_head *entry; - int send_it = 0; - extern int aac_sync_mode; + if (bellbits_shifted & DoorBellAifPending) + mode |= AAC_INT_MODE_AIF; + else if (bellbits_shifted & OUTBOUNDDOORBELL_0) + mode |= AAC_INT_MODE_SYNC; + } + } + + if (mode & AAC_INT_MODE_SYNC) { + unsigned long sflags; + struct list_head *entry; + int send_it = 0; + extern int aac_sync_mode; + + if (!aac_sync_mode && !dev->msi_enabled) { src_writel(dev, MUnit.ODR_C, bellbits); src_readl(dev, MUnit.ODR_C); + } - if (!aac_sync_mode) { - src_writel(dev, MUnit.ODR_C, bellbits); - src_readl(dev, MUnit.ODR_C); - our_interrupt = 1; + if (dev->sync_fib) { + if (dev->sync_fib->callback) + dev->sync_fib->callback(dev->sync_fib->callback_data, + 
dev->sync_fib); + spin_lock_irqsave(&dev->sync_fib->event_lock, sflags); + if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) { + dev->management_fib_count--; + up(&dev->sync_fib->event_wait); } - - if (dev->sync_fib) { - our_interrupt = 1; - if (dev->sync_fib->callback) - dev->sync_fib->callback(dev->sync_fib->callback_data, - dev->sync_fib); - spin_lock_irqsave(&dev->sync_fib->event_lock, sflags); - if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) { - dev->management_fib_count--; - up(&dev->sync_fib->event_wait); - } - spin_unlock_irqrestore(&dev->sync_fib->event_lock, sflags); - spin_lock_irqsave(&dev->sync_lock, sflags); - if (!list_empty(&dev->sync_fib_list)) { - entry = dev->sync_fib_list.next; - dev->sync_fib = list_entry(entry, struct fib, fiblink); - list_del(entry); - send_it = 1; - } else { - dev->sync_fib = NULL; - } - spin_unlock_irqrestore(&dev->sync_lock, sflags); - if (send_it) { - aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB, - (u32)dev->sync_fib->hw_fib_pa, 0, 0, 0, 0, 0, - NULL, NULL, NULL, NULL, NULL); - } + spin_unlock_irqrestore(&dev->sync_fib->event_lock, + sflags); + spin_lock_irqsave(&dev->sync_lock, sflags); + if (!list_empty(&dev->sync_fib_list)) { + entry = dev->sync_fib_list.next; + dev->sync_fib = list_entry(entry, + struct fib, + fiblink); + list_del(entry); + send_it = 1; + } else { + dev->sync_fib = NULL; + } + spin_unlock_irqrestore(&dev->sync_lock, sflags); + if (send_it) { + aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB, + (u32)dev->sync_fib->hw_fib_pa, + 0, 0, 0, 0, 0, + NULL, NULL, NULL, NULL, NULL); } } + if (!dev->msi_enabled) + mode = 0; + + } + + if (mode & AAC_INT_MODE_AIF) { + /* handle AIF */ + aac_intr_normal(dev, 0, 2, 0, NULL); + if (dev->msi_enabled) + aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT); + mode = 0; } - if (our_interrupt) { - return IRQ_HANDLED; + if (mode) { + index = dev->host_rrq_idx[vector_no]; + + for (;;) { + isFastResponse = 0; + /* remove toggle bit (31) */ + handle = (dev->host_rrq[index] & 0x7fffffff); + /* check fast response bit (30) */ + if (handle & 0x40000000) + isFastResponse = 1; + handle &= 0x0000ffff; + if (handle == 0) + break; + if (dev->msi_enabled && dev->max_msix > 1) + atomic_dec(&dev->rrq_outstanding[vector_no]); + aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL); + dev->host_rrq[index++] = 0; + if (index == (vector_no + 1) * dev->vector_cap) + index = vector_no * dev->vector_cap; + dev->host_rrq_idx[vector_no] = index; + } + mode = 0; } - return IRQ_NONE; + + return IRQ_HANDLED; } /** @@ -155,7 +185,7 @@ static void aac_src_disable_interrupt(struct aac_dev *dev) static void aac_src_enable_interrupt_message(struct aac_dev *dev) { - src_writel(dev, MUnit.OIMR, dev->OIMR = 0xfffffff8); + aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT); } /** @@ -174,6 +204,7 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command, u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4) { unsigned long start; + unsigned long delay; int ok; /* @@ -191,7 +222,10 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command, /* * Clear the synch command doorbell to start on a clean slate. 
*/ - src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); + if (!dev->msi_enabled) + src_writel(dev, + MUnit.ODR_C, + OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); /* * Disable doorbell interrupts @@ -213,19 +247,29 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command, ok = 0; start = jiffies; - /* - * Wait up to 5 minutes - */ - while (time_before(jiffies, start+300*HZ)) { + if (command == IOP_RESET_ALWAYS) { + /* Wait up to 10 sec */ + delay = 10*HZ; + } else { + /* Wait up to 5 minutes */ + delay = 300*HZ; + } + while (time_before(jiffies, start+delay)) { udelay(5); /* Delay 5 microseconds to let Mon960 get info. */ /* * Mon960 will set doorbell0 bit when it has completed the command. */ - if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) { + if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) { /* * Clear the doorbell. */ - src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); + if (dev->msi_enabled) + aac_src_access_devreg(dev, + AAC_CLEAR_SYNC_BIT); + else + src_writel(dev, + MUnit.ODR_C, + OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); ok = 1; break; } @@ -254,11 +298,16 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command, *r3 = readl(&dev->IndexRegs->Mailbox[3]); if (r4) *r4 = readl(&dev->IndexRegs->Mailbox[4]); - + if (command == GET_COMM_PREFERRED_SETTINGS) + dev->max_msix = + readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF; /* * Clear the synch command doorbell. */ - src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); + if (!dev->msi_enabled) + src_writel(dev, + MUnit.ODR_C, + OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); } /* @@ -335,9 +384,14 @@ static void aac_src_notify_adapter(struct aac_dev *dev, u32 event) static void aac_src_start_adapter(struct aac_dev *dev) { struct aac_init *init; + int i; /* reset host_rrq_idx first */ - dev->host_rrq_idx = 0; + for (i = 0; i < dev->max_msix; i++) { + dev->host_rrq_idx[i] = i * dev->vector_cap; + atomic_set(&dev->rrq_outstanding[i], 0); + } + dev->fibs_pushed_no = 0; init = dev->init; init->HostElapsedSeconds = cpu_to_le32(get_seconds()); @@ -390,15 +444,39 @@ static int aac_src_deliver_message(struct fib *fib) { struct aac_dev *dev = fib->dev; struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; - unsigned long qflags; u32 fibsize; dma_addr_t address; struct aac_fib_xporthdr *pFibX; u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size); - spin_lock_irqsave(q->lock, qflags); - q->numpending++; - spin_unlock_irqrestore(q->lock, qflags); + atomic_inc(&q->numpending); + + if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest && + dev->max_msix > 1) { + u_int16_t vector_no, first_choice = 0xffff; + + vector_no = dev->fibs_pushed_no % dev->max_msix; + do { + vector_no += 1; + if (vector_no == dev->max_msix) + vector_no = 1; + if (atomic_read(&dev->rrq_outstanding[vector_no]) < + dev->vector_cap) + break; + if (0xffff == first_choice) + first_choice = vector_no; + else if (vector_no == first_choice) + break; + } while (1); + if (vector_no == first_choice) + vector_no = 0; + atomic_inc(&dev->rrq_outstanding[vector_no]); + if (dev->fibs_pushed_no == 0xffffffff) + dev->fibs_pushed_no = 0; + else + dev->fibs_pushed_no++; + fib->hw_fib_va->header.Handle += (vector_no << 16); + } if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { /* Calculate the amount to the fibsize bits */ @@ -498,15 +576,34 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled) if (bled) printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", dev->name, dev->id, bled); + 
dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL); - if (bled || (var != 0x00000001)) - return -EINVAL; - if (dev->supplement_adapter_info.SupportedOptions2 & - AAC_OPTION_DOORBELL_RESET) { - src_writel(dev, MUnit.IDR, reset_mask); + if ((bled || (var != 0x00000001)) && + !dev->doorbell_mask) + return -EINVAL; + else if (dev->doorbell_mask) { + reset_mask = dev->doorbell_mask; + bled = 0; + var = 0x00000001; + } + + if ((dev->pdev->device == PMC_DEVICE_S7 || + dev->pdev->device == PMC_DEVICE_S8 || + dev->pdev->device == PMC_DEVICE_S9) && dev->msi_enabled) { + aac_src_access_devreg(dev, AAC_ENABLE_INTX); + dev->msi_enabled = 0; msleep(5000); /* Delay 5 seconds */ } + + if (!bled && (dev->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_DOORBELL_RESET)) { + src_writel(dev, MUnit.IDR, reset_mask); + ssleep(45); + } else { + src_writel(dev, MUnit.IDR, 0x100); + ssleep(45); + } } if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC) @@ -527,7 +624,6 @@ int aac_src_select_comm(struct aac_dev *dev, int comm) { switch (comm) { case AAC_COMM_MESSAGE: - dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; dev->a_ops.adapter_intr = aac_src_intr_message; dev->a_ops.adapter_deliver = aac_src_deliver_message; break; @@ -625,6 +721,7 @@ int aac_src_init(struct aac_dev *dev) */ dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter; dev->a_ops.adapter_disable_int = aac_src_disable_interrupt; + dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; dev->a_ops.adapter_notify = aac_src_notify_adapter; dev->a_ops.adapter_sync_cmd = src_sync_cmd; dev->a_ops.adapter_check_health = aac_src_check_health; @@ -646,8 +743,11 @@ int aac_src_init(struct aac_dev *dev) dev->msi = aac_msi && !pci_enable_msi(dev->pdev); + dev->aac_msix[0].vector_no = 0; + dev->aac_msix[0].dev = dev; + if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, - IRQF_SHARED, "aacraid", dev) < 0) { + IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) { if (dev->msi) pci_disable_msi(dev->pdev); @@ -659,6 +759,7 @@ int aac_src_init(struct aac_dev *dev) dev->dbg_base = pci_resource_start(dev->pdev, 2); dev->dbg_base_mapped = dev->regs.src.bar1; dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE; + dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; aac_adapter_enable_int(dev); @@ -688,7 +789,9 @@ int aac_srcv_init(struct aac_dev *dev) unsigned long status; int restart = 0; int instance = dev->id; + int i, j; const char *name = dev->name; + int cpu; dev->a_ops.adapter_ioremap = aac_srcv_ioremap; dev->a_ops.adapter_comm = aac_src_select_comm; @@ -784,6 +887,7 @@ int aac_srcv_init(struct aac_dev *dev) */ dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter; dev->a_ops.adapter_disable_int = aac_src_disable_interrupt; + dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; dev->a_ops.adapter_notify = aac_src_notify_adapter; dev->a_ops.adapter_sync_cmd = src_sync_cmd; dev->a_ops.adapter_check_health = aac_src_check_health; @@ -802,18 +906,54 @@ int aac_srcv_init(struct aac_dev *dev) goto error_iounmap; if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) goto error_iounmap; - dev->msi = aac_msi && !pci_enable_msi(dev->pdev); - if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, - IRQF_SHARED, "aacraid", dev) < 0) { - if (dev->msi) - pci_disable_msi(dev->pdev); - printk(KERN_ERR "%s%d: Interrupt unavailable.\n", - name, instance); - goto error_iounmap; + if (dev->msi_enabled) + aac_src_access_devreg(dev, 
AAC_ENABLE_MSIX); + if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) { + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < dev->max_msix; i++) { + dev->aac_msix[i].vector_no = i; + dev->aac_msix[i].dev = dev; + + if (request_irq(dev->msixentry[i].vector, + dev->a_ops.adapter_intr, + 0, + "aacraid", + &(dev->aac_msix[i]))) { + printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n", + name, instance, i); + for (j = 0 ; j < i ; j++) + free_irq(dev->msixentry[j].vector, + &(dev->aac_msix[j])); + pci_disable_msix(dev->pdev); + goto error_iounmap; + } + if (irq_set_affinity_hint( + dev->msixentry[i].vector, + get_cpu_mask(cpu))) { + printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n", + name, instance, cpu); + } + cpu = cpumask_next(cpu, cpu_online_mask); + } + } else { + dev->aac_msix[0].vector_no = 0; + dev->aac_msix[0].dev = dev; + + if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, + IRQF_SHARED, + "aacraid", + &(dev->aac_msix[0])) < 0) { + if (dev->msi) + pci_disable_msi(dev->pdev); + printk(KERN_ERR "%s%d: Interrupt unavailable.\n", + name, instance); + goto error_iounmap; + } } dev->dbg_base = dev->base_start; dev->dbg_base_mapped = dev->base; dev->dbg_size = dev->base_size; + dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; aac_adapter_enable_int(dev); @@ -831,3 +971,93 @@ error_iounmap: return -1; } +void aac_src_access_devreg(struct aac_dev *dev, int mode) +{ + u_int32_t val; + + switch (mode) { + case AAC_ENABLE_INTERRUPT: + src_writel(dev, + MUnit.OIMR, + dev->OIMR = (dev->msi_enabled ? + AAC_INT_ENABLE_TYPE1_MSIX : + AAC_INT_ENABLE_TYPE1_INTX)); + break; + + case AAC_DISABLE_INTERRUPT: + src_writel(dev, + MUnit.OIMR, + dev->OIMR = AAC_INT_DISABLE_ALL); + break; + + case AAC_ENABLE_MSIX: + /* set bit 6 */ + val = src_readl(dev, MUnit.IDR); + val |= 0x40; + src_writel(dev, MUnit.IDR, val); + src_readl(dev, MUnit.IDR); + /* unmask int. */ + val = PMC_ALL_INTERRUPT_BITS; + src_writel(dev, MUnit.IOAR, val); + val = src_readl(dev, MUnit.OIMR); + src_writel(dev, + MUnit.OIMR, + val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0))); + break; + + case AAC_DISABLE_MSIX: + /* reset bit 6 */ + val = src_readl(dev, MUnit.IDR); + val &= ~0x40; + src_writel(dev, MUnit.IDR, val); + src_readl(dev, MUnit.IDR); + break; + + case AAC_CLEAR_AIF_BIT: + /* set bit 5 */ + val = src_readl(dev, MUnit.IDR); + val |= 0x20; + src_writel(dev, MUnit.IDR, val); + src_readl(dev, MUnit.IDR); + break; + + case AAC_CLEAR_SYNC_BIT: + /* set bit 4 */ + val = src_readl(dev, MUnit.IDR); + val |= 0x10; + src_writel(dev, MUnit.IDR, val); + src_readl(dev, MUnit.IDR); + break; + + case AAC_ENABLE_INTX: + /* set bit 7 */ + val = src_readl(dev, MUnit.IDR); + val |= 0x80; + src_writel(dev, MUnit.IDR, val); + src_readl(dev, MUnit.IDR); + /* unmask int. */ + val = PMC_ALL_INTERRUPT_BITS; + src_writel(dev, MUnit.IOAR, val); + src_readl(dev, MUnit.IOAR); + val = src_readl(dev, MUnit.OIMR); + src_writel(dev, MUnit.OIMR, + val & (~(PMC_GLOBAL_INT_BIT2))); + break; + + default: + break; + } +} + +static int aac_src_get_sync_status(struct aac_dev *dev) +{ + + int val; + + if (dev->msi_enabled) + val = src_readl(dev, MUnit.ODR_MSI) & 0x1000 ? 
1 : 0; + else + val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT; + + return val; +} diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index 770c48ddbe5e..ec432763a29a 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c @@ -1,28 +1,9 @@ -/* $Id: aha1542.c,v 1.1 1992/07/24 06:27:38 root Exp root $ - * linux/kernel/aha1542.c +/* + * Driver for Adaptec AHA-1542 SCSI host adapters * * Copyright (C) 1992 Tommy Thorn * Copyright (C) 1993, 1994, 1995 Eric Youngdale - * - * Modified by Eric Youngdale - * Use request_irq and request_dma to help prevent unexpected conflicts - * Set up on-board DMA controller, such that we do not have to - * have the bios enabled to use the aha1542. - * Modified by David Gentzel - * Don't call request_dma if dma mask is 0 (for BusLogic BT-445S VL-Bus - * controller). - * Modified by Matti Aarnio - * Accept parameters from LILO cmd-line. -- 1-Oct-94 - * Modified by Mike McLagan <mike.mclagan@linux.org> - * Recognise extended mode on AHA1542CP, different bit than 1542CF - * 1-Jan-97 - * Modified by Bjorn L. Thordarson and Einar Thor Einarsson - * Recognize that DMA0 is valid DMA channel -- 13-Jul-98 - * Modified by Chris Faulhaber <jedgar@fxp.org> - * Added module command-line options - * 19-Jul-99 - * Modified by Adam Fritzler - * Added proper detection of the AHA-1640 (MCA, now deleted) + * Copyright (C) 2015 Ondrej Zary */ #include <linux/module.h> @@ -30,96 +11,44 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> -#include <linux/ioport.h> #include <linux/delay.h> -#include <linux/proc_fs.h> #include <linux/init.h> #include <linux/spinlock.h> -#include <linux/isapnp.h> -#include <linux/blkdev.h> +#include <linux/isa.h> +#include <linux/pnp.h> #include <linux/slab.h> - +#include <linux/io.h> #include <asm/dma.h> -#include <asm/io.h> - -#include "scsi.h" +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "aha1542.h" -#define SCSI_BUF_PA(address) isa_virt_to_bus(address) -#define SCSI_SG_PA(sgent) (isa_page_to_bus(sg_page((sgent))) + (sgent)->offset) - -#include <linux/stat.h> - -#ifdef DEBUG -#define DEB(x) x -#else -#define DEB(x) -#endif - -/* - static const char RCSid[] = "$Header: /usr/src/linux/kernel/blk_drv/scsi/RCS/aha1542.c,v 1.1 1992/07/24 06:27:38 root Exp root $"; - */ - -/* The adaptec can be configured for quite a number of addresses, but - I generally do not want the card poking around at random. We allow - two addresses - this allows people to use the Adaptec with a Midi - card, which also used 0x330 -- can be overridden with LILO! */ - -#define MAXBOARDS 4 /* Increase this and the sizes of the - arrays below, if you need more.. */ - -/* Boards 3,4 slots are reserved for ISAPnP scans */ - -static unsigned int bases[MAXBOARDS] __initdata = {0x330, 0x334, 0, 0}; - -/* set by aha1542_setup according to the command line; they also may - be marked __initdata, but require zero initializers then */ - -static int setup_called[MAXBOARDS]; -static int setup_buson[MAXBOARDS]; -static int setup_busoff[MAXBOARDS]; -static int setup_dmaspeed[MAXBOARDS] __initdata = { -1, -1, -1, -1 }; +#define MAXBOARDS 4 -/* - * LILO/Module params: aha1542=<PORTBASE>[,<BUSON>,<BUSOFF>[,<DMASPEED>]] - * - * Where: <PORTBASE> is any of the valid AHA addresses: - * 0x130, 0x134, 0x230, 0x234, 0x330, 0x334 - * <BUSON> is the time (in microsecs) that AHA spends on the AT-bus - * when transferring data. 
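(For reference: the LILO-style aha1542= setup string documented in the comment being removed here is superseded by ordinary per-board module parameters, io, bus_on, bus_off and dma_speed, declared just below. An illustrative invocation under the new scheme, not taken from the patch, would be "modprobe aha1542 io=0x330,0x334 bus_on=11 bus_off=4 dma_speed=5"; bus timings outside the documented ranges are clamped to 2-15 us and 1-64 us by aha1542_set_bus_times(), and an unsupported dma_speed value leaves the jumper-selected default untouched.)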
1542A power-on default is 11us, - * valid values are in range: 2..15 (decimal) - * <BUSOFF> is the time that AHA spends OFF THE BUS after while - * it is transferring data (not to monopolize the bus). - * Power-on default is 4us, valid range: 1..64 microseconds. - * <DMASPEED> Default is jumper selected (1542A: on the J1), - * but experimenter can alter it with this. - * Valid values: 5, 6, 7, 8, 10 (MB/s) - * Factory default is 5 MB/s. - */ - -#if defined(MODULE) -static bool isapnp = 0; -static int aha1542[] = {0x330, 11, 4, -1}; -module_param_array(aha1542, int, NULL, 0); +static bool isapnp = 1; module_param(isapnp, bool, 0); +MODULE_PARM_DESC(isapnp, "enable PnP support (default=1)"); -static struct isapnp_device_id id_table[] __initdata = { - { - ISAPNP_ANY_ID, ISAPNP_ANY_ID, - ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1542), - 0 - }, - {0} -}; +static int io[MAXBOARDS] = { 0x330, 0x334, 0, 0 }; +module_param_array(io, int, NULL, 0); +MODULE_PARM_DESC(io, "base IO address of controller (0x130,0x134,0x230,0x234,0x330,0x334, default=0x330,0x334)"); -MODULE_DEVICE_TABLE(isapnp, id_table); +/* time AHA spends on the AT-bus during data transfer */ +static int bus_on[MAXBOARDS] = { -1, -1, -1, -1 }; /* power-on default: 11us */ +module_param_array(bus_on, int, NULL, 0); +MODULE_PARM_DESC(bus_on, "bus on time [us] (2-15, default=-1 [HW default: 11])"); -#else -static int isapnp = 1; -#endif +/* time AHA spends off the bus (not to monopolize it) during data transfer */ +static int bus_off[MAXBOARDS] = { -1, -1, -1, -1 }; /* power-on default: 4us */ +module_param_array(bus_off, int, NULL, 0); +MODULE_PARM_DESC(bus_off, "bus off time [us] (1-64, default=-1 [HW default: 4])"); + +/* default is jumper selected (J1 on 1542A), factory default = 5 MB/s */ +static int dma_speed[MAXBOARDS] = { -1, -1, -1, -1 }; +module_param_array(dma_speed, int, NULL, 0); +MODULE_PARM_DESC(dma_speed, "DMA speed [MB/s] (5,6,7,8,10, default=-1 [by jumper])"); -#define BIOS_TRANSLATION_1632 0 /* Used by some old 1542A boards */ #define BIOS_TRANSLATION_6432 1 /* Default case these days */ #define BIOS_TRANSLATION_25563 2 /* Big disk case */ @@ -128,134 +57,71 @@ struct aha1542_hostdata { int bios_translation; /* Mapping bios uses - for compatibility */ int aha1542_last_mbi_used; int aha1542_last_mbo_used; - Scsi_Cmnd *SCint[AHA1542_MAILBOXES]; + struct scsi_cmnd *int_cmds[AHA1542_MAILBOXES]; struct mailbox mb[2 * AHA1542_MAILBOXES]; struct ccb ccb[AHA1542_MAILBOXES]; }; -#define HOSTDATA(host) ((struct aha1542_hostdata *) &host->hostdata) - -static DEFINE_SPINLOCK(aha1542_lock); - - - -#define WAITnexttimeout 3000000 - -static void setup_mailboxes(int base_io, struct Scsi_Host *shpnt); -static int aha1542_restart(struct Scsi_Host *shost); -static void aha1542_intr_handle(struct Scsi_Host *shost); +static inline void aha1542_intr_reset(u16 base) +{ + outb(IRST, CONTROL(base)); +} -#define aha1542_intr_reset(base) outb(IRST, CONTROL(base)) +static inline bool wait_mask(u16 port, u8 mask, u8 allof, u8 noneof, int timeout) +{ + bool delayed = true; -#define WAIT(port, mask, allof, noneof) \ - { register int WAITbits; \ - register int WAITtimeout = WAITnexttimeout; \ - while (1) { \ - WAITbits = inb(port) & (mask); \ - if ((WAITbits & (allof)) == (allof) && ((WAITbits & (noneof)) == 0)) \ - break; \ - if (--WAITtimeout == 0) goto fail; \ - } \ - } + if (timeout == 0) { + timeout = 3000000; + delayed = false; + } -/* Similar to WAIT, except we use the udelay call to regulate the - amount of time we wait. 
*/ -#define WAITd(port, mask, allof, noneof, timeout) \ - { register int WAITbits; \ - register int WAITtimeout = timeout; \ - while (1) { \ - WAITbits = inb(port) & (mask); \ - if ((WAITbits & (allof)) == (allof) && ((WAITbits & (noneof)) == 0)) \ - break; \ - mdelay(1); \ - if (--WAITtimeout == 0) goto fail; \ - } \ - } + while (1) { + u8 bits = inb(port) & mask; + if ((bits & allof) == allof && ((bits & noneof) == 0)) + break; + if (delayed) + mdelay(1); + if (--timeout == 0) + return false; + } -static void aha1542_stat(void) -{ -/* int s = inb(STATUS), i = inb(INTRFLAGS); - printk("status=%x intrflags=%x\n", s, i, WAITnexttimeout-WAITtimeout); */ + return true; } -/* This is a bit complicated, but we need to make sure that an interrupt - routine does not send something out while we are in the middle of this. - Fortunately, it is only at boot time that multi-byte messages - are ever sent. */ -static int aha1542_out(unsigned int base, unchar * cmdp, int len) +static int aha1542_outb(unsigned int base, u8 val) { - unsigned long flags = 0; - int got_lock; - - if (len == 1) { - got_lock = 0; - while (1 == 1) { - WAIT(STATUS(base), CDF, 0, CDF); - spin_lock_irqsave(&aha1542_lock, flags); - if (inb(STATUS(base)) & CDF) { - spin_unlock_irqrestore(&aha1542_lock, flags); - continue; - } - outb(*cmdp, DATA(base)); - spin_unlock_irqrestore(&aha1542_lock, flags); - return 0; - } - } else { - spin_lock_irqsave(&aha1542_lock, flags); - got_lock = 1; - while (len--) { - WAIT(STATUS(base), CDF, 0, CDF); - outb(*cmdp++, DATA(base)); - } - spin_unlock_irqrestore(&aha1542_lock, flags); - } + if (!wait_mask(STATUS(base), CDF, 0, CDF, 0)) + return 1; + outb(val, DATA(base)); + return 0; -fail: - if (got_lock) - spin_unlock_irqrestore(&aha1542_lock, flags); - printk(KERN_ERR "aha1542_out failed(%d): ", len + 1); - aha1542_stat(); - return 1; } -/* Only used at boot time, so we do not need to worry about latency as much - here */ - -static int __init aha1542_in(unsigned int base, unchar * cmdp, int len) +static int aha1542_out(unsigned int base, u8 *buf, int len) { - unsigned long flags; - - spin_lock_irqsave(&aha1542_lock, flags); while (len--) { - WAIT(STATUS(base), DF, DF, 0); - *cmdp++ = inb(DATA(base)); + if (!wait_mask(STATUS(base), CDF, 0, CDF, 0)) + return 1; + outb(*buf++, DATA(base)); } - spin_unlock_irqrestore(&aha1542_lock, flags); + if (!wait_mask(INTRFLAGS(base), INTRMASK, HACC, 0, 0)) + return 1; + return 0; -fail: - spin_unlock_irqrestore(&aha1542_lock, flags); - printk(KERN_ERR "aha1542_in failed(%d): ", len + 1); - aha1542_stat(); - return 1; } -/* Similar to aha1542_in, except that we wait a very short period of time. 
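(A note on the wait_mask() helper introduced above, which replaces both the WAIT and WAITd macros: passing a timeout of 0 selects the old WAIT behaviour, up to 3,000,000 tight polls of the port with no delay between reads, while a non-zero value selects the old WAITd behaviour, polling once per millisecond via mdelay(1) for at most that many iterations; aha1542_mbenable() below passes 100 for the short wait. The helper returns false on timeout instead of jumping to a fail label, which is what allows the goto-based error paths of the old macros' callers to be removed.)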
- We use this if we know the board is alive and awake, but we are not sure - if the board will respond to the command we are about to send or not */ -static int __init aha1542_in1(unsigned int base, unchar * cmdp, int len) -{ - unsigned long flags; +/* Only used at boot time, so we do not need to worry about latency as much + here */ - spin_lock_irqsave(&aha1542_lock, flags); +static int aha1542_in(unsigned int base, u8 *buf, int len, int timeout) +{ while (len--) { - WAITd(STATUS(base), DF, DF, 0, 100); - *cmdp++ = inb(DATA(base)); + if (!wait_mask(STATUS(base), DF, DF, 0, timeout)) + return 1; + *buf++ = inb(DATA(base)); } - spin_unlock_irqrestore(&aha1542_lock, flags); return 0; -fail: - spin_unlock_irqrestore(&aha1542_lock, flags); - return 1; } static int makecode(unsigned hosterr, unsigned scsierr) @@ -297,7 +163,9 @@ static int makecode(unsigned hosterr, unsigned scsierr) case 0x1a: /* Invalid CCB or Segment List Parameter-A segment list with a zero length segment or invalid segment list boundaries was received. A CCB parameter was invalid. */ - DEB(printk("Aha1542: %x %x\n", hosterr, scsierr)); +#ifdef DEBUG + printk("Aha1542: %x %x\n", hosterr, scsierr); +#endif hosterr = DID_ERROR; /* Couldn't find any better */ break; @@ -314,106 +182,74 @@ static int makecode(unsigned hosterr, unsigned scsierr) return scsierr | (hosterr << 16); } -static int __init aha1542_test_port(int bse, struct Scsi_Host *shpnt) +static int aha1542_test_port(struct Scsi_Host *sh) { - unchar inquiry_cmd[] = {CMD_INQUIRY}; - unchar inquiry_result[4]; - unchar *cmdp; - int len; - volatile int debug = 0; + u8 inquiry_result[4]; + int i; /* Quick and dirty test for presence of the card. */ - if (inb(STATUS(bse)) == 0xff) + if (inb(STATUS(sh->io_port)) == 0xff) return 0; /* Reset the adapter. I ought to make a hard reset, but it's not really necessary */ - /* DEB(printk("aha1542_test_port called \n")); */ - /* In case some other card was probing here, reset interrupts */ - aha1542_intr_reset(bse); /* reset interrupts, so they don't block */ + aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */ - outb(SRST | IRST /*|SCRST */ , CONTROL(bse)); + outb(SRST | IRST /*|SCRST */ , CONTROL(sh->io_port)); mdelay(20); /* Wait a little bit for things to settle down. 
*/ - debug = 1; /* Expect INIT and IDLE, any of the others are bad */ - WAIT(STATUS(bse), STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF); + if (!wait_mask(STATUS(sh->io_port), STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0)) + return 0; - debug = 2; /* Shouldn't have generated any interrupts during reset */ - if (inb(INTRFLAGS(bse)) & INTRMASK) - goto fail; - + if (inb(INTRFLAGS(sh->io_port)) & INTRMASK) + return 0; /* Perform a host adapter inquiry instead so we do not need to set up the mailboxes ahead of time */ - aha1542_out(bse, inquiry_cmd, 1); - - debug = 3; - len = 4; - cmdp = &inquiry_result[0]; + aha1542_outb(sh->io_port, CMD_INQUIRY); - while (len--) { - WAIT(STATUS(bse), DF, DF, 0); - *cmdp++ = inb(DATA(bse)); + for (i = 0; i < 4; i++) { + if (!wait_mask(STATUS(sh->io_port), DF, DF, 0, 0)) + return 0; + inquiry_result[i] = inb(DATA(sh->io_port)); } - debug = 8; /* Reading port should reset DF */ - if (inb(STATUS(bse)) & DF) - goto fail; + if (inb(STATUS(sh->io_port)) & DF) + return 0; - debug = 9; /* When HACC, command is completed, and we're though testing */ - WAIT(INTRFLAGS(bse), HACC, HACC, 0); - /* now initialize adapter */ + if (!wait_mask(INTRFLAGS(sh->io_port), HACC, HACC, 0, 0)) + return 0; - debug = 10; /* Clear interrupts */ - outb(IRST, CONTROL(bse)); - - debug = 11; - - return debug; /* 1 = ok */ -fail: - return 0; /* 0 = not ok */ -} + outb(IRST, CONTROL(sh->io_port)); -/* A quick wrapper for do_aha1542_intr_handle to grab the spin lock */ -static irqreturn_t do_aha1542_intr_handle(int dummy, void *dev_id) -{ - unsigned long flags; - struct Scsi_Host *shost = dev_id; - - spin_lock_irqsave(shost->host_lock, flags); - aha1542_intr_handle(shost); - spin_unlock_irqrestore(shost->host_lock, flags); - return IRQ_HANDLED; + return 1; } -/* A "high" level interrupt handler */ -static void aha1542_intr_handle(struct Scsi_Host *shost) +static irqreturn_t aha1542_interrupt(int irq, void *dev_id) { - void (*my_done) (Scsi_Cmnd *) = NULL; + struct Scsi_Host *sh = dev_id; + struct aha1542_hostdata *aha1542 = shost_priv(sh); + void (*my_done)(struct scsi_cmnd *) = NULL; int errstatus, mbi, mbo, mbistatus; int number_serviced; unsigned long flags; - Scsi_Cmnd *SCtmp; + struct scsi_cmnd *tmp_cmd; int flag; - int needs_restart; - struct mailbox *mb; - struct ccb *ccb; - - mb = HOSTDATA(shost)->mb; - ccb = HOSTDATA(shost)->ccb; + struct mailbox *mb = aha1542->mb; + struct ccb *ccb = aha1542->ccb; #ifdef DEBUG { - flag = inb(INTRFLAGS(shost->io_port)); - printk(KERN_DEBUG "aha1542_intr_handle: "); + flag = inb(INTRFLAGS(sh->io_port)); + shost_printk(KERN_DEBUG, sh, "aha1542_intr_handle: "); if (!(flag & ANYINTR)) printk("no interrupt?"); if (flag & MBIF) @@ -424,14 +260,14 @@ static void aha1542_intr_handle(struct Scsi_Host *shost) printk("HACC "); if (flag & SCRD) printk("SCRD "); - printk("status %02x\n", inb(STATUS(shost->io_port))); + printk("status %02x\n", inb(STATUS(sh->io_port))); }; #endif number_serviced = 0; - needs_restart = 0; - while (1 == 1) { - flag = inb(INTRFLAGS(shost->io_port)); + spin_lock_irqsave(sh->host_lock, flags); + while (1) { + flag = inb(INTRFLAGS(sh->io_port)); /* Check for unusual interrupts. 
If any of these happen, we should probably do something special, but for now just printing a message @@ -442,15 +278,12 @@ static void aha1542_intr_handle(struct Scsi_Host *shost) printk("MBOF "); if (flag & HACC) printk("HACC "); - if (flag & SCRD) { - needs_restart = 1; + if (flag & SCRD) printk("SCRD "); - } } - aha1542_intr_reset(shost->io_port); + aha1542_intr_reset(sh->io_port); - spin_lock_irqsave(&aha1542_lock, flags); - mbi = HOSTDATA(shost)->aha1542_last_mbi_used + 1; + mbi = aha1542->aha1542_last_mbi_used + 1; if (mbi >= 2 * AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES; @@ -460,57 +293,51 @@ static void aha1542_intr_handle(struct Scsi_Host *shost) mbi++; if (mbi >= 2 * AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES; - } while (mbi != HOSTDATA(shost)->aha1542_last_mbi_used); + } while (mbi != aha1542->aha1542_last_mbi_used); if (mb[mbi].status == 0) { - spin_unlock_irqrestore(&aha1542_lock, flags); + spin_unlock_irqrestore(sh->host_lock, flags); /* Hmm, no mail. Must have read it the last time around */ - if (!number_serviced && !needs_restart) - printk(KERN_WARNING "aha1542.c: interrupt received, but no mail.\n"); - /* We detected a reset. Restart all pending commands for - devices that use the hard reset option */ - if (needs_restart) - aha1542_restart(shost); - return; + if (!number_serviced) + shost_printk(KERN_WARNING, sh, "interrupt received, but no mail.\n"); + return IRQ_HANDLED; }; - mbo = (scsi2int(mb[mbi].ccbptr) - (SCSI_BUF_PA(&ccb[0]))) / sizeof(struct ccb); + mbo = (scsi2int(mb[mbi].ccbptr) - (isa_virt_to_bus(&ccb[0]))) / sizeof(struct ccb); mbistatus = mb[mbi].status; mb[mbi].status = 0; - HOSTDATA(shost)->aha1542_last_mbi_used = mbi; - spin_unlock_irqrestore(&aha1542_lock, flags); + aha1542->aha1542_last_mbi_used = mbi; #ifdef DEBUG - { - if (ccb[mbo].tarstat | ccb[mbo].hastat) - printk(KERN_DEBUG "aha1542_command: returning %x (status %d)\n", - ccb[mbo].tarstat + ((int) ccb[mbo].hastat << 16), mb[mbi].status); - }; + if (ccb[mbo].tarstat | ccb[mbo].hastat) + shost_printk(KERN_DEBUG, sh, "aha1542_command: returning %x (status %d)\n", + ccb[mbo].tarstat + ((int) ccb[mbo].hastat << 16), mb[mbi].status); #endif if (mbistatus == 3) continue; /* Aborted command not found */ #ifdef DEBUG - printk(KERN_DEBUG "...done %d %d\n", mbo, mbi); + shost_printk(KERN_DEBUG, sh, "...done %d %d\n", mbo, mbi); #endif - SCtmp = HOSTDATA(shost)->SCint[mbo]; + tmp_cmd = aha1542->int_cmds[mbo]; - if (!SCtmp || !SCtmp->scsi_done) { - printk(KERN_WARNING "aha1542_intr_handle: Unexpected interrupt\n"); - printk(KERN_WARNING "tarstat=%x, hastat=%x idlun=%x ccb#=%d \n", ccb[mbo].tarstat, + if (!tmp_cmd || !tmp_cmd->scsi_done) { + spin_unlock_irqrestore(sh->host_lock, flags); + shost_printk(KERN_WARNING, sh, "Unexpected interrupt\n"); + shost_printk(KERN_WARNING, sh, "tarstat=%x, hastat=%x idlun=%x ccb#=%d\n", ccb[mbo].tarstat, ccb[mbo].hastat, ccb[mbo].idlun, mbo); - return; + return IRQ_HANDLED; } - my_done = SCtmp->scsi_done; - kfree(SCtmp->host_scribble); - SCtmp->host_scribble = NULL; + my_done = tmp_cmd->scsi_done; + kfree(tmp_cmd->host_scribble); + tmp_cmd->host_scribble = NULL; /* Fetch the sense data, and tuck it away, in the required slot. 
The Adaptec automatically fetches it, and there is no guarantee that we will still have it in the cdb when we come back */ if (ccb[mbo].tarstat == 2) - memcpy(SCtmp->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen], + memcpy(tmp_cmd->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen], SCSI_SENSE_BUFFERSIZE); @@ -525,166 +352,122 @@ static void aha1542_intr_handle(struct Scsi_Host *shost) #ifdef DEBUG if (errstatus) - printk(KERN_DEBUG "(aha1542 error:%x %x %x) ", errstatus, + shost_printk(KERN_DEBUG, sh, "(aha1542 error:%x %x %x) ", errstatus, ccb[mbo].hastat, ccb[mbo].tarstat); + if (ccb[mbo].tarstat == 2) + print_hex_dump_bytes("sense: ", DUMP_PREFIX_NONE, &ccb[mbo].cdb[ccb[mbo].cdblen], 12); + if (errstatus) + printk("aha1542_intr_handle: returning %6x\n", errstatus); #endif - - if (ccb[mbo].tarstat == 2) { -#ifdef DEBUG - int i; -#endif - DEB(printk("aha1542_intr_handle: sense:")); -#ifdef DEBUG - for (i = 0; i < 12; i++) - printk("%02x ", ccb[mbo].cdb[ccb[mbo].cdblen + i]); - printk("\n"); -#endif - /* - DEB(printk("aha1542_intr_handle: buf:")); - for (i = 0; i < bufflen; i++) - printk("%02x ", ((unchar *)buff)[i]); - printk("\n"); - */ - } - DEB(if (errstatus) printk("aha1542_intr_handle: returning %6x\n", errstatus)); - SCtmp->result = errstatus; - HOSTDATA(shost)->SCint[mbo] = NULL; /* This effectively frees up the mailbox slot, as - far as queuecommand is concerned */ - my_done(SCtmp); + tmp_cmd->result = errstatus; + aha1542->int_cmds[mbo] = NULL; /* This effectively frees up the mailbox slot, as + far as queuecommand is concerned */ + my_done(tmp_cmd); number_serviced++; }; } -static int aha1542_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) +static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) { - unchar ahacmd = CMD_START_SCSI; - unchar direction; - unchar *cmd = (unchar *) SCpnt->cmnd; - unchar target = SCpnt->device->id; - unchar lun = SCpnt->device->lun; + struct aha1542_hostdata *aha1542 = shost_priv(sh); + u8 direction; + u8 target = cmd->device->id; + u8 lun = cmd->device->lun; unsigned long flags; - int bufflen = scsi_bufflen(SCpnt); + int bufflen = scsi_bufflen(cmd); int mbo; - struct mailbox *mb; - struct ccb *ccb; + struct mailbox *mb = aha1542->mb; + struct ccb *ccb = aha1542->ccb; - DEB(int i); - - mb = HOSTDATA(SCpnt->device->host)->mb; - ccb = HOSTDATA(SCpnt->device->host)->ccb; - - DEB(if (target > 1) { - SCpnt->result = DID_TIME_OUT << 16; - done(SCpnt); return 0; - } - ); - - if (*cmd == REQUEST_SENSE) { + if (*cmd->cmnd == REQUEST_SENSE) { /* Don't do the command - we have the sense data already */ -#if 0 - /* scsi_request_sense() provides a buffer of size 256, - so there is no reason to expect equality */ - if (bufflen != SCSI_SENSE_BUFFERSIZE) - printk(KERN_CRIT "aha1542: Wrong buffer length supplied " - "for request sense (%d)\n", bufflen); -#endif - SCpnt->result = 0; - done(SCpnt); + cmd->result = 0; + cmd->scsi_done(cmd); return 0; } #ifdef DEBUG - if (*cmd == READ_10 || *cmd == WRITE_10) - i = xscsi2int(cmd + 2); - else if (*cmd == READ_6 || *cmd == WRITE_6) - i = scsi2int(cmd + 2); - else - i = -1; - if (done) - printk(KERN_DEBUG "aha1542_queuecommand: dev %d cmd %02x pos %d len %d ", target, *cmd, i, bufflen); - else - printk(KERN_DEBUG "aha1542_command: dev %d cmd %02x pos %d len %d ", target, *cmd, i, bufflen); - aha1542_stat(); - printk(KERN_DEBUG "aha1542_queuecommand: dumping scsi cmd:"); - for (i = 0; i < SCpnt->cmd_len; i++) - printk("%02x ", cmd[i]); - printk("\n"); - if (*cmd == WRITE_10 || *cmd == WRITE_6) - 
return 0; /* we are still testing, so *don't* write */ + { + int i = -1; + if (*cmd->cmnd == READ_10 || *cmd->cmnd == WRITE_10) + i = xscsi2int(cmd->cmnd + 2); + else if (*cmd->cmnd == READ_6 || *cmd->cmnd == WRITE_6) + i = scsi2int(cmd->cmnd + 2); + shost_printk(KERN_DEBUG, sh, "aha1542_queuecommand: dev %d cmd %02x pos %d len %d", + target, *cmd->cmnd, i, bufflen); + print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len); + } #endif /* Use the outgoing mailboxes in a round-robin fashion, because this is how the host adapter will scan for them */ - spin_lock_irqsave(&aha1542_lock, flags); - mbo = HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used + 1; + spin_lock_irqsave(sh->host_lock, flags); + mbo = aha1542->aha1542_last_mbo_used + 1; if (mbo >= AHA1542_MAILBOXES) mbo = 0; do { - if (mb[mbo].status == 0 && HOSTDATA(SCpnt->device->host)->SCint[mbo] == NULL) + if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL) break; mbo++; if (mbo >= AHA1542_MAILBOXES) mbo = 0; - } while (mbo != HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used); + } while (mbo != aha1542->aha1542_last_mbo_used); - if (mb[mbo].status || HOSTDATA(SCpnt->device->host)->SCint[mbo]) + if (mb[mbo].status || aha1542->int_cmds[mbo]) panic("Unable to find empty mailbox for aha1542.\n"); - HOSTDATA(SCpnt->device->host)->SCint[mbo] = SCpnt; /* This will effectively prevent someone else from - screwing with this cdb. */ + aha1542->int_cmds[mbo] = cmd; /* This will effectively prevent someone else from + screwing with this cdb. */ - HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used = mbo; - spin_unlock_irqrestore(&aha1542_lock, flags); + aha1542->aha1542_last_mbo_used = mbo; #ifdef DEBUG - printk(KERN_DEBUG "Sending command (%d %x)...", mbo, done); + shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done); #endif - any2scsi(mb[mbo].ccbptr, SCSI_BUF_PA(&ccb[mbo])); /* This gets trashed for some reason */ + any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo])); /* This gets trashed for some reason */ memset(&ccb[mbo], 0, sizeof(struct ccb)); - ccb[mbo].cdblen = SCpnt->cmd_len; + ccb[mbo].cdblen = cmd->cmd_len; direction = 0; - if (*cmd == READ_10 || *cmd == READ_6) + if (*cmd->cmnd == READ_10 || *cmd->cmnd == READ_6) direction = 8; - else if (*cmd == WRITE_10 || *cmd == WRITE_6) + else if (*cmd->cmnd == WRITE_10 || *cmd->cmnd == WRITE_6) direction = 16; - memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen); + memcpy(ccb[mbo].cdb, cmd->cmnd, ccb[mbo].cdblen); if (bufflen) { struct scatterlist *sg; struct chain *cptr; -#ifdef DEBUG - unsigned char *ptr; -#endif - int i, sg_count = scsi_sg_count(SCpnt); + int i, sg_count = scsi_sg_count(cmd); + ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */ - SCpnt->host_scribble = kmalloc(sizeof(*cptr)*sg_count, + cmd->host_scribble = kmalloc(sizeof(*cptr)*sg_count, GFP_KERNEL | GFP_DMA); - cptr = (struct chain *) SCpnt->host_scribble; + cptr = (struct chain *) cmd->host_scribble; if (cptr == NULL) { /* free the claimed mailbox slot */ - HOSTDATA(SCpnt->device->host)->SCint[mbo] = NULL; + aha1542->int_cmds[mbo] = NULL; + spin_unlock_irqrestore(sh->host_lock, flags); return SCSI_MLQUEUE_HOST_BUSY; } - scsi_for_each_sg(SCpnt, sg, sg_count, i) { - any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg)); + scsi_for_each_sg(cmd, sg, sg_count, i) { + any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg)) + + sg->offset); any2scsi(cptr[i].datalen, sg->length); }; any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain)); - any2scsi(ccb[mbo].dataptr, 
SCSI_BUF_PA(cptr)); + any2scsi(ccb[mbo].dataptr, isa_virt_to_bus(cptr)); #ifdef DEBUG - printk("cptr %x: ", cptr); - ptr = (unsigned char *) cptr; - for (i = 0; i < 18; i++) - printk("%02x ", ptr[i]); + shost_printk(KERN_DEBUG, sh, "cptr %p: ", cptr); + print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, cptr, 18); #endif } else { ccb[mbo].op = 0; /* SCSI Initiator Command */ - SCpnt->host_scribble = NULL; + cmd->host_scribble = NULL; any2scsi(ccb[mbo].datalen, 0); any2scsi(ccb[mbo].dataptr, 0); }; @@ -694,139 +477,116 @@ static int aha1542_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd * ccb[mbo].commlinkid = 0; #ifdef DEBUG - { - int i; - printk(KERN_DEBUG "aha1542_command: sending.. "); - for (i = 0; i < sizeof(ccb[mbo]) - 10; i++) - printk("%02x ", ((unchar *) & ccb[mbo])[i]); - }; + print_hex_dump_bytes("sending: ", DUMP_PREFIX_NONE, &ccb[mbo], sizeof(ccb[mbo]) - 10); + printk("aha1542_queuecommand: now waiting for interrupt "); #endif - - if (done) { - DEB(printk("aha1542_queuecommand: now waiting for interrupt "); - aha1542_stat()); - SCpnt->scsi_done = done; - mb[mbo].status = 1; - aha1542_out(SCpnt->device->host->io_port, &ahacmd, 1); /* start scsi command */ - DEB(aha1542_stat()); - } else - printk("aha1542_queuecommand: done can't be NULL\n"); + mb[mbo].status = 1; + aha1542_outb(cmd->device->host->io_port, CMD_START_SCSI); + spin_unlock_irqrestore(sh->host_lock, flags); return 0; } -static DEF_SCSI_QCMD(aha1542_queuecommand) - /* Initialize mailboxes */ -static void setup_mailboxes(int bse, struct Scsi_Host *shpnt) +static void setup_mailboxes(struct Scsi_Host *sh) { + struct aha1542_hostdata *aha1542 = shost_priv(sh); int i; - struct mailbox *mb; - struct ccb *ccb; - - unchar cmd[5] = { CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0}; + struct mailbox *mb = aha1542->mb; + struct ccb *ccb = aha1542->ccb; - mb = HOSTDATA(shpnt)->mb; - ccb = HOSTDATA(shpnt)->ccb; + u8 mb_cmd[5] = { CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0}; for (i = 0; i < AHA1542_MAILBOXES; i++) { mb[i].status = mb[AHA1542_MAILBOXES + i].status = 0; - any2scsi(mb[i].ccbptr, SCSI_BUF_PA(&ccb[i])); + any2scsi(mb[i].ccbptr, isa_virt_to_bus(&ccb[i])); }; - aha1542_intr_reset(bse); /* reset interrupts, so they don't block */ - any2scsi((cmd + 2), SCSI_BUF_PA(mb)); - aha1542_out(bse, cmd, 5); - WAIT(INTRFLAGS(bse), INTRMASK, HACC, 0); - while (0) { -fail: - printk(KERN_ERR "aha1542_detect: failed setting up mailboxes\n"); - } - aha1542_intr_reset(bse); + aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */ + any2scsi((mb_cmd + 2), isa_virt_to_bus(mb)); + if (aha1542_out(sh->io_port, mb_cmd, 5)) + shost_printk(KERN_ERR, sh, "failed setting up mailboxes\n"); + aha1542_intr_reset(sh->io_port); } -static int __init aha1542_getconfig(int base_io, unsigned char *irq_level, unsigned char *dma_chan, unsigned char *scsi_id) +static int aha1542_getconfig(struct Scsi_Host *sh) { - unchar inquiry_cmd[] = {CMD_RETCONF}; - unchar inquiry_result[3]; + u8 inquiry_result[3]; int i; - i = inb(STATUS(base_io)); + i = inb(STATUS(sh->io_port)); if (i & DF) { - i = inb(DATA(base_io)); + i = inb(DATA(sh->io_port)); }; - aha1542_out(base_io, inquiry_cmd, 1); - aha1542_in(base_io, inquiry_result, 3); - WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0); - while (0) { -fail: - printk(KERN_ERR "aha1542_detect: query board settings\n"); - } - aha1542_intr_reset(base_io); + aha1542_outb(sh->io_port, CMD_RETCONF); + aha1542_in(sh->io_port, inquiry_result, 3, 0); + if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0)) + 
shost_printk(KERN_ERR, sh, "error querying board settings\n"); + aha1542_intr_reset(sh->io_port); switch (inquiry_result[0]) { case 0x80: - *dma_chan = 7; + sh->dma_channel = 7; break; case 0x40: - *dma_chan = 6; + sh->dma_channel = 6; break; case 0x20: - *dma_chan = 5; + sh->dma_channel = 5; break; case 0x01: - *dma_chan = 0; + sh->dma_channel = 0; break; case 0: /* This means that the adapter, although Adaptec 1542 compatible, doesn't use a DMA channel. Currently only aware of the BusLogic BT-445S VL-Bus adapter which needs this. */ - *dma_chan = 0xFF; + sh->dma_channel = 0xFF; break; default: - printk(KERN_ERR "Unable to determine Adaptec DMA priority. Disabling board\n"); + shost_printk(KERN_ERR, sh, "Unable to determine DMA channel.\n"); return -1; }; switch (inquiry_result[1]) { case 0x40: - *irq_level = 15; + sh->irq = 15; break; case 0x20: - *irq_level = 14; + sh->irq = 14; break; case 0x8: - *irq_level = 12; + sh->irq = 12; break; case 0x4: - *irq_level = 11; + sh->irq = 11; break; case 0x2: - *irq_level = 10; + sh->irq = 10; break; case 0x1: - *irq_level = 9; + sh->irq = 9; break; default: - printk(KERN_ERR "Unable to determine Adaptec IRQ level. Disabling board\n"); + shost_printk(KERN_ERR, sh, "Unable to determine IRQ level.\n"); return -1; }; - *scsi_id = inquiry_result[2] & 7; + sh->this_id = inquiry_result[2] & 7; return 0; } /* This function should only be called for 1542C boards - we can detect the special firmware settings and unlock the board */ -static int __init aha1542_mbenable(int base) +static int aha1542_mbenable(struct Scsi_Host *sh) { - static unchar mbenable_cmd[3]; - static unchar mbenable_result[2]; + static u8 mbenable_cmd[3]; + static u8 mbenable_result[2]; int retval; retval = BIOS_TRANSLATION_6432; - mbenable_cmd[0] = CMD_EXTBIOS; - aha1542_out(base, mbenable_cmd, 1); - if (aha1542_in1(base, mbenable_result, 2)) + aha1542_outb(sh->io_port, CMD_EXTBIOS); + if (aha1542_in(sh->io_port, mbenable_result, 2, 100)) return retval; - WAITd(INTRFLAGS(base), INTRMASK, HACC, 0, 100); - aha1542_intr_reset(base); + if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 100)) + goto fail; + aha1542_intr_reset(sh->io_port); if ((mbenable_result[0] & 0x08) || mbenable_result[1]) { mbenable_cmd[0] = CMD_MBENABLE; @@ -836,37 +596,34 @@ static int __init aha1542_mbenable(int base) if ((mbenable_result[0] & 0x08) && (mbenable_result[1] & 0x03)) retval = BIOS_TRANSLATION_25563; - aha1542_out(base, mbenable_cmd, 3); - WAIT(INTRFLAGS(base), INTRMASK, HACC, 0); + if (aha1542_out(sh->io_port, mbenable_cmd, 3)) + goto fail; }; while (0) { fail: - printk(KERN_ERR "aha1542_mbenable: Mailbox init failed\n"); + shost_printk(KERN_ERR, sh, "Mailbox init failed\n"); } - aha1542_intr_reset(base); + aha1542_intr_reset(sh->io_port); return retval; } /* Query the board to find out if it is a 1542 or a 1740, or whatever. 
*/ -static int __init aha1542_query(int base_io, int *transl) +static int aha1542_query(struct Scsi_Host *sh) { - unchar inquiry_cmd[] = {CMD_INQUIRY}; - unchar inquiry_result[4]; + struct aha1542_hostdata *aha1542 = shost_priv(sh); + u8 inquiry_result[4]; int i; - i = inb(STATUS(base_io)); + i = inb(STATUS(sh->io_port)); if (i & DF) { - i = inb(DATA(base_io)); + i = inb(DATA(sh->io_port)); }; - aha1542_out(base_io, inquiry_cmd, 1); - aha1542_in(base_io, inquiry_result, 4); - WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0); - while (0) { -fail: - printk(KERN_ERR "aha1542_detect: query card type\n"); - } - aha1542_intr_reset(base_io); + aha1542_outb(sh->io_port, CMD_INQUIRY); + aha1542_in(sh->io_port, inquiry_result, 4, 0); + if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0)) + shost_printk(KERN_ERR, sh, "error querying card type\n"); + aha1542_intr_reset(sh->io_port); - *transl = BIOS_TRANSLATION_6432; /* Default case */ + aha1542->bios_translation = BIOS_TRANSLATION_6432; /* Default case */ /* For an AHA1740 series board, we ignore the board since there is a hardware bug which can lead to wrong blocks being returned if the board @@ -875,391 +632,198 @@ fail: */ if (inquiry_result[0] == 0x43) { - printk(KERN_INFO "aha1542.c: Emulation mode not supported for AHA 174N hardware.\n"); + shost_printk(KERN_INFO, sh, "Emulation mode not supported for AHA-1740 hardware, use aha1740 driver instead.\n"); return 1; }; /* Always call this - boards that do not support extended bios translation will ignore the command, and we will set the proper default */ - *transl = aha1542_mbenable(base_io); + aha1542->bios_translation = aha1542_mbenable(sh); return 0; } -#ifndef MODULE -static char *setup_str[MAXBOARDS] __initdata; -static int setup_idx = 0; - -static void __init aha1542_setup(char *str, int *ints) +static u8 dma_speed_hw(int dma_speed) { - const char *ahausage = "aha1542: usage: aha1542=<PORTBASE>[,<BUSON>,<BUSOFF>[,<DMASPEED>]]\n"; - int setup_portbase; - - if (setup_idx >= MAXBOARDS) { - printk(KERN_ERR "aha1542: aha1542_setup called too many times! Bad LILO params ?\n"); - printk(KERN_ERR " Entryline 1: %s\n", setup_str[0]); - printk(KERN_ERR " Entryline 2: %s\n", setup_str[1]); - printk(KERN_ERR " This line: %s\n", str); - return; - } - if (ints[0] < 1 || ints[0] > 4) { - printk(KERN_ERR "aha1542: %s\n", str); - printk(ahausage); - printk(KERN_ERR "aha1542: Wrong parameters may cause system malfunction.. We try anyway..\n"); - } - setup_called[setup_idx] = ints[0]; - setup_str[setup_idx] = str; - - setup_portbase = ints[0] >= 1 ? ints[1] : 0; /* Preserve the default value.. */ - setup_buson[setup_idx] = ints[0] >= 2 ? ints[2] : 7; - setup_busoff[setup_idx] = ints[0] >= 3 ? ints[3] : 5; - if (ints[0] >= 4) - { - int atbt = -1; - switch (ints[4]) { - case 5: - atbt = 0x00; - break; - case 6: - atbt = 0x04; - break; - case 7: - atbt = 0x01; - break; - case 8: - atbt = 0x02; - break; - case 10: - atbt = 0x03; - break; - default: - printk(KERN_ERR "aha1542: %s\n", str); - printk(ahausage); - printk(KERN_ERR "aha1542: Valid values for DMASPEED are 5-8, 10 MB/s. 
Using jumper defaults.\n"); - break; - } - setup_dmaspeed[setup_idx] = atbt; + switch (dma_speed) { + case 5: + return 0x00; + case 6: + return 0x04; + case 7: + return 0x01; + case 8: + return 0x02; + case 10: + return 0x03; } - if (setup_portbase != 0) - bases[setup_idx] = setup_portbase; - ++setup_idx; + return 0xff; /* invalid */ } -static int __init do_setup(char *str) +/* Set the Bus on/off-times as not to ruin floppy performance */ +static void aha1542_set_bus_times(struct Scsi_Host *sh, int bus_on, int bus_off, int dma_speed) { - int ints[5]; + if (bus_on > 0) { + u8 oncmd[] = { CMD_BUSON_TIME, clamp(bus_on, 2, 15) }; - int count=setup_idx; + aha1542_intr_reset(sh->io_port); + if (aha1542_out(sh->io_port, oncmd, 2)) + goto fail; + } - get_options(str, ARRAY_SIZE(ints), ints); - aha1542_setup(str,ints); + if (bus_off > 0) { + u8 offcmd[] = { CMD_BUSOFF_TIME, clamp(bus_off, 1, 64) }; - return count<setup_idx; -} + aha1542_intr_reset(sh->io_port); + if (aha1542_out(sh->io_port, offcmd, 2)) + goto fail; + } -__setup("aha1542=",do_setup); -#endif + if (dma_speed_hw(dma_speed) != 0xff) { + u8 dmacmd[] = { CMD_DMASPEED, dma_speed_hw(dma_speed) }; + + aha1542_intr_reset(sh->io_port); + if (aha1542_out(sh->io_port, dmacmd, 2)) + goto fail; + } + aha1542_intr_reset(sh->io_port); + return; +fail: + shost_printk(KERN_ERR, sh, "setting bus on/off-time failed\n"); + aha1542_intr_reset(sh->io_port); +} /* return non-zero on detection */ -static int __init aha1542_detect(struct scsi_host_template * tpnt) +static struct Scsi_Host *aha1542_hw_init(struct scsi_host_template *tpnt, struct device *pdev, int indx) { - unsigned char dma_chan; - unsigned char irq_level; - unsigned char scsi_id; - unsigned long flags; - unsigned int base_io; - int trans; - struct Scsi_Host *shpnt = NULL; - int count = 0; - int indx; - - DEB(printk("aha1542_detect: \n")); - - tpnt->proc_name = "aha1542"; - -#ifdef MODULE - bases[0] = aha1542[0]; - setup_buson[0] = aha1542[1]; - setup_busoff[0] = aha1542[2]; - { - int atbt = -1; - switch (aha1542[3]) { - case 5: - atbt = 0x00; - break; - case 6: - atbt = 0x04; - break; - case 7: - atbt = 0x01; - break; - case 8: - atbt = 0x02; - break; - case 10: - atbt = 0x03; - break; - }; - setup_dmaspeed[0] = atbt; + unsigned int base_io = io[indx]; + struct Scsi_Host *sh; + struct aha1542_hostdata *aha1542; + char dma_info[] = "no DMA"; + + if (base_io == 0) + return NULL; + + if (!request_region(base_io, AHA1542_REGION_SIZE, "aha1542")) + return NULL; + + sh = scsi_host_alloc(tpnt, sizeof(struct aha1542_hostdata)); + if (!sh) + goto release; + aha1542 = shost_priv(sh); + + sh->unique_id = base_io; + sh->io_port = base_io; + sh->n_io_port = AHA1542_REGION_SIZE; + aha1542->aha1542_last_mbi_used = 2 * AHA1542_MAILBOXES - 1; + aha1542->aha1542_last_mbo_used = AHA1542_MAILBOXES - 1; + + if (!aha1542_test_port(sh)) + goto unregister; + + aha1542_set_bus_times(sh, bus_on[indx], bus_off[indx], dma_speed[indx]); + if (aha1542_query(sh)) + goto unregister; + if (aha1542_getconfig(sh) == -1) + goto unregister; + + if (sh->dma_channel != 0xFF) + snprintf(dma_info, sizeof(dma_info), "DMA %d", sh->dma_channel); + shost_printk(KERN_INFO, sh, "Adaptec AHA-1542 (SCSI-ID %d) at IO 0x%x, IRQ %d, %s\n", + sh->this_id, base_io, sh->irq, dma_info); + if (aha1542->bios_translation == BIOS_TRANSLATION_25563) + shost_printk(KERN_INFO, sh, "Using extended bios translation\n"); + + setup_mailboxes(sh); + + if (request_irq(sh->irq, aha1542_interrupt, 0, "aha1542", sh)) { + shost_printk(KERN_ERR, sh, "Unable to 
allocate IRQ.\n"); + goto unregister; } -#endif - - /* - * Hunt for ISA Plug'n'Pray Adaptecs (AHA1535) - */ - - if(isapnp) - { - struct pnp_dev *pdev = NULL; - for(indx = 0; indx < ARRAY_SIZE(bases); indx++) { - if(bases[indx]) - continue; - pdev = pnp_find_dev(NULL, ISAPNP_VENDOR('A', 'D', 'P'), - ISAPNP_FUNCTION(0x1542), pdev); - if(pdev==NULL) - break; - /* - * Activate the PnP card - */ - - if(pnp_device_attach(pdev)<0) - continue; - - if(pnp_activate_dev(pdev)<0) { - pnp_device_detach(pdev); - continue; - } - - if(!pnp_port_valid(pdev, 0)) { - pnp_device_detach(pdev); - continue; - } - - bases[indx] = pnp_port_start(pdev, 0); - - /* The card can be queried for its DMA, we have - the DMA set up that is enough */ - - printk(KERN_INFO "ISAPnP found an AHA1535 at I/O 0x%03X\n", bases[indx]); + if (sh->dma_channel != 0xFF) { + if (request_dma(sh->dma_channel, "aha1542")) { + shost_printk(KERN_ERR, sh, "Unable to allocate DMA channel.\n"); + goto free_irq; + } + if (sh->dma_channel == 0 || sh->dma_channel >= 5) { + set_dma_mode(sh->dma_channel, DMA_MODE_CASCADE); + enable_dma(sh->dma_channel); } } - for (indx = 0; indx < ARRAY_SIZE(bases); indx++) - if (bases[indx] != 0 && request_region(bases[indx], 4, "aha1542")) { - shpnt = scsi_register(tpnt, - sizeof(struct aha1542_hostdata)); - - if(shpnt==NULL) { - release_region(bases[indx], 4); - continue; - } - if (!aha1542_test_port(bases[indx], shpnt)) - goto unregister; - - base_io = bases[indx]; - - /* Set the Bus on/off-times as not to ruin floppy performance */ - { - unchar oncmd[] = {CMD_BUSON_TIME, 7}; - unchar offcmd[] = {CMD_BUSOFF_TIME, 5}; - - if (setup_called[indx]) { - oncmd[1] = setup_buson[indx]; - offcmd[1] = setup_busoff[indx]; - } - aha1542_intr_reset(base_io); - aha1542_out(base_io, oncmd, 2); - WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0); - aha1542_intr_reset(base_io); - aha1542_out(base_io, offcmd, 2); - WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0); - if (setup_dmaspeed[indx] >= 0) { - unchar dmacmd[] = {CMD_DMASPEED, 0}; - dmacmd[1] = setup_dmaspeed[indx]; - aha1542_intr_reset(base_io); - aha1542_out(base_io, dmacmd, 2); - WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0); - } - while (0) { -fail: - printk(KERN_ERR "aha1542_detect: setting bus on/off-time failed\n"); - } - aha1542_intr_reset(base_io); - } - if (aha1542_query(base_io, &trans)) - goto unregister; - - if (aha1542_getconfig(base_io, &irq_level, &dma_chan, &scsi_id) == -1) - goto unregister; - - printk(KERN_INFO "Configuring Adaptec (SCSI-ID %d) at IO:%x, IRQ %d", scsi_id, base_io, irq_level); - if (dma_chan != 0xFF) - printk(", DMA priority %d", dma_chan); - printk("\n"); - - DEB(aha1542_stat()); - setup_mailboxes(base_io, shpnt); - - DEB(aha1542_stat()); - - DEB(printk("aha1542_detect: enable interrupt channel %d\n", irq_level)); - spin_lock_irqsave(&aha1542_lock, flags); - if (request_irq(irq_level, do_aha1542_intr_handle, 0, - "aha1542", shpnt)) { - printk(KERN_ERR "Unable to allocate IRQ for adaptec controller.\n"); - spin_unlock_irqrestore(&aha1542_lock, flags); - goto unregister; - } - if (dma_chan != 0xFF) { - if (request_dma(dma_chan, "aha1542")) { - printk(KERN_ERR "Unable to allocate DMA channel for Adaptec.\n"); - free_irq(irq_level, shpnt); - spin_unlock_irqrestore(&aha1542_lock, flags); - goto unregister; - } - if (dma_chan == 0 || dma_chan >= 5) { - set_dma_mode(dma_chan, DMA_MODE_CASCADE); - enable_dma(dma_chan); - } - } - - shpnt->this_id = scsi_id; - shpnt->unique_id = base_io; - shpnt->io_port = base_io; - shpnt->n_io_port = 4; /* Number of bytes of 
I/O space used */ - shpnt->dma_channel = dma_chan; - shpnt->irq = irq_level; - HOSTDATA(shpnt)->bios_translation = trans; - if (trans == BIOS_TRANSLATION_25563) - printk(KERN_INFO "aha1542.c: Using extended bios translation\n"); - HOSTDATA(shpnt)->aha1542_last_mbi_used = (2 * AHA1542_MAILBOXES - 1); - HOSTDATA(shpnt)->aha1542_last_mbo_used = (AHA1542_MAILBOXES - 1); - memset(HOSTDATA(shpnt)->SCint, 0, sizeof(HOSTDATA(shpnt)->SCint)); - spin_unlock_irqrestore(&aha1542_lock, flags); -#if 0 - DEB(printk(" *** READ CAPACITY ***\n")); - - { - unchar buf[8]; - static unchar cmd[] = { READ_CAPACITY, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - int i; - - for (i = 0; i < sizeof(buf); ++i) - buf[i] = 0x87; - for (i = 0; i < 2; ++i) - if (!aha1542_command(i, cmd, buf, sizeof(buf))) { - printk(KERN_DEBUG "aha_detect: LU %d sector_size %d device_size %d\n", - i, xscsi2int(buf + 4), xscsi2int(buf)); - } - } - DEB(printk(" *** NOW RUNNING MY OWN TEST *** \n")); + if (scsi_add_host(sh, pdev)) + goto free_dma; - for (i = 0; i < 4; ++i) { - unsigned char cmd[10]; - static buffer[512]; + scsi_scan_host(sh); - cmd[0] = READ_10; - cmd[1] = 0; - xany2scsi(cmd + 2, i); - cmd[6] = 0; - cmd[7] = 0; - cmd[8] = 1; - cmd[9] = 0; - aha1542_command(0, cmd, buffer, 512); - } -#endif - count++; - continue; + return sh; +free_dma: + if (sh->dma_channel != 0xff) + free_dma(sh->dma_channel); +free_irq: + free_irq(sh->irq, sh); unregister: - release_region(bases[indx], 4); - scsi_unregister(shpnt); - continue; + scsi_host_put(sh); +release: + release_region(base_io, AHA1542_REGION_SIZE); - }; - - return count; + return NULL; } -static int aha1542_release(struct Scsi_Host *shost) +static int aha1542_release(struct Scsi_Host *sh) { - if (shost->irq) - free_irq(shost->irq, shost); - if (shost->dma_channel != 0xff) - free_dma(shost->dma_channel); - if (shost->io_port && shost->n_io_port) - release_region(shost->io_port, shost->n_io_port); - scsi_unregister(shost); + scsi_remove_host(sh); + if (sh->dma_channel != 0xff) + free_dma(sh->dma_channel); + if (sh->irq) + free_irq(sh->irq, sh); + if (sh->io_port && sh->n_io_port) + release_region(sh->io_port, sh->n_io_port); + scsi_host_put(sh); return 0; } -static int aha1542_restart(struct Scsi_Host *shost) -{ - int i; - int count = 0; -#if 0 - unchar ahacmd = CMD_START_SCSI; -#endif - - for (i = 0; i < AHA1542_MAILBOXES; i++) - if (HOSTDATA(shost)->SCint[i] && - !(HOSTDATA(shost)->SCint[i]->device->soft_reset)) { -#if 0 - HOSTDATA(shost)->mb[i].status = 1; /* Indicate ready to restart... */ -#endif - count++; - } - printk(KERN_DEBUG "Potential to restart %d stalled commands...\n", count); -#if 0 - /* start scsi command */ - if (count) - aha1542_out(shost->io_port, &ahacmd, 1); -#endif - return 0; -} /* * This is a device reset. This is handled by sending a special command * to the device. 
*/ -static int aha1542_dev_reset(Scsi_Cmnd * SCpnt) +static int aha1542_dev_reset(struct scsi_cmnd *cmd) { + struct Scsi_Host *sh = cmd->device->host; + struct aha1542_hostdata *aha1542 = shost_priv(sh); unsigned long flags; - struct mailbox *mb; - unchar target = SCpnt->device->id; - unchar lun = SCpnt->device->lun; + struct mailbox *mb = aha1542->mb; + u8 target = cmd->device->id; + u8 lun = cmd->device->lun; int mbo; - struct ccb *ccb; - unchar ahacmd = CMD_START_SCSI; - - ccb = HOSTDATA(SCpnt->device->host)->ccb; - mb = HOSTDATA(SCpnt->device->host)->mb; + struct ccb *ccb = aha1542->ccb; - spin_lock_irqsave(&aha1542_lock, flags); - mbo = HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used + 1; + spin_lock_irqsave(sh->host_lock, flags); + mbo = aha1542->aha1542_last_mbo_used + 1; if (mbo >= AHA1542_MAILBOXES) mbo = 0; do { - if (mb[mbo].status == 0 && HOSTDATA(SCpnt->device->host)->SCint[mbo] == NULL) + if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL) break; mbo++; if (mbo >= AHA1542_MAILBOXES) mbo = 0; - } while (mbo != HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used); + } while (mbo != aha1542->aha1542_last_mbo_used); - if (mb[mbo].status || HOSTDATA(SCpnt->device->host)->SCint[mbo]) + if (mb[mbo].status || aha1542->int_cmds[mbo]) panic("Unable to find empty mailbox for aha1542.\n"); - HOSTDATA(SCpnt->device->host)->SCint[mbo] = SCpnt; /* This will effectively - prevent someone else from - screwing with this cdb. */ + aha1542->int_cmds[mbo] = cmd; /* This will effectively + prevent someone else from + screwing with this cdb. */ - HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used = mbo; - spin_unlock_irqrestore(&aha1542_lock, flags); + aha1542->aha1542_last_mbo_used = mbo; - any2scsi(mb[mbo].ccbptr, SCSI_BUF_PA(&ccb[mbo])); /* This gets trashed for some reason */ + any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo])); /* This gets trashed for some reason */ memset(&ccb[mbo], 0, sizeof(struct ccb)); @@ -1274,141 +838,43 @@ static int aha1542_dev_reset(Scsi_Cmnd * SCpnt) * Now tell the 1542 to flush all pending commands for this * target */ - aha1542_out(SCpnt->device->host->io_port, &ahacmd, 1); + aha1542_outb(sh->io_port, CMD_START_SCSI); + spin_unlock_irqrestore(sh->host_lock, flags); - scmd_printk(KERN_WARNING, SCpnt, + scmd_printk(KERN_WARNING, cmd, "Trying device reset for target\n"); return SUCCESS; - - -#ifdef ERIC_neverdef - /* - * With the 1542 we apparently never get an interrupt to - * acknowledge a device reset being sent. Then again, Leonard - * says we are doing this wrong in the first place... - * - * Take a wait and see attitude. If we get spurious interrupts, - * then the device reset is doing something sane and useful, and - * we will wait for the interrupt to post completion. - */ - printk(KERN_WARNING "Sent BUS DEVICE RESET to target %d\n", SCpnt->target); - - /* - * Free the command block for all commands running on this - * target... 
- */ - for (i = 0; i < AHA1542_MAILBOXES; i++) { - if (HOSTDATA(SCpnt->host)->SCint[i] && - HOSTDATA(SCpnt->host)->SCint[i]->target == SCpnt->target) { - Scsi_Cmnd *SCtmp; - SCtmp = HOSTDATA(SCpnt->host)->SCint[i]; - kfree(SCtmp->host_scribble); - SCtmp->host_scribble = NULL; - HOSTDATA(SCpnt->host)->SCint[i] = NULL; - HOSTDATA(SCpnt->host)->mb[i].status = 0; - } - } - return SUCCESS; - - return FAILED; -#endif /* ERIC_neverdef */ } -static int aha1542_bus_reset(Scsi_Cmnd * SCpnt) +static int aha1542_reset(struct scsi_cmnd *cmd, u8 reset_cmd) { + struct Scsi_Host *sh = cmd->device->host; + struct aha1542_hostdata *aha1542 = shost_priv(sh); + unsigned long flags; int i; + spin_lock_irqsave(sh->host_lock, flags); /* * This does a scsi reset for all devices on the bus. * In principle, we could also reset the 1542 - should * we do this? Try this first, and we can add that later * if it turns out to be useful. */ - outb(SCRST, CONTROL(SCpnt->device->host->io_port)); + outb(reset_cmd, CONTROL(cmd->device->host->io_port)); - /* - * Wait for the thing to settle down a bit. Unfortunately - * this is going to basically lock up the machine while we - * wait for this to complete. To be 100% correct, we need to - * check for timeout, and if we are doing something like this - * we are pretty desperate anyways. - */ - ssleep(4); - - spin_lock_irq(SCpnt->device->host->host_lock); - - WAIT(STATUS(SCpnt->device->host->io_port), - STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF); - - /* - * Now try to pick up the pieces. For all pending commands, - * free any internal data structures, and basically clear things - * out. We do not try and restart any commands or anything - - * the strategy handler takes care of that crap. - */ - printk(KERN_WARNING "Sent BUS RESET to scsi host %d\n", SCpnt->device->host->host_no); - - for (i = 0; i < AHA1542_MAILBOXES; i++) { - if (HOSTDATA(SCpnt->device->host)->SCint[i] != NULL) { - Scsi_Cmnd *SCtmp; - SCtmp = HOSTDATA(SCpnt->device->host)->SCint[i]; - - - if (SCtmp->device->soft_reset) { - /* - * If this device implements the soft reset option, - * then it is still holding onto the command, and - * may yet complete it. In this case, we don't - * flush the data. - */ - continue; - } - kfree(SCtmp->host_scribble); - SCtmp->host_scribble = NULL; - HOSTDATA(SCpnt->device->host)->SCint[i] = NULL; - HOSTDATA(SCpnt->device->host)->mb[i].status = 0; - } + if (!wait_mask(STATUS(cmd->device->host->io_port), + STATMASK, IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0)) { + spin_unlock_irqrestore(sh->host_lock, flags); + return FAILED; } - spin_unlock_irq(SCpnt->device->host->host_lock); - return SUCCESS; - -fail: - spin_unlock_irq(SCpnt->device->host->host_lock); - return FAILED; -} - -static int aha1542_host_reset(Scsi_Cmnd * SCpnt) -{ - int i; - - /* - * This does a scsi reset for all devices on the bus. - * In principle, we could also reset the 1542 - should - * we do this? Try this first, and we can add that later - * if it turns out to be useful. - */ - outb(HRST | SCRST, CONTROL(SCpnt->device->host->io_port)); - - /* - * Wait for the thing to settle down a bit. Unfortunately - * this is going to basically lock up the machine while we - * wait for this to complete. To be 100% correct, we need to - * check for timeout, and if we are doing something like this - * we are pretty desperate anyways. 
- */ - ssleep(4); - spin_lock_irq(SCpnt->device->host->host_lock); - - WAIT(STATUS(SCpnt->device->host->io_port), - STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF); - /* * We need to do this too before the 1542 can interact with - * us again. + * us again after host reset. */ - setup_mailboxes(SCpnt->device->host->io_port, SCpnt->device->host); + if (reset_cmd & HRST) + setup_mailboxes(cmd->device->host); /* * Now try to pick up the pieces. For all pending commands, @@ -1416,14 +882,14 @@ static int aha1542_host_reset(Scsi_Cmnd * SCpnt) * out. We do not try and restart any commands or anything - * the strategy handler takes care of that crap. */ - printk(KERN_WARNING "Sent BUS RESET to scsi host %d\n", SCpnt->device->host->host_no); + shost_printk(KERN_WARNING, cmd->device->host, "Sent BUS RESET to scsi host %d\n", cmd->device->host->host_no); for (i = 0; i < AHA1542_MAILBOXES; i++) { - if (HOSTDATA(SCpnt->device->host)->SCint[i] != NULL) { - Scsi_Cmnd *SCtmp; - SCtmp = HOSTDATA(SCpnt->device->host)->SCint[i]; + if (aha1542->int_cmds[i] != NULL) { + struct scsi_cmnd *tmp_cmd; + tmp_cmd = aha1542->int_cmds[i]; - if (SCtmp->device->soft_reset) { + if (tmp_cmd->device->soft_reset) { /* * If this device implements the soft reset option, * then it is still holding onto the command, and @@ -1432,241 +898,51 @@ static int aha1542_host_reset(Scsi_Cmnd * SCpnt) */ continue; } - kfree(SCtmp->host_scribble); - SCtmp->host_scribble = NULL; - HOSTDATA(SCpnt->device->host)->SCint[i] = NULL; - HOSTDATA(SCpnt->device->host)->mb[i].status = 0; + kfree(tmp_cmd->host_scribble); + tmp_cmd->host_scribble = NULL; + aha1542->int_cmds[i] = NULL; + aha1542->mb[i].status = 0; } } - spin_unlock_irq(SCpnt->device->host->host_lock); + spin_unlock_irqrestore(sh->host_lock, flags); return SUCCESS; - -fail: - spin_unlock_irq(SCpnt->device->host->host_lock); - return FAILED; } -#if 0 -/* - * These are the old error handling routines. They are only temporarily - * here while we play with the new error handling code. - */ -static int aha1542_old_abort(Scsi_Cmnd * SCpnt) +static int aha1542_bus_reset(struct scsi_cmnd *cmd) { -#if 0 - unchar ahacmd = CMD_START_SCSI; - unsigned long flags; - struct mailbox *mb; - int mbi, mbo, i; - - printk(KERN_DEBUG "In aha1542_abort: %x %x\n", - inb(STATUS(SCpnt->host->io_port)), - inb(INTRFLAGS(SCpnt->host->io_port))); - - spin_lock_irqsave(&aha1542_lock, flags); - mb = HOSTDATA(SCpnt->host)->mb; - mbi = HOSTDATA(SCpnt->host)->aha1542_last_mbi_used + 1; - if (mbi >= 2 * AHA1542_MAILBOXES) - mbi = AHA1542_MAILBOXES; - - do { - if (mb[mbi].status != 0) - break; - mbi++; - if (mbi >= 2 * AHA1542_MAILBOXES) - mbi = AHA1542_MAILBOXES; - } while (mbi != HOSTDATA(SCpnt->host)->aha1542_last_mbi_used); - spin_unlock_irqrestore(&aha1542_lock, flags); - - if (mb[mbi].status) { - printk(KERN_ERR "Lost interrupt discovered on irq %d - attempting to recover\n", - SCpnt->host->irq); - aha1542_intr_handle(SCpnt->host, NULL); - return 0; - } - /* OK, no lost interrupt. Try looking to see how many pending commands - we think we have. */ - - for (i = 0; i < AHA1542_MAILBOXES; i++) - if (HOSTDATA(SCpnt->host)->SCint[i]) { - if (HOSTDATA(SCpnt->host)->SCint[i] == SCpnt) { - printk(KERN_ERR "Timed out command pending for %s\n", - SCpnt->request->rq_disk ? - SCpnt->request->rq_disk->disk_name : "?" 
- ); - if (HOSTDATA(SCpnt->host)->mb[i].status) { - printk(KERN_ERR "OGMB still full - restarting\n"); - aha1542_out(SCpnt->host->io_port, &ahacmd, 1); - }; - } else - printk(KERN_ERR "Other pending command %s\n", - SCpnt->request->rq_disk ? - SCpnt->request->rq_disk->disk_name : "?" - ); - } -#endif - - DEB(printk("aha1542_abort\n")); -#if 0 - spin_lock_irqsave(&aha1542_lock, flags); - for (mbo = 0; mbo < AHA1542_MAILBOXES; mbo++) { - if (SCpnt == HOSTDATA(SCpnt->host)->SCint[mbo]) { - mb[mbo].status = 2; /* Abort command */ - aha1542_out(SCpnt->host->io_port, &ahacmd, 1); /* start scsi command */ - spin_unlock_irqrestore(&aha1542_lock, flags); - break; - } - } - if (AHA1542_MAILBOXES == mbo) - spin_unlock_irqrestore(&aha1542_lock, flags); -#endif - return SCSI_ABORT_SNOOZE; + return aha1542_reset(cmd, SCRST); } -/* We do not implement a reset function here, but the upper level code - assumes that it will get some kind of response for the command in - SCpnt. We must oblige, or the command will hang the scsi system. - For a first go, we assume that the 1542 notifies us with all of the - pending commands (it does implement soft reset, after all). */ - -static int aha1542_old_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags) +static int aha1542_host_reset(struct scsi_cmnd *cmd) { - unchar ahacmd = CMD_START_SCSI; - int i; - - /* - * See if a bus reset was suggested. - */ - if (reset_flags & SCSI_RESET_SUGGEST_BUS_RESET) { - /* - * This does a scsi reset for all devices on the bus. - * In principle, we could also reset the 1542 - should - * we do this? Try this first, and we can add that later - * if it turns out to be useful. - */ - outb(HRST | SCRST, CONTROL(SCpnt->host->io_port)); - - /* - * Wait for the thing to settle down a bit. Unfortunately - * this is going to basically lock up the machine while we - * wait for this to complete. To be 100% correct, we need to - * check for timeout, and if we are doing something like this - * we are pretty desperate anyways. - */ - WAIT(STATUS(SCpnt->host->io_port), - STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF); - - /* - * We need to do this too before the 1542 can interact with - * us again. - */ - setup_mailboxes(SCpnt->host->io_port, SCpnt->host); - - /* - * Now try to pick up the pieces. Restart all commands - * that are currently active on the bus, and reset all of - * the datastructures. We have some time to kill while - * things settle down, so print a nice message. - */ - printk(KERN_WARNING "Sent BUS RESET to scsi host %d\n", SCpnt->host->host_no); - - for (i = 0; i < AHA1542_MAILBOXES; i++) - if (HOSTDATA(SCpnt->host)->SCint[i] != NULL) { - Scsi_Cmnd *SCtmp; - SCtmp = HOSTDATA(SCpnt->host)->SCint[i]; - SCtmp->result = DID_RESET << 16; - kfree(SCtmp->host_scribble); - SCtmp->host_scribble = NULL; - printk(KERN_WARNING "Sending DID_RESET for target %d\n", SCpnt->target); - SCtmp->scsi_done(SCpnt); - - HOSTDATA(SCpnt->host)->SCint[i] = NULL; - HOSTDATA(SCpnt->host)->mb[i].status = 0; - } - /* - * Now tell the mid-level code what we did here. Since - * we have restarted all of the outstanding commands, - * then report SUCCESS. 
- */ - return (SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET); -fail: - printk(KERN_CRIT "aha1542.c: Unable to perform hard reset.\n"); - printk(KERN_CRIT "Power cycle machine to reset\n"); - return (SCSI_RESET_ERROR | SCSI_RESET_BUS_RESET); - - - } else { - /* This does a selective reset of just the one device */ - /* First locate the ccb for this command */ - for (i = 0; i < AHA1542_MAILBOXES; i++) - if (HOSTDATA(SCpnt->host)->SCint[i] == SCpnt) { - HOSTDATA(SCpnt->host)->ccb[i].op = 0x81; /* BUS DEVICE RESET */ - /* Now tell the 1542 to flush all pending commands for this target */ - aha1542_out(SCpnt->host->io_port, &ahacmd, 1); - - /* Here is the tricky part. What to do next. Do we get an interrupt - for the commands that we aborted with the specified target, or - do we generate this on our own? Try it without first and see - what happens */ - printk(KERN_WARNING "Sent BUS DEVICE RESET to target %d\n", SCpnt->target); - - /* If the first does not work, then try the second. I think the - first option is more likely to be correct. Free the command - block for all commands running on this target... */ - for (i = 0; i < AHA1542_MAILBOXES; i++) - if (HOSTDATA(SCpnt->host)->SCint[i] && - HOSTDATA(SCpnt->host)->SCint[i]->target == SCpnt->target) { - Scsi_Cmnd *SCtmp; - SCtmp = HOSTDATA(SCpnt->host)->SCint[i]; - SCtmp->result = DID_RESET << 16; - kfree(SCtmp->host_scribble); - SCtmp->host_scribble = NULL; - printk(KERN_WARNING "Sending DID_RESET for target %d\n", SCpnt->target); - SCtmp->scsi_done(SCpnt); - - HOSTDATA(SCpnt->host)->SCint[i] = NULL; - HOSTDATA(SCpnt->host)->mb[i].status = 0; - } - return SCSI_RESET_SUCCESS; - } - } - /* No active command at this time, so this means that each time we got - some kind of response the last time through. Tell the mid-level code - to request sense information in order to decide what to do next. 
*/ - return SCSI_RESET_PUNT; + return aha1542_reset(cmd, HRST | SCRST); } -#endif /* end of big comment block around old_abort + old_reset */ static int aha1542_biosparam(struct scsi_device *sdev, - struct block_device *bdev, sector_t capacity, int *ip) + struct block_device *bdev, sector_t capacity, int geom[]) { - int translation_algorithm; - int size = capacity; - - translation_algorithm = HOSTDATA(sdev->host)->bios_translation; + struct aha1542_hostdata *aha1542 = shost_priv(sdev->host); - if ((size >> 11) > 1024 && translation_algorithm == BIOS_TRANSLATION_25563) { + if (capacity >= 0x200000 && + aha1542->bios_translation == BIOS_TRANSLATION_25563) { /* Please verify that this is the same as what DOS returns */ - ip[0] = 255; - ip[1] = 63; - ip[2] = size / 255 / 63; + geom[0] = 255; /* heads */ + geom[1] = 63; /* sectors */ } else { - ip[0] = 64; - ip[1] = 32; - ip[2] = size >> 11; + geom[0] = 64; /* heads */ + geom[1] = 32; /* sectors */ } + geom[2] = sector_div(capacity, geom[0] * geom[1]); /* cylinders */ return 0; } MODULE_LICENSE("GPL"); - static struct scsi_host_template driver_template = { + .module = THIS_MODULE, .proc_name = "aha1542", .name = "Adaptec 1542", - .detect = aha1542_detect, - .release = aha1542_release, .queuecommand = aha1542_queuecommand, .eh_device_reset_handler= aha1542_dev_reset, .eh_bus_reset_handler = aha1542_bus_reset, @@ -1674,9 +950,124 @@ static struct scsi_host_template driver_template = { .bios_param = aha1542_biosparam, .can_queue = AHA1542_MAILBOXES, .this_id = 7, - .sg_tablesize = AHA1542_SCATTER, - .cmd_per_lun = AHA1542_CMDLUN, + .sg_tablesize = 16, + .cmd_per_lun = 1, .unchecked_isa_dma = 1, .use_clustering = ENABLE_CLUSTERING, }; -#include "scsi_module.c" + +static int aha1542_isa_match(struct device *pdev, unsigned int ndev) +{ + struct Scsi_Host *sh = aha1542_hw_init(&driver_template, pdev, ndev); + + if (!sh) + return 0; + + dev_set_drvdata(pdev, sh); + return 1; +} + +static int aha1542_isa_remove(struct device *pdev, + unsigned int ndev) +{ + aha1542_release(dev_get_drvdata(pdev)); + dev_set_drvdata(pdev, NULL); + return 0; +} + +static struct isa_driver aha1542_isa_driver = { + .match = aha1542_isa_match, + .remove = aha1542_isa_remove, + .driver = { + .name = "aha1542" + }, +}; +static int isa_registered; + +#ifdef CONFIG_PNP +static struct pnp_device_id aha1542_pnp_ids[] = { + { .id = "ADP1542" }, + { .id = "" } +}; +MODULE_DEVICE_TABLE(pnp, aha1542_pnp_ids); + +static int aha1542_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id) +{ + int indx; + struct Scsi_Host *sh; + + for (indx = 0; indx < ARRAY_SIZE(io); indx++) { + if (io[indx]) + continue; + + if (pnp_activate_dev(pdev) < 0) + continue; + + io[indx] = pnp_port_start(pdev, 0); + + /* The card can be queried for its DMA, we have + the DMA set up that is enough */ + + dev_info(&pdev->dev, "ISAPnP found an AHA1535 at I/O 0x%03X", io[indx]); + } + + sh = aha1542_hw_init(&driver_template, &pdev->dev, indx); + if (!sh) + return -ENODEV; + + pnp_set_drvdata(pdev, sh); + return 0; +} + +static void aha1542_pnp_remove(struct pnp_dev *pdev) +{ + aha1542_release(pnp_get_drvdata(pdev)); + pnp_set_drvdata(pdev, NULL); +} + +static struct pnp_driver aha1542_pnp_driver = { + .name = "aha1542", + .id_table = aha1542_pnp_ids, + .probe = aha1542_pnp_probe, + .remove = aha1542_pnp_remove, +}; +static int pnp_registered; +#endif /* CONFIG_PNP */ + +static int __init aha1542_init(void) +{ + int ret = 0; + +#ifdef CONFIG_PNP + if (isapnp) { + ret = 
pnp_register_driver(&aha1542_pnp_driver); + if (!ret) + pnp_registered = 1; + } +#endif + ret = isa_register_driver(&aha1542_isa_driver, MAXBOARDS); + if (!ret) + isa_registered = 1; + +#ifdef CONFIG_PNP + if (pnp_registered) + ret = 0; +#endif + if (isa_registered) + ret = 0; + + return ret; +} + +static void __exit aha1542_exit(void) +{ +#ifdef CONFIG_PNP + if (pnp_registered) + pnp_unregister_driver(&aha1542_pnp_driver); +#endif + if (isa_registered) + isa_unregister_driver(&aha1542_isa_driver); +} + +module_init(aha1542_init); +module_exit(aha1542_exit); diff --git a/drivers/scsi/aha1542.h b/drivers/scsi/aha1542.h index b871d2b57f93..0fe9bae1b3d1 100644 --- a/drivers/scsi/aha1542.h +++ b/drivers/scsi/aha1542.h @@ -1,64 +1,35 @@ -#ifndef _AHA1542_H - -/* $Id: aha1542.h,v 1.1 1992/07/24 06:27:38 root Exp root $ - * - * Header file for the adaptec 1542 driver for Linux - * - * $Log: aha1542.h,v $ - * Revision 1.1 1992/07/24 06:27:38 root - * Initial revision - * - * Revision 1.2 1992/07/04 18:41:49 root - * Replaced distribution with current drivers - * - * Revision 1.3 1992/06/23 23:58:20 root - * Fixes. - * - * Revision 1.2 1992/05/26 22:13:23 root - * Changed bug that prevented DMA above first 2 mbytes. - * - * Revision 1.1 1992/05/22 21:00:29 root - * Initial revision - * - * Revision 1.1 1992/04/24 18:01:50 root - * Initial revision - * - * Revision 1.1 1992/04/02 03:23:13 drew - * Initial revision - * - * Revision 1.3 1992/01/27 14:46:29 tthorn - * *** empty log message *** - * - */ +#ifndef _AHA1542_H_ +#define _AHA1542_H_ #include <linux/types.h> /* I/O Port interface 4.2 */ /* READ */ #define STATUS(base) base -#define STST 0x80 /* Self Test in Progress */ -#define DIAGF 0x40 /* Internal Diagnostic Failure */ -#define INIT 0x20 /* Mailbox Initialization Required */ -#define IDLE 0x10 /* SCSI Host Adapter Idle */ -#define CDF 0x08 /* Command/Data Out Port Full */ -#define DF 0x04 /* Data In Port Full */ -#define INVDCMD 0x01 /* Invalid H A Command */ -#define STATMASK 0xfd /* 0x02 is reserved */ +#define STST BIT(7) /* Self Test in Progress */ +#define DIAGF BIT(6) /* Internal Diagnostic Failure */ +#define INIT BIT(5) /* Mailbox Initialization Required */ +#define IDLE BIT(4) /* SCSI Host Adapter Idle */ +#define CDF BIT(3) /* Command/Data Out Port Full */ +#define DF BIT(2) /* Data In Port Full */ +/* BIT(1) is reserved */ +#define INVDCMD BIT(0) /* Invalid H A Command */ +#define STATMASK (STST | DIAGF | INIT | IDLE | CDF | DF | INVDCMD) #define INTRFLAGS(base) (STATUS(base)+2) -#define ANYINTR 0x80 /* Any Interrupt */ -#define SCRD 0x08 /* SCSI Reset Detected */ -#define HACC 0x04 /* HA Command Complete */ -#define MBOA 0x02 /* MBO Empty */ -#define MBIF 0x01 /* MBI Full */ -#define INTRMASK 0x8f +#define ANYINTR BIT(7) /* Any Interrupt */ +#define SCRD BIT(3) /* SCSI Reset Detected */ +#define HACC BIT(2) /* HA Command Complete */ +#define MBOA BIT(1) /* MBO Empty */ +#define MBIF BIT(0) /* MBI Full */ +#define INTRMASK (ANYINTR | SCRD | HACC | MBOA | MBIF) /* WRITE */ #define CONTROL(base) STATUS(base) -#define HRST 0x80 /* Hard Reset */ -#define SRST 0x40 /* Soft Reset */ -#define IRST 0x20 /* Interrupt Reset */ -#define SCRST 0x10 /* SCSI Bus Reset */ +#define HRST BIT(7) /* Hard Reset */ +#define SRST BIT(6) /* Soft Reset */ +#define IRST BIT(5) /* Interrupt Reset */ +#define SCRST BIT(4) /* SCSI Bus Reset */ /* READ/WRITE */ #define DATA(base) (STATUS(base)+1) @@ -80,14 +51,14 @@ /* Mailbox Definition 5.2.1 and 5.2.2 */ struct mailbox { - unchar status; /* Command/Status 
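In the aha1542.h hunk above, the status, interrupt and control register bits move from magic hex values to BIT(), and STATMASK/INTRMASK are composed from the named bits so the reserved bit drops out naturally. A minimal stand-alone sketch of the same idiom, with made-up bit names rather than the header's:

#include <stdio.h>

#define BIT(n)		(1u << (n))

/* made-up example bits, in the style the header now uses */
#define EX_BUSY		BIT(4)
#define EX_ERROR	BIT(1)
#define EX_READY	BIT(0)
#define EX_STATMASK	(EX_BUSY | EX_ERROR | EX_READY)

int main(void)
{
	unsigned int status = EX_READY | EX_BUSY | 0x40; /* 0x40 plays "reserved" */

	/* masking with the composed mask drops bits that were never defined */
	printf("raw 0x%02x, masked 0x%02x, ready=%d\n",
	       status, status & EX_STATMASK, !!(status & EX_READY));
	return 0;
}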
*/ - unchar ccbptr[3]; /* msb, .., lsb */ + u8 status; /* Command/Status */ + u8 ccbptr[3]; /* msb, .., lsb */ }; /* This is used with scatter-gather */ struct chain { - unchar datalen[3]; /* Size of this part of chain */ - unchar dataptr[3]; /* Location of data */ + u8 datalen[3]; /* Size of this part of chain */ + u8 dataptr[3]; /* Location of data */ }; /* These belong in scsi.h also */ @@ -100,51 +71,32 @@ static inline void any2scsi(u8 *p, u32 v) #define scsi2int(up) ( (((long)*(up)) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) ) -#define xany2scsi(up, p) \ -(up)[0] = ((long)(p)) >> 24; \ -(up)[1] = ((long)(p)) >> 16; \ -(up)[2] = ((long)(p)) >> 8; \ -(up)[3] = ((long)(p)); - #define xscsi2int(up) ( (((long)(up)[0]) << 24) + (((long)(up)[1]) << 16) \ + (((long)(up)[2]) << 8) + ((long)(up)[3]) ) #define MAX_CDB 12 #define MAX_SENSE 14 -struct ccb { /* Command Control Block 5.3 */ - unchar op; /* Command Control Block Operation Code */ - unchar idlun; /* op=0,2:Target Id, op=1:Initiator Id */ - /* Outbound data transfer, length is checked*/ - /* Inbound data transfer, length is checked */ - /* Logical Unit Number */ - unchar cdblen; /* SCSI Command Length */ - unchar rsalen; /* Request Sense Allocation Length/Disable */ - unchar datalen[3]; /* Data Length (msb, .., lsb) */ - unchar dataptr[3]; /* Data Pointer */ - unchar linkptr[3]; /* Link Pointer */ - unchar commlinkid; /* Command Linking Identifier */ - unchar hastat; /* Host Adapter Status (HASTAT) */ - unchar tarstat; /* Target Device Status */ - unchar reserved[2]; - unchar cdb[MAX_CDB+MAX_SENSE];/* SCSI Command Descriptor Block */ - /* REQUEST SENSE */ +struct ccb { /* Command Control Block 5.3 */ + u8 op; /* Command Control Block Operation Code */ + u8 idlun; /* op=0,2:Target Id, op=1:Initiator Id */ + /* Outbound data transfer, length is checked*/ + /* Inbound data transfer, length is checked */ + /* Logical Unit Number */ + u8 cdblen; /* SCSI Command Length */ + u8 rsalen; /* Request Sense Allocation Length/Disable */ + u8 datalen[3]; /* Data Length (msb, .., lsb) */ + u8 dataptr[3]; /* Data Pointer */ + u8 linkptr[3]; /* Link Pointer */ + u8 commlinkid; /* Command Linking Identifier */ + u8 hastat; /* Host Adapter Status (HASTAT) */ + u8 tarstat; /* Target Device Status */ + u8 reserved[2]; + u8 cdb[MAX_CDB+MAX_SENSE]; /* SCSI Command Descriptor Block */ + /* REQUEST SENSE */ }; -static int aha1542_detect(struct scsi_host_template *); -static int aha1542_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); -static int aha1542_bus_reset(Scsi_Cmnd * SCpnt); -static int aha1542_dev_reset(Scsi_Cmnd * SCpnt); -static int aha1542_host_reset(Scsi_Cmnd * SCpnt); -#if 0 -static int aha1542_old_abort(Scsi_Cmnd * SCpnt); -static int aha1542_old_reset(Scsi_Cmnd *, unsigned int); -#endif -static int aha1542_biosparam(struct scsi_device *, struct block_device *, - sector_t, int *); - +#define AHA1542_REGION_SIZE 4 #define AHA1542_MAILBOXES 8 -#define AHA1542_SCATTER 16 -#define AHA1542_CMDLUN 1 -#endif +#endif /* _AHA1542_H_ */ diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index 97f2accd3dbb..109e2c99e6c1 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c @@ -10437,14 +10437,13 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb) return; } } - lstate = kmalloc(sizeof(*lstate), GFP_ATOMIC); + lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC); if (lstate == NULL) { xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate 
lstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } - memset(lstate, 0, sizeof(*lstate)); status = xpt_create_path(&lstate->path, /*periph*/NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index d5c7b193d8d3..ce96a0be3282 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c @@ -1326,10 +1326,9 @@ int ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg) { ahd->platform_data = - kmalloc(sizeof(struct ahd_platform_data), GFP_ATOMIC); + kzalloc(sizeof(struct ahd_platform_data), GFP_ATOMIC); if (ahd->platform_data == NULL) return (ENOMEM); - memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data)); ahd->platform_data->irq = AHD_LINUX_NOIRQ; ahd_lockinit(ahd); ahd->seltime = (aic79xx_seltime & 0x3) << 4; diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c index 10172a3af1b9..c4829d84b335 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_core.c +++ b/drivers/scsi/aic7xxx/aic7xxx_core.c @@ -4464,10 +4464,9 @@ ahc_softc_init(struct ahc_softc *ahc) ahc->pause = ahc->unpause | PAUSE; /* XXX The shared scb data stuff should be deprecated */ if (ahc->scb_data == NULL) { - ahc->scb_data = kmalloc(sizeof(*ahc->scb_data), GFP_ATOMIC); + ahc->scb_data = kzalloc(sizeof(*ahc->scb_data), GFP_ATOMIC); if (ahc->scb_data == NULL) return (ENOMEM); - memset(ahc->scb_data, 0, sizeof(*ahc->scb_data)); } return (0); @@ -4780,10 +4779,10 @@ ahc_init_scbdata(struct ahc_softc *ahc) SLIST_INIT(&scb_data->sg_maps); /* Allocate SCB resources */ - scb_data->scbarray = kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC); + scb_data->scbarray = kzalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, + GFP_ATOMIC); if (scb_data->scbarray == NULL) return (ENOMEM); - memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC); /* Determine the number of hardware SCBs and initialize them */ @@ -7558,14 +7557,13 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) return; } } - lstate = kmalloc(sizeof(*lstate), GFP_ATOMIC); + lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC); if (lstate == NULL) { xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate lstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } - memset(lstate, 0, sizeof(*lstate)); status = xpt_create_path(&lstate->path, /*periph*/NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index 88360116dbcb..a2f2c774cd6b 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -1214,10 +1214,9 @@ ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg) { ahc->platform_data = - kmalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC); + kzalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC); if (ahc->platform_data == NULL) return (ENOMEM); - memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data)); ahc->platform_data->irq = AHC_LINUX_NOIRQ; ahc_lockinit(ahc); ahc->seltime = (aic7xxx_seltime & 0x3) << 4; diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index a70255413e7f..db87ece6edb2 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c @@ -1486,7 +1486,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) * selection. 
*/ - timeout = jiffies + (250 * HZ / 1000); + timeout = jiffies + msecs_to_jiffies(250); /* * XXX very interesting - we're seeing a bounce where the BSY we diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index d1c37a386947..5ede3daa93dc 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -1014,7 +1014,6 @@ static struct platform_driver atari_scsi_driver = { .remove = __exit_p(atari_scsi_remove), .driver = { .name = DRV_MODULE_NAME, - .owner = THIS_MODULE, }, }; diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index f35792f7051c..f8d2478b11cc 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -57,9 +57,9 @@ */ /* settings for DTC3181E card with only Mustek scanner attached */ -#define USLEEP_POLL 1 -#define USLEEP_SLEEP 20 -#define USLEEP_WAITLONG 500 +#define USLEEP_POLL msecs_to_jiffies(10) +#define USLEEP_SLEEP msecs_to_jiffies(200) +#define USLEEP_WAITLONG msecs_to_jiffies(5000) #define AUTOPROBE_IRQ @@ -723,7 +723,7 @@ module_param(ncr_53c400a, int, 0); module_param(dtc_3181e, int, 0); MODULE_LICENSE("GPL"); -#ifndef SCSI_G_NCR5380_MEM +#if !defined(SCSI_G_NCR5380_MEM) && defined(MODULE) static struct isapnp_device_id id_table[] = { { ISAPNP_ANY_ID, ISAPNP_ANY_ID, diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index d9afc51af7d3..882744852aac 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -99,6 +99,7 @@ static unsigned int ipr_debug = 0; static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS; static unsigned int ipr_dual_ioa_raid = 1; static unsigned int ipr_number_of_msix = 2; +static unsigned int ipr_fast_reboot; static DEFINE_SPINLOCK(ipr_driver_lock); /* This table describes the differences between DMA controller chips */ @@ -221,6 +222,8 @@ MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. " "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]"); module_param_named(number_of_msix, ipr_number_of_msix, int, 0); MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)"); +module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. 
(default: 0)"); MODULE_LICENSE("GPL"); MODULE_VERSION(IPR_DRIVER_VERSION); @@ -495,6 +498,10 @@ struct ipr_error_table_t ipr_error_table[] = { "4061: Multipath redundancy level got better"}, {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL, "4060: Multipath redundancy level got worse"}, + {0x06808100, 0, IPR_DEFAULT_LOG_LEVEL, + "9083: Device raw mode enabled"}, + {0x06808200, 0, IPR_DEFAULT_LOG_LEVEL, + "9084: Device raw mode disabled"}, {0x07270000, 0, 0, "Failure due to other device"}, {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL, @@ -1462,7 +1469,8 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd) list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); if (ioasc) { - if (ioasc != IPR_IOASC_IOA_WAS_RESET) + if (ioasc != IPR_IOASC_IOA_WAS_RESET && + ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) dev_err(&ioa_cfg->pdev->dev, "Host RCB failed with IOASC: 0x%08X\n", ioasc); @@ -2566,7 +2574,8 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd) ipr_handle_log_data(ioa_cfg, hostrcb); if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED) ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); - } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) { + } else if (ioasc != IPR_IOASC_IOA_WAS_RESET && + ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) { dev_err(&ioa_cfg->pdev->dev, "Host RCB failed with IOASC: 0x%08X\n", ioasc); } @@ -4491,11 +4500,83 @@ static struct device_attribute ipr_resource_type_attr = { .show = ipr_show_resource_type }; +/** + * ipr_show_raw_mode - Show the adapter's raw mode + * @dev: class device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_raw_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + ssize_t len; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res) + len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode); + else + len = -ENXIO; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +/** + * ipr_store_raw_mode - Change the adapter's raw mode + * @dev: class device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_store_raw_mode(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + ssize_t len; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res) { + if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) { + res->raw_mode = simple_strtoul(buf, NULL, 10); + len = strlen(buf); + if (res->sdev) + sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n", + res->raw_mode ? 
"enabled" : "disabled"); + } else + len = -EINVAL; + } else + len = -ENXIO; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +static struct device_attribute ipr_raw_mode_attr = { + .attr = { + .name = "raw_mode", + .mode = S_IRUGO | S_IWUSR, + }, + .show = ipr_show_raw_mode, + .store = ipr_store_raw_mode +}; + static struct device_attribute *ipr_dev_attrs[] = { &ipr_adapter_handle_attr, &ipr_resource_path_attr, &ipr_device_id_attr, &ipr_resource_type_attr, + &ipr_raw_mode_attr, NULL, }; @@ -5379,9 +5460,6 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { /* Mask the interrupt */ writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg); - - /* Clear the interrupt */ - writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg); int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); list_del(&ioa_cfg->reset_cmd->queue); @@ -6150,6 +6228,13 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, break; case IPR_IOASC_NR_INIT_CMD_REQUIRED: break; + case IPR_IOASC_IR_NON_OPTIMIZED: + if (res->raw_mode) { + res->raw_mode = 0; + scsi_cmd->result |= (DID_IMM_RETRY << 16); + } else + scsi_cmd->result |= (DID_ERROR << 16); + break; default: if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) scsi_cmd->result |= (DID_ERROR << 16); @@ -6289,6 +6374,8 @@ static int ipr_queuecommand(struct Scsi_Host *shost, (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) { ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; } + if (res->raw_mode && ipr_is_af_dasd_device(res)) + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE; if (ioa_cfg->sis64) rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); @@ -6402,7 +6489,6 @@ static struct scsi_host_template driver_template = { .shost_attrs = ipr_ioa_attrs, .sdev_attrs = ipr_dev_attrs, .proc_name = IPR_NAME, - .no_write_same = 1, .use_blk_tags = 1, }; @@ -8318,7 +8404,6 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd) { ENTER; - pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset); ipr_cmd->job_step = ipr_reset_bist_done; ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); LEAVE; @@ -8326,6 +8411,32 @@ static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd) } /** + * ipr_reset_reset_work - Pulse a PCIe fundamental reset + * @work: work struct + * + * Description: This pulses warm reset to a slot. + * + **/ +static void ipr_reset_reset_work(struct work_struct *work) +{ + struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work); + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct pci_dev *pdev = ioa_cfg->pdev; + unsigned long lock_flags = 0; + + ENTER; + pci_set_pcie_reset_state(pdev, pcie_warm_reset); + msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT)); + pci_set_pcie_reset_state(pdev, pcie_deassert_reset); + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->reset_cmd == ipr_cmd) + ipr_reset_ioa_job(ipr_cmd); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + LEAVE; +} + +/** * ipr_reset_slot_reset - Reset the PCI slot of the adapter. 
* @ipr_cmd: ipr command struct * @@ -8337,12 +8448,11 @@ static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd) static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; - struct pci_dev *pdev = ioa_cfg->pdev; ENTER; - pci_set_pcie_reset_state(pdev, pcie_warm_reset); + INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work); + queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); ipr_cmd->job_step = ipr_reset_slot_reset_done; - ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT); LEAVE; return IPR_RC_JOB_RETURN; } @@ -8480,6 +8590,122 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd) } /** + * ipr_reset_quiesce_done - Complete IOA disconnect + * @ipr_cmd: ipr command struct + * + * Description: Freeze the adapter to complete quiesce processing + * + * Return value: + * IPR_RC_JOB_CONTINUE + **/ +static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + ipr_cmd->job_step = ipr_ioa_bringdown_done; + ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); + LEAVE; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_reset_cancel_hcam_done - Check for outstanding commands + * @ipr_cmd: ipr command struct + * + * Description: Ensure nothing is outstanding to the IOA and + * proceed with IOA disconnect. Otherwise reset the IOA. + * + * Return value: + * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE + **/ +static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_cmnd *loop_cmd; + struct ipr_hrr_queue *hrrq; + int rc = IPR_RC_JOB_CONTINUE; + int count = 0; + + ENTER; + ipr_cmd->job_step = ipr_reset_quiesce_done; + + for_each_hrrq(hrrq, ioa_cfg) { + spin_lock(&hrrq->_lock); + list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) { + count++; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + rc = IPR_RC_JOB_RETURN; + break; + } + spin_unlock(&hrrq->_lock); + + if (count) + break; + } + + LEAVE; + return rc; +} + +/** + * ipr_reset_cancel_hcam - Cancel outstanding HCAMs + * @ipr_cmd: ipr command struct + * + * Description: Cancel any oustanding HCAMs to the IOA. 
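ipr_reset_cancel_hcam_done() above chooses between completing the quiesce and falling back to a full adapter reset by checking every hardware request queue's pending list under its lock. A simplified, stand-alone sketch of that kind of check; the types are illustrative, not the driver's:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_queue {
	spinlock_t lock;
	struct list_head pending;
};

/* true if any of the queues still has work outstanding */
static bool example_anything_pending(struct example_queue *queues, int nr)
{
	bool pending = false;
	int i;

	for (i = 0; i < nr && !pending; i++) {
		spin_lock(&queues[i].lock);
		pending = !list_empty(&queues[i].pending);
		spin_unlock(&queues[i].lock);
	}
	return pending;
}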
+ * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int rc = IPR_RC_JOB_CONTINUE; + struct ipr_cmd_pkt *cmd_pkt; + struct ipr_cmnd *hcam_cmd; + struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; + + ENTER; + ipr_cmd->job_step = ipr_reset_cancel_hcam_done; + + if (!hrrq->ioa_is_dead) { + if (!list_empty(&ioa_cfg->hostrcb_pending_q)) { + list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) { + if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC) + continue; + + ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; + cmd_pkt->request_type = IPR_RQTYPE_IOACMD; + cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST; + cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB; + cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff; + cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff; + cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff; + cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff; + cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff; + cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff; + cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff; + cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, + IPR_CANCEL_TIMEOUT); + + rc = IPR_RC_JOB_RETURN; + ipr_cmd->job_step = ipr_reset_cancel_hcam; + break; + } + } + } else + ipr_cmd->job_step = ipr_reset_alert; + + LEAVE; + return rc; +} + +/** * ipr_reset_ucode_download_done - Microcode download completion * @ipr_cmd: ipr command struct * @@ -8561,7 +8787,9 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd) int rc = IPR_RC_JOB_CONTINUE; ENTER; - if (shutdown_type != IPR_SHUTDOWN_NONE && + if (shutdown_type == IPR_SHUTDOWN_QUIESCE) + ipr_cmd->job_step = ipr_reset_cancel_hcam; + else if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; @@ -8917,13 +9145,15 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) { int i; - for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { - if (ioa_cfg->ipr_cmnd_list[i]) - dma_pool_free(ioa_cfg->ipr_cmd_pool, - ioa_cfg->ipr_cmnd_list[i], - ioa_cfg->ipr_cmnd_list_dma[i]); + if (ioa_cfg->ipr_cmnd_list) { + for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { + if (ioa_cfg->ipr_cmnd_list[i]) + dma_pool_free(ioa_cfg->ipr_cmd_pool, + ioa_cfg->ipr_cmnd_list[i], + ioa_cfg->ipr_cmnd_list_dma[i]); - ioa_cfg->ipr_cmnd_list[i] = NULL; + ioa_cfg->ipr_cmnd_list[i] = NULL; + } } if (ioa_cfg->ipr_cmd_pool) @@ -8973,26 +9203,25 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) } /** - * ipr_free_all_resources - Free all allocated resources for an adapter. - * @ipr_cmd: ipr command struct + * ipr_free_irqs - Free all allocated IRQs for the adapter. + * @ioa_cfg: ipr cfg struct * - * This function frees all allocated resources for the + * This function frees all allocated IRQs for the * specified adapter. 
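The cancel request built above scatters the 64-bit IOARCB DMA address across CDB bytes, high 32 bits into bytes 10-13 and low 32 bits into bytes 2-5, most significant byte first. A small stand-alone sketch of the same byte packing; the helper is illustrative and the exact byte offsets are adapter specific:

#include <linux/types.h>

/* write the high and low halves of 'addr' MSB-first into two 4-byte slots */
static void example_pack_addr64(u8 hi4[4], u8 lo4[4], u64 addr)
{
	hi4[0] = (addr >> 56) & 0xff;
	hi4[1] = (addr >> 48) & 0xff;
	hi4[2] = (addr >> 40) & 0xff;
	hi4[3] = (addr >> 32) & 0xff;

	lo4[0] = (addr >> 24) & 0xff;
	lo4[1] = (addr >> 16) & 0xff;
	lo4[2] = (addr >> 8) & 0xff;
	lo4[3] = addr & 0xff;
}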
* * Return value: * none **/ -static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg) +static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg) { struct pci_dev *pdev = ioa_cfg->pdev; - ENTER; if (ioa_cfg->intr_flag == IPR_USE_MSI || ioa_cfg->intr_flag == IPR_USE_MSIX) { int i; for (i = 0; i < ioa_cfg->nvectors; i++) free_irq(ioa_cfg->vectors_info[i].vec, - &ioa_cfg->hrrq[i]); + &ioa_cfg->hrrq[i]); } else free_irq(pdev->irq, &ioa_cfg->hrrq[0]); @@ -9003,7 +9232,26 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg) pci_disable_msix(pdev); ioa_cfg->intr_flag &= ~IPR_USE_MSIX; } +} +/** + * ipr_free_all_resources - Free all allocated resources for an adapter. + * @ipr_cmd: ipr command struct + * + * This function frees all allocated resources for the + * specified adapter. + * + * Return value: + * none + **/ +static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg) +{ + struct pci_dev *pdev = ioa_cfg->pdev; + + ENTER; + ipr_free_irqs(ioa_cfg); + if (ioa_cfg->reset_work_q) + destroy_workqueue(ioa_cfg->reset_work_q); iounmap(ioa_cfg->hdw_dma_regs); pci_release_regions(pdev); ipr_free_mem(ioa_cfg); @@ -9823,6 +10071,14 @@ static int ipr_probe_ioa(struct pci_dev *pdev, (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) { ioa_cfg->needs_warm_reset = 1; ioa_cfg->reset = ipr_reset_slot_reset; + + ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d", + WQ_MEM_RECLAIM, host->host_no); + + if (!ioa_cfg->reset_work_q) { + dev_err(&pdev->dev, "Couldn't register reset workqueue\n"); + goto out_free_irq; + } } else ioa_cfg->reset = ipr_reset_start_bist; @@ -9834,6 +10090,8 @@ static int ipr_probe_ioa(struct pci_dev *pdev, out: return rc; +out_free_irq: + ipr_free_irqs(ioa_cfg); cleanup_nolog: ipr_free_mem(ioa_cfg); out_msi_disable: @@ -9914,6 +10172,8 @@ static void __ipr_remove(struct pci_dev *pdev) spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); flush_work(&ioa_cfg->work_q); + if (ioa_cfg->reset_work_q) + flush_workqueue(ioa_cfg->reset_work_q); INIT_LIST_HEAD(&ioa_cfg->used_res_q); spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); @@ -10036,6 +10296,7 @@ static void ipr_shutdown(struct pci_dev *pdev) { struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); unsigned long lock_flags = 0; + enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL; int i; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); @@ -10051,9 +10312,16 @@ static void ipr_shutdown(struct pci_dev *pdev) spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); } - ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); + if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) + shutdown_type = IPR_SHUTDOWN_QUIESCE; + + ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) { + ipr_free_irqs(ioa_cfg); + pci_disable_device(ioa_cfg->pdev); + } } static struct pci_device_id ipr_pci_table[] = { @@ -10211,7 +10479,8 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf) list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) { spin_lock_irqsave(ioa_cfg->host->host_lock, flags); - if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { + if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || + (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) { 
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); continue; } diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index ec03b42fa2b9..47412cf4eaac 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -39,8 +39,8 @@ /* * Literals */ -#define IPR_DRIVER_VERSION "2.6.0" -#define IPR_DRIVER_DATE "(November 16, 2012)" +#define IPR_DRIVER_VERSION "2.6.1" +#define IPR_DRIVER_DATE "(March 12, 2015)" /* * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding @@ -138,6 +138,7 @@ #define IPR_IOASC_BUS_WAS_RESET 0x06290000 #define IPR_IOASC_BUS_WAS_RESET_BY_OTHER 0x06298000 #define IPR_IOASC_ABORTED_CMD_TERM_BY_HOST 0x0B5A0000 +#define IPR_IOASC_IR_NON_OPTIMIZED 0x05258200 #define IPR_FIRST_DRIVER_IOASC 0x10000000 #define IPR_IOASC_IOA_WAS_RESET 0x10000001 @@ -196,6 +197,8 @@ /* * Adapter Commands */ +#define IPR_CANCEL_REQUEST 0xC0 +#define IPR_CANCEL_64BIT_IOARCB 0x01 #define IPR_QUERY_RSRC_STATE 0xC2 #define IPR_RESET_DEVICE 0xC3 #define IPR_RESET_TYPE_SELECT 0x80 @@ -222,6 +225,7 @@ #define IPR_ABBREV_SHUTDOWN_TIMEOUT (10 * HZ) #define IPR_DUAL_IOA_ABBR_SHUTDOWN_TO (2 * 60 * HZ) #define IPR_DEVICE_RESET_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ) +#define IPR_CANCEL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ) #define IPR_CANCEL_ALL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ) #define IPR_ABORT_TASK_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ) #define IPR_INTERNAL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ) @@ -518,6 +522,7 @@ struct ipr_cmd_pkt { #define IPR_RQTYPE_IOACMD 0x01 #define IPR_RQTYPE_HCAM 0x02 #define IPR_RQTYPE_ATA_PASSTHRU 0x04 +#define IPR_RQTYPE_PIPE 0x05 u8 reserved2; @@ -1271,6 +1276,7 @@ struct ipr_resource_entry { u8 del_from_ml:1; u8 resetting_device:1; u8 reset_occurred:1; + u8 raw_mode:1; u32 bus; /* AKA channel */ u32 target; /* AKA id */ @@ -1402,7 +1408,8 @@ enum ipr_shutdown_type { IPR_SHUTDOWN_NORMAL = 0x00, IPR_SHUTDOWN_PREPARE_FOR_NORMAL = 0x40, IPR_SHUTDOWN_ABBREV = 0x80, - IPR_SHUTDOWN_NONE = 0x100 + IPR_SHUTDOWN_NONE = 0x100, + IPR_SHUTDOWN_QUIESCE = 0x101, }; struct ipr_trace_entry { @@ -1536,6 +1543,7 @@ struct ipr_ioa_cfg { u8 saved_mode_page_len; struct work_struct work_q; + struct workqueue_struct *reset_work_q; wait_queue_head_t reset_wait_q; wait_queue_head_t msi_wait_q; @@ -1587,6 +1595,7 @@ struct ipr_cmnd { struct ata_queued_cmd *qc; struct completion completion; struct timer_list timer; + struct work_struct work; void (*fast_done) (struct ipr_cmnd *); void (*done) (struct ipr_cmnd *); int (*job_step) (struct ipr_cmnd *); diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 434e9037908e..9b81a34d7449 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2014 Emulex. All rights reserved. * + * Copyright (C) 2004-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -413,6 +413,9 @@ struct lpfc_vport { uint32_t cfg_fcp_class; uint32_t cfg_use_adisc; uint32_t cfg_fdmi_on; +#define LPFC_FDMI_SUPPORT 1 /* bit 0 - FDMI supported? */ +#define LPFC_FDMI_REG_DELAY 2 /* bit 1 - 60 sec registration delay */ +#define LPFC_FDMI_ALL_ATTRIB 4 /* bit 2 - register ALL attributes? 
*/ uint32_t cfg_discovery_threads; uint32_t cfg_log_verbose; uint32_t cfg_max_luns; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 2f9b96826ac0..d65bd178d131 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2014 Emulex. All rights reserved. * + * Copyright (C) 2004-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -406,8 +406,13 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; + char fwrev[FW_REV_STR_SIZE]; + + if (phba->sli_rev < LPFC_SLI_REV4) + return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion); - return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion); + lpfc_decode_firmware_rev(phba, fwrev, 1); + return snprintf(buf, PAGE_SIZE, "%s\n", fwrev); } /** @@ -4568,12 +4573,18 @@ LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1, /* # lpfc_fdmi_on: controls FDMI support. -# 0 = no FDMI support -# 1 = support FDMI without attribute of hostname -# 2 = support FDMI with attribute of hostname -# Value range [0,2]. Default value is 0. +# Set NOT Set +# bit 0 = FDMI support no FDMI support +# LPFC_FDMI_SUPPORT just turns basic support on/off +# bit 1 = Register delay no register delay (60 seconds) +# LPFC_FDMI_REG_DELAY 60 sec registration delay after FDMI login +# bit 2 = All attributes Use a attribute subset +# LPFC_FDMI_ALL_ATTRIB applies to both port and HBA attributes +# Port attrutes subset: 1 thru 6 OR all: 1 thru 0xd 0x101 0x102 0x103 +# HBA attributes subset: 1 thru 0xb OR all: 1 thru 0xc +# Value range [0,7]. Default value is 0. */ -LPFC_VPORT_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support"); +LPFC_VPORT_ATTR_RW(fdmi_on, 0, 0, 7, "Enable FDMI support"); /* # Specifies the maximum number of ELS cmds we can have outstanding (for diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index a7bf359aa0c6..b705068079c0 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2014 Emulex. All rights reserved. * + * Copyright (C) 2009-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
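With fdmi_on reworked above into a bit mask (valid range now 0 to 7) instead of the old 0/1/2 values, code consults individual capability bits rather than comparing the whole value. A small sketch of consuming such a mask; the names mirror the new LPFC_FDMI_* flags but the helper itself is illustrative:

#include <linux/types.h>

#define EX_FDMI_SUPPORT		0x1	/* bit 0: FDMI enabled at all */
#define EX_FDMI_REG_DELAY	0x2	/* bit 1: 60 second registration delay */
#define EX_FDMI_ALL_ATTRIB	0x4	/* bit 2: register all attributes */

static bool example_fdmi_delay_registration(u32 cfg_fdmi_on)
{
	/* the other bits only matter once basic FDMI support is on */
	if (!(cfg_fdmi_on & EX_FDMI_SUPPORT))
		return false;

	return !!(cfg_fdmi_on & EX_FDMI_REG_DELAY);
}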
* * www.emulex.com * * * @@ -3194,6 +3194,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) cmd->unsli3.rcvsli3.ox_id = 0xffff; } cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; + cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK; cmdiocbq->vport = phba->pport; cmdiocbq->iocb_cmpl = NULL; iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, @@ -4179,6 +4180,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, switch (opcode) { case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES: case COMN_OPCODE_GET_CNTL_ATTRIBUTES: + case COMN_OPCODE_GET_PROFILE_CONFIG: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3106 Handled SLI_CONFIG " "subsys_comn, opcode:x%x\n", diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h index 928ef609f363..e557bcdbcb19 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.h +++ b/drivers/scsi/lpfc/lpfc_bsg.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2010-2014 Emulex. All rights reserved. * + * Copyright (C) 2010-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -246,6 +246,7 @@ struct lpfc_sli_config_emb1_subsys { #define lpfc_emb1_subcmnd_subsys_WORD word6 /* Subsystem COMN (0x01) OpCodes */ #define SLI_CONFIG_SUBSYS_COMN 0x01 +#define COMN_OPCODE_GET_PROFILE_CONFIG 0xA4 #define COMN_OPCODE_READ_OBJECT 0xAB #define COMN_OPCODE_WRITE_OBJECT 0xAC #define COMN_OPCODE_READ_OBJECT_LIST 0xAD diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 00665a5d92fd..587e3e962f2b 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2014 Emulex. All rights reserved. * + * Copyright (C) 2004-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -284,6 +284,7 @@ void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, struct lpfc_sli_ring *, uint32_t); void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *); void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, uint32_t); void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); @@ -354,6 +355,7 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *); extern struct device_attribute *lpfc_hba_attrs[]; extern struct device_attribute *lpfc_vport_attrs[]; extern struct scsi_host_template lpfc_template; +extern struct scsi_host_template lpfc_template_s3; extern struct scsi_host_template lpfc_vport_template; extern struct fc_function_template lpfc_transport_functions; extern struct fc_function_template lpfc_vport_transport_functions; diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 61a32cd23f79..af129966bd11 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2015 Emulex. All rights reserved. 
* * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -555,7 +555,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) } } } - if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY))) + if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY))) goto nsout1; Cnt -= sizeof (uint32_t); } @@ -641,7 +641,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Good status, continue checking */ CTrsp = (struct lpfc_sli_ct_request *) outp->virt; if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { + cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0208 NameServer Rsp Data: x%x\n", vport->fc_flag); @@ -1074,11 +1074,48 @@ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, lpfc_decode_firmware_rev(vport->phba, fwrev, 0); - n = snprintf(symbol, size, "Emulex %s FV%s DV%s", - vport->phba->ModelName, fwrev, lpfc_release_version); + n = snprintf(symbol, size, "Emulex %s", vport->phba->ModelName); + + if (size < n) + return n; + n += snprintf(symbol + n, size - n, " FV%s", fwrev); + + if (size < n) + return n; + n += snprintf(symbol + n, size - n, " DV%s", lpfc_release_version); + + if (size < n) + return n; + n += snprintf(symbol + n, size - n, " HN:%s", init_utsname()->nodename); + + /* Note :- OS name is "Linux" */ + if (size < n) + return n; + n += snprintf(symbol + n, size - n, " OS:%s", init_utsname()->sysname); + return n; } +static uint32_t +lpfc_find_map_node(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp, *next_ndlp; + struct Scsi_Host *shost; + uint32_t cnt = 0; + + shost = lpfc_shost_from_vport(vport); + spin_lock_irq(shost->host_lock); + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (ndlp->nlp_type & NLP_FABRIC) + continue; + if ((ndlp->nlp_state == NLP_STE_MAPPED_NODE) || + (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)) + cnt++; + } + spin_unlock_irq(shost->host_lock); + return cnt; +} + /* * lpfc_ns_cmd * Description: @@ -1177,7 +1214,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, switch (cmdcode) { case SLI_CTNS_GID_FT: CtReq->CommandResponse.bits.CmdRsp = - be16_to_cpu(SLI_CTNS_GID_FT); + cpu_to_be16(SLI_CTNS_GID_FT); CtReq->un.gid.Fc4Type = SLI_CTPT_FCP; if (vport->port_state < LPFC_NS_QRY) vport->port_state = LPFC_NS_QRY; @@ -1188,7 +1225,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, case SLI_CTNS_GFF_ID: CtReq->CommandResponse.bits.CmdRsp = - be16_to_cpu(SLI_CTNS_GFF_ID); + cpu_to_be16(SLI_CTNS_GFF_ID); CtReq->un.gff.PortId = cpu_to_be32(context); cmpl = lpfc_cmpl_ct_cmd_gff_id; break; @@ -1196,7 +1233,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, case SLI_CTNS_RFT_ID: vport->ct_flags &= ~FC_CT_RFT_ID; CtReq->CommandResponse.bits.CmdRsp = - be16_to_cpu(SLI_CTNS_RFT_ID); + cpu_to_be16(SLI_CTNS_RFT_ID); CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID); CtReq->un.rft.fcpReg = 1; cmpl = lpfc_cmpl_ct_cmd_rft_id; @@ -1205,7 +1242,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, case SLI_CTNS_RNN_ID: vport->ct_flags &= ~FC_CT_RNN_ID; CtReq->CommandResponse.bits.CmdRsp = - be16_to_cpu(SLI_CTNS_RNN_ID); + cpu_to_be16(SLI_CTNS_RNN_ID); CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID); memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename, sizeof (struct lpfc_name)); @@ -1215,7 +1252,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, case SLI_CTNS_RSPN_ID: vport->ct_flags &= ~FC_CT_RSPN_ID; CtReq->CommandResponse.bits.CmdRsp = - be16_to_cpu(SLI_CTNS_RSPN_ID); + 
cpu_to_be16(SLI_CTNS_RSPN_ID); CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID); size = sizeof(CtReq->un.rspn.symbname); CtReq->un.rspn.len = @@ -1226,7 +1263,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, case SLI_CTNS_RSNN_NN: vport->ct_flags &= ~FC_CT_RSNN_NN; CtReq->CommandResponse.bits.CmdRsp = - be16_to_cpu(SLI_CTNS_RSNN_NN); + cpu_to_be16(SLI_CTNS_RSNN_NN); memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename, sizeof (struct lpfc_name)); size = sizeof(CtReq->un.rsnn.symbname); @@ -1238,14 +1275,14 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, case SLI_CTNS_DA_ID: /* Implement DA_ID Nameserver request */ CtReq->CommandResponse.bits.CmdRsp = - be16_to_cpu(SLI_CTNS_DA_ID); + cpu_to_be16(SLI_CTNS_DA_ID); CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID); cmpl = lpfc_cmpl_ct_cmd_da_id; break; case SLI_CTNS_RFF_ID: vport->ct_flags &= ~FC_CT_RFF_ID; CtReq->CommandResponse.bits.CmdRsp = - be16_to_cpu(SLI_CTNS_RFF_ID); + cpu_to_be16(SLI_CTNS_RFF_ID); CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID); CtReq->un.rff.fbits = FC4_FEATURE_INIT; CtReq->un.rff.type_code = FC_TYPE_FCP; @@ -1299,7 +1336,6 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, uint32_t latt; latt = lpfc_els_chk_latt(vport); - lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "FDMI cmpl: status:x%x/x%x latt:%d", irsp->ulpStatus, irsp->un.ulpWord[4], latt); @@ -1310,29 +1346,49 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, "ulpStatus: x%x, rid x%x\n", be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus, irsp->un.ulpWord[4]); - lpfc_ct_free_iocb(phba, cmdiocb); - return; + goto fail_out; } ndlp = lpfc_findnode_did(vport, FDMI_DID); if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) goto fail_out; - if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { + if (fdmi_rsp == cpu_to_be16(SLI_CT_RESPONSE_FS_RJT)) { /* FDMI rsp failed */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0220 FDMI rsp failed Data: x%x\n", be16_to_cpu(fdmi_cmd)); } +fail_out: + lpfc_ct_free_iocb(phba, cmdiocb); +} + +static void +lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct lpfc_dmabuf *inp = cmdiocb->context1; + struct lpfc_sli_ct_request *CTcmd = inp->virt; + uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp; + struct lpfc_nodelist *ndlp; + + lpfc_cmpl_ct_cmd_fdmi(phba, cmdiocb, rspiocb); + + ndlp = lpfc_findnode_did(vport, FDMI_DID); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + return; + + /* + * Need to cycle thru FDMI registration for discovery + * DHBA -> DPRT -> RHBA -> RPA + */ switch (be16_to_cpu(fdmi_cmd)) { case SLI_MGMT_RHBA: lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA); break; - case SLI_MGMT_RPA: - break; - case SLI_MGMT_DHBA: lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT); break; @@ -1341,12 +1397,9 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA); break; } - -fail_out: - lpfc_ct_free_iocb(phba, cmdiocb); - return; } + int lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) { @@ -1355,18 +1408,28 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) struct lpfc_sli_ct_request *CtReq; struct ulp_bde64 *bpl; uint32_t size; - REG_HBA *rh; - PORT_ENTRY *pe; - REG_PORT_ATTRIBUTE *pab; - ATTRIBUTE_BLOCK *ab; - ATTRIBUTE_ENTRY *ae; + uint32_t rsp_size; + struct lpfc_fdmi_reg_hba *rh; + struct lpfc_fdmi_port_entry *pe; + struct lpfc_fdmi_reg_portattr *pab = 
NULL; + struct lpfc_fdmi_attr_block *ab = NULL; + struct lpfc_fdmi_attr_entry *ae; + struct lpfc_fdmi_attr_def *ad; void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); + if (ndlp == NULL) { + ndlp = lpfc_findnode_did(vport, FDMI_DID); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + return 0; + cmpl = lpfc_cmpl_ct_cmd_fdmi; /* cmd interface */ + } else { + cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */ + } /* fill in BDEs for command */ /* Allocate buffer for command payload */ - mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); + mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!mp) goto fdmi_cmd_exit; @@ -1375,7 +1438,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) goto fdmi_cmd_free_mp; /* Allocate buffer for Buffer ptr list */ - bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); + bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!bmp) goto fdmi_cmd_free_mpvirt; @@ -1390,205 +1453,330 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0218 FDMI Request Data: x%x x%x x%x\n", vport->fc_flag, vport->port_state, cmdcode); - CtReq = (struct lpfc_sli_ct_request *) mp->virt; + CtReq = (struct lpfc_sli_ct_request *)mp->virt; + /* First populate the CT_IU preamble */ memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request)); CtReq->RevisionId.bits.Revision = SLI_CT_REVISION; CtReq->RevisionId.bits.InId = 0; CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE; CtReq->FsSubType = SLI_CT_FDMI_Subtypes; + + CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode); + rsp_size = LPFC_BPL_SIZE; size = 0; + /* Next fill in the specific FDMI cmd information */ switch (cmdcode) { + case SLI_MGMT_RHAT: case SLI_MGMT_RHBA: { lpfc_vpd_t *vp = &phba->vpd; uint32_t i, j, incr; - int len; + int len = 0; - CtReq->CommandResponse.bits.CmdRsp = - be16_to_cpu(SLI_MGMT_RHBA); - CtReq->CommandResponse.bits.Size = 0; - rh = (REG_HBA *) & CtReq->un.PortID; + rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un.PortID; + /* HBA Identifier */ memcpy(&rh->hi.PortName, &vport->fc_sparam.portName, - sizeof (struct lpfc_name)); - /* One entry (port) per adapter */ - rh->rpl.EntryCnt = be32_to_cpu(1); - memcpy(&rh->rpl.pe, &vport->fc_sparam.portName, - sizeof (struct lpfc_name)); - - /* point to the HBA attribute block */ - size = 2 * sizeof (struct lpfc_name) + FOURBYTES; - ab = (ATTRIBUTE_BLOCK *) ((uint8_t *) rh + size); + sizeof(struct lpfc_name)); + + if (cmdcode == SLI_MGMT_RHBA) { + /* Registered Port List */ + /* One entry (port) per adapter */ + rh->rpl.EntryCnt = cpu_to_be32(1); + memcpy(&rh->rpl.pe, &vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + + /* point to the HBA attribute block */ + size = 2 * sizeof(struct lpfc_name) + + FOURBYTES; + } else { + size = sizeof(struct lpfc_name); + } + ab = (struct lpfc_fdmi_attr_block *) + ((uint8_t *)rh + size); ab->EntryCnt = 0; + size += FOURBYTES; - /* Point to the beginning of the first HBA attribute - entry */ + /* + * Point to beginning of first HBA attribute entry + */ /* #1 HBA attribute entry */ - size += FOURBYTES; - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); - ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME); - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES - + sizeof (struct lpfc_name)); + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)rh + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(struct lpfc_name)); + ad->AttrType = cpu_to_be16(RHBA_NODENAME); + 
ad->AttrLen = cpu_to_be16(FOURBYTES + + sizeof(struct lpfc_name)); memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName, - sizeof (struct lpfc_name)); + sizeof(struct lpfc_name)); ab->EntryCnt++; - size += FOURBYTES + sizeof (struct lpfc_name); + size += FOURBYTES + sizeof(struct lpfc_name); + if ((size + LPFC_FDMI_MAX_AE_SIZE) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto hba_out; /* #2 HBA attribute entry */ - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); - ae->ad.bits.AttrType = be16_to_cpu(MANUFACTURER); - strncpy(ae->un.Manufacturer, "Emulex Corporation", 64); - len = strlen(ae->un.Manufacturer); + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)rh + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(ae->un.Manufacturer)); + ad->AttrType = cpu_to_be16(RHBA_MANUFACTURER); + strncpy(ae->un.Manufacturer, "Emulex Corporation", + sizeof(ae->un.Manufacturer)); + len = strnlen(ae->un.Manufacturer, + sizeof(ae->un.Manufacturer)); len += (len & 3) ? (4 - (len & 3)) : 4; - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); + ad->AttrLen = cpu_to_be16(FOURBYTES + len); ab->EntryCnt++; size += FOURBYTES + len; + if ((size + LPFC_FDMI_MAX_AE_SIZE) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto hba_out; /* #3 HBA attribute entry */ - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); - ae->ad.bits.AttrType = be16_to_cpu(SERIAL_NUMBER); - strncpy(ae->un.SerialNumber, phba->SerialNumber, 64); - len = strlen(ae->un.SerialNumber); + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)rh + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(ae->un.SerialNumber)); + ad->AttrType = cpu_to_be16(RHBA_SERIAL_NUMBER); + strncpy(ae->un.SerialNumber, phba->SerialNumber, + sizeof(ae->un.SerialNumber)); + len = strnlen(ae->un.SerialNumber, + sizeof(ae->un.SerialNumber)); len += (len & 3) ? (4 - (len & 3)) : 4; - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); + ad->AttrLen = cpu_to_be16(FOURBYTES + len); ab->EntryCnt++; size += FOURBYTES + len; + if ((size + LPFC_FDMI_MAX_AE_SIZE) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto hba_out; /* #4 HBA attribute entry */ - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); - ae->ad.bits.AttrType = be16_to_cpu(MODEL); - strncpy(ae->un.Model, phba->ModelName, 256); - len = strlen(ae->un.Model); + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)rh + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(ae->un.Model)); + ad->AttrType = cpu_to_be16(RHBA_MODEL); + strncpy(ae->un.Model, phba->ModelName, + sizeof(ae->un.Model)); + len = strnlen(ae->un.Model, sizeof(ae->un.Model)); len += (len & 3) ? (4 - (len & 3)) : 4; - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); + ad->AttrLen = cpu_to_be16(FOURBYTES + len); ab->EntryCnt++; size += FOURBYTES + len; + if ((size + LPFC_FDMI_MAX_AE_SIZE) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto hba_out; /* #5 HBA attribute entry */ - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); - ae->ad.bits.AttrType = be16_to_cpu(MODEL_DESCRIPTION); - strncpy(ae->un.ModelDescription, phba->ModelDesc, 256); - len = strlen(ae->un.ModelDescription); + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)rh + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(ae->un.ModelDescription)); + ad->AttrType = cpu_to_be16(RHBA_MODEL_DESCRIPTION); + strncpy(ae->un.ModelDescription, phba->ModelDesc, + sizeof(ae->un.ModelDescription)); + len = strnlen(ae->un.ModelDescription, + sizeof(ae->un.ModelDescription)); len += (len & 3) ? 
(4 - (len & 3)) : 4; - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); + ad->AttrLen = cpu_to_be16(FOURBYTES + len); ab->EntryCnt++; size += FOURBYTES + len; + if ((size + 8) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto hba_out; /* #6 HBA attribute entry */ - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); - ae->ad.bits.AttrType = be16_to_cpu(HARDWARE_VERSION); - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 8); + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)rh + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 8); + ad->AttrType = cpu_to_be16(RHBA_HARDWARE_VERSION); + ad->AttrLen = cpu_to_be16(FOURBYTES + 8); /* Convert JEDEC ID to ascii for hardware version */ incr = vp->rev.biuRev; for (i = 0; i < 8; i++) { j = (incr & 0xf); if (j <= 9) ae->un.HardwareVersion[7 - i] = - (char)((uint8_t) 0x30 + - (uint8_t) j); + (char)((uint8_t)0x30 + + (uint8_t)j); else ae->un.HardwareVersion[7 - i] = - (char)((uint8_t) 0x61 + - (uint8_t) (j - 10)); + (char)((uint8_t)0x61 + + (uint8_t)(j - 10)); incr = (incr >> 4); } ab->EntryCnt++; size += FOURBYTES + 8; + if ((size + LPFC_FDMI_MAX_AE_SIZE) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto hba_out; /* #7 HBA attribute entry */ - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); - ae->ad.bits.AttrType = be16_to_cpu(DRIVER_VERSION); - strncpy(ae->un.DriverVersion, - lpfc_release_version, 256); - len = strlen(ae->un.DriverVersion); + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)rh + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(ae->un.DriverVersion)); + ad->AttrType = cpu_to_be16(RHBA_DRIVER_VERSION); + strncpy(ae->un.DriverVersion, lpfc_release_version, + sizeof(ae->un.DriverVersion)); + len = strnlen(ae->un.DriverVersion, + sizeof(ae->un.DriverVersion)); len += (len & 3) ? (4 - (len & 3)) : 4; - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); + ad->AttrLen = cpu_to_be16(FOURBYTES + len); ab->EntryCnt++; size += FOURBYTES + len; + if ((size + LPFC_FDMI_MAX_AE_SIZE) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto hba_out; /* #8 HBA attribute entry */ - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); - ae->ad.bits.AttrType = be16_to_cpu(OPTION_ROM_VERSION); - strncpy(ae->un.OptionROMVersion, - phba->OptionROMVersion, 256); - len = strlen(ae->un.OptionROMVersion); + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)rh + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(ae->un.OptionROMVersion)); + ad->AttrType = cpu_to_be16(RHBA_OPTION_ROM_VERSION); + strncpy(ae->un.OptionROMVersion, phba->OptionROMVersion, + sizeof(ae->un.OptionROMVersion)); + len = strnlen(ae->un.OptionROMVersion, + sizeof(ae->un.OptionROMVersion)); len += (len & 3) ? (4 - (len & 3)) : 4; - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); + ad->AttrLen = cpu_to_be16(FOURBYTES + len); ab->EntryCnt++; size += FOURBYTES + len; + if ((size + LPFC_FDMI_MAX_AE_SIZE) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto hba_out; /* #9 HBA attribute entry */ - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); - ae->ad.bits.AttrType = be16_to_cpu(FIRMWARE_VERSION); + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)rh + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(ae->un.FirmwareVersion)); + ad->AttrType = cpu_to_be16(RHBA_FIRMWARE_VERSION); lpfc_decode_firmware_rev(phba, ae->un.FirmwareVersion, 1); - len = strlen(ae->un.FirmwareVersion); + len = strnlen(ae->un.FirmwareVersion, + sizeof(ae->un.FirmwareVersion)); len += (len & 3) ? 
(4 - (len & 3)) : 4; - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); + ad->AttrLen = cpu_to_be16(FOURBYTES + len); ab->EntryCnt++; size += FOURBYTES + len; + if ((size + LPFC_FDMI_MAX_AE_SIZE) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto hba_out; /* #10 HBA attribute entry */ - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); - ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION); - sprintf(ae->un.OsNameVersion, "%s %s %s", - init_utsname()->sysname, - init_utsname()->release, - init_utsname()->version); - len = strlen(ae->un.OsNameVersion); + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)rh + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(ae->un.OsNameVersion)); + ad->AttrType = cpu_to_be16(RHBA_OS_NAME_VERSION); + snprintf(ae->un.OsNameVersion, + sizeof(ae->un.OsNameVersion), + "%s %s %s", + init_utsname()->sysname, + init_utsname()->release, + init_utsname()->version); + len = strnlen(ae->un.OsNameVersion, + sizeof(ae->un.OsNameVersion)); len += (len & 3) ? (4 - (len & 3)) : 4; - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); + ad->AttrLen = cpu_to_be16(FOURBYTES + len); ab->EntryCnt++; size += FOURBYTES + len; + if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto hba_out; /* #11 HBA attribute entry */ - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); - ae->ad.bits.AttrType = be16_to_cpu(MAX_CT_PAYLOAD_LEN); - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4); - ae->un.MaxCTPayloadLen = (65 * 4096); + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)rh + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ad->AttrType = + cpu_to_be16(RHBA_MAX_CT_PAYLOAD_LEN); + ad->AttrLen = cpu_to_be16(FOURBYTES + 4); + ae->un.MaxCTPayloadLen = cpu_to_be32(LPFC_MAX_CT_SIZE); ab->EntryCnt++; size += FOURBYTES + 4; + if ((size + LPFC_FDMI_MAX_AE_SIZE) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto hba_out; - ab->EntryCnt = be32_to_cpu(ab->EntryCnt); + /* + * Currently switches don't seem to support the + * following extended HBA attributes. + */ + if (!(vport->cfg_fdmi_on & LPFC_FDMI_ALL_ATTRIB)) + goto hba_out; + + /* #12 HBA attribute entry */ + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)rh + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(ae->un.NodeSymName)); + ad->AttrType = cpu_to_be16(RHBA_SYM_NODENAME); + len = lpfc_vport_symbolic_node_name(vport, + ae->un.NodeSymName, sizeof(ae->un.NodeSymName)); + len += (len & 3) ? 
(4 - (len & 3)) : 4; + ad->AttrLen = cpu_to_be16(FOURBYTES + len); + ab->EntryCnt++; + size += FOURBYTES + len; +hba_out: + ab->EntryCnt = cpu_to_be32(ab->EntryCnt); /* Total size */ size = GID_REQUEST_SZ - 4 + size; } break; + case SLI_MGMT_RPRT: case SLI_MGMT_RPA: { lpfc_vpd_t *vp; struct serv_parm *hsp; - int len; + int len = 0; vp = &phba->vpd; - CtReq->CommandResponse.bits.CmdRsp = - be16_to_cpu(SLI_MGMT_RPA); - CtReq->CommandResponse.bits.Size = 0; - pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID; - size = sizeof (struct lpfc_name) + FOURBYTES; - memcpy((uint8_t *) & pab->PortName, - (uint8_t *) & vport->fc_sparam.portName, - sizeof (struct lpfc_name)); + if (cmdcode == SLI_MGMT_RPRT) { + rh = (struct lpfc_fdmi_reg_hba *) + &CtReq->un.PortID; + /* HBA Identifier */ + memcpy(&rh->hi.PortName, + &vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + pab = (struct lpfc_fdmi_reg_portattr *) + &rh->rpl.EntryCnt; + } else + pab = (struct lpfc_fdmi_reg_portattr *) + &CtReq->un.PortID; + size = sizeof(struct lpfc_name) + FOURBYTES; + memcpy((uint8_t *)&pab->PortName, + (uint8_t *)&vport->fc_sparam.portName, + sizeof(struct lpfc_name)); pab->ab.EntryCnt = 0; /* #1 Port attribute entry */ - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size); - ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_FC4_TYPES); - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 32); - ae->un.SupportFC4Types[2] = 1; - ae->un.SupportFC4Types[7] = 1; + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(ae->un.FC4Types)); + ad->AttrType = + cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES); + ad->AttrLen = cpu_to_be16(FOURBYTES + 32); + ae->un.FC4Types[0] = 0x40; /* Type 1 - ELS */ + ae->un.FC4Types[1] = 0x80; /* Type 8 - FCP */ + ae->un.FC4Types[4] = 0x80; /* Type 32 - CT */ pab->ab.EntryCnt++; size += FOURBYTES + 32; /* #2 Port attribute entry */ - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size); - ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_SPEED); - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4); - + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_SPEED); + ad->AttrLen = cpu_to_be16(FOURBYTES + 4); ae->un.SupportSpeed = 0; if (phba->lmt & LMT_16Gb) ae->un.SupportSpeed |= HBA_PORTSPEED_16GBIT; @@ -1602,15 +1790,19 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) ae->un.SupportSpeed |= HBA_PORTSPEED_2GBIT; if (phba->lmt & LMT_1Gb) ae->un.SupportSpeed |= HBA_PORTSPEED_1GBIT; + ae->un.SupportSpeed = + cpu_to_be32(ae->un.SupportSpeed); pab->ab.EntryCnt++; size += FOURBYTES + 4; /* #3 Port attribute entry */ - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size); - ae->ad.bits.AttrType = be16_to_cpu(PORT_SPEED); - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4); - switch(phba->fc_linkspeed) { + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ad->AttrType = cpu_to_be16(RPRT_PORT_SPEED); + ad->AttrLen = cpu_to_be16(FOURBYTES + 4); + switch (phba->fc_linkspeed) { case LPFC_LINK_SPEED_1GHZ: ae->un.PortSpeed = HBA_PORTSPEED_1GBIT; break; @@ -1633,93 +1825,273 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) ae->un.PortSpeed = HBA_PORTSPEED_UNKNOWN; break; } + ae->un.PortSpeed = cpu_to_be32(ae->un.PortSpeed); pab->ab.EntryCnt++; size += FOURBYTES + 4; /* #4 Port attribute entry */ - ae = (ATTRIBUTE_ENTRY *) 
((uint8_t *) pab + size); - ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE); - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4); - hsp = (struct serv_parm *) & vport->fc_sparam; + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ad->AttrType = cpu_to_be16(RPRT_MAX_FRAME_SIZE); + ad->AttrLen = cpu_to_be16(FOURBYTES + 4); + hsp = (struct serv_parm *)&vport->fc_sparam; ae->un.MaxFrameSize = - (((uint32_t) hsp->cmn. - bbRcvSizeMsb) << 8) | (uint32_t) hsp->cmn. + (((uint32_t)hsp->cmn. + bbRcvSizeMsb) << 8) | (uint32_t)hsp->cmn. bbRcvSizeLsb; + ae->un.MaxFrameSize = + cpu_to_be32(ae->un.MaxFrameSize); pab->ab.EntryCnt++; size += FOURBYTES + 4; + if ((size + LPFC_FDMI_MAX_AE_SIZE) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto port_out; /* #5 Port attribute entry */ - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size); - ae->ad.bits.AttrType = be16_to_cpu(OS_DEVICE_NAME); - strcpy((char *)ae->un.OsDeviceName, LPFC_DRIVER_NAME); - len = strlen((char *)ae->un.OsDeviceName); + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(ae->un.OsDeviceName)); + ad->AttrType = cpu_to_be16(RPRT_OS_DEVICE_NAME); + strncpy((char *)ae->un.OsDeviceName, LPFC_DRIVER_NAME, + sizeof(ae->un.OsDeviceName)); + len = strnlen((char *)ae->un.OsDeviceName, + sizeof(ae->un.OsDeviceName)); len += (len & 3) ? (4 - (len & 3)) : 4; - ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); + ad->AttrLen = cpu_to_be16(FOURBYTES + len); pab->ab.EntryCnt++; size += FOURBYTES + len; + if ((size + LPFC_FDMI_MAX_AE_SIZE) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto port_out; + + /* #6 Port attribute entry */ + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(ae->un.HostName)); + snprintf(ae->un.HostName, sizeof(ae->un.HostName), "%s", + init_utsname()->nodename); + ad->AttrType = cpu_to_be16(RPRT_HOST_NAME); + len = strnlen(ae->un.HostName, + sizeof(ae->un.HostName)); + len += (len & 3) ? (4 - (len & 3)) : 4; + ad->AttrLen = + cpu_to_be16(FOURBYTES + len); + pab->ab.EntryCnt++; + size += FOURBYTES + len; + if ((size + sizeof(struct lpfc_name)) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto port_out; - if (vport->cfg_fdmi_on == 2) { - /* #6 Port attribute entry */ - ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + - size); - ae->ad.bits.AttrType = be16_to_cpu(HOST_NAME); - sprintf(ae->un.HostName, "%s", - init_utsname()->nodename); - len = strlen(ae->un.HostName); - len += (len & 3) ? (4 - (len & 3)) : 4; - ae->ad.bits.AttrLen = - be16_to_cpu(FOURBYTES + len); - pab->ab.EntryCnt++; - size += FOURBYTES + len; - } - - pab->ab.EntryCnt = be32_to_cpu(pab->ab.EntryCnt); + /* + * Currently switches don't seem to support the + * following extended Port attributes. 
+ */ + if (!(vport->cfg_fdmi_on & LPFC_FDMI_ALL_ATTRIB)) + goto port_out; + + /* #7 Port attribute entry */ + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(struct lpfc_name)); + ad->AttrType = cpu_to_be16(RPRT_NODENAME); + ad->AttrLen = cpu_to_be16(FOURBYTES + + sizeof(struct lpfc_name)); + memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName, + sizeof(struct lpfc_name)); + pab->ab.EntryCnt++; + size += FOURBYTES + sizeof(struct lpfc_name); + if ((size + sizeof(struct lpfc_name)) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto port_out; + + /* #8 Port attribute entry */ + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(struct lpfc_name)); + ad->AttrType = cpu_to_be16(RPRT_PORTNAME); + ad->AttrLen = cpu_to_be16(FOURBYTES + + sizeof(struct lpfc_name)); + memcpy(&ae->un.PortName, &vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + pab->ab.EntryCnt++; + size += FOURBYTES + sizeof(struct lpfc_name); + if ((size + LPFC_FDMI_MAX_AE_SIZE) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto port_out; + + /* #9 Port attribute entry */ + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(ae->un.NodeSymName)); + ad->AttrType = cpu_to_be16(RPRT_SYM_PORTNAME); + len = lpfc_vport_symbolic_port_name(vport, + ae->un.NodeSymName, sizeof(ae->un.NodeSymName)); + len += (len & 3) ? (4 - (len & 3)) : 4; + ad->AttrLen = cpu_to_be16(FOURBYTES + len); + pab->ab.EntryCnt++; + size += FOURBYTES + len; + if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto port_out; + + /* #10 Port attribute entry */ + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ad->AttrType = cpu_to_be16(RPRT_PORT_TYPE); + ae->un.PortState = 0; + ad->AttrLen = cpu_to_be16(FOURBYTES + 4); + pab->ab.EntryCnt++; + size += FOURBYTES + 4; + if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto port_out; + + /* #11 Port attribute entry */ + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_CLASS); + ae->un.SupportClass = + cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3); + ad->AttrLen = cpu_to_be16(FOURBYTES + 4); + pab->ab.EntryCnt++; + size += FOURBYTES + 4; + if ((size + sizeof(struct lpfc_name)) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto port_out; + + /* #12 Port attribute entry */ + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(struct lpfc_name)); + ad->AttrType = cpu_to_be16(RPRT_FABRICNAME); + ad->AttrLen = cpu_to_be16(FOURBYTES + + sizeof(struct lpfc_name)); + memcpy(&ae->un.FabricName, &vport->fabric_nodename, + sizeof(struct lpfc_name)); + pab->ab.EntryCnt++; + size += FOURBYTES + sizeof(struct lpfc_name); + if ((size + LPFC_FDMI_MAX_AE_SIZE) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto port_out; + + /* #13 Port attribute entry */ + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(ae->un.FC4Types)); + ad->AttrType = + cpu_to_be16(RPRT_ACTIVE_FC4_TYPES); + ad->AttrLen = cpu_to_be16(FOURBYTES + 32); + ae->un.FC4Types[0] = 0x40; /* Type 1 - ELS */ + ae->un.FC4Types[1] = 0x80; /* Type 8 - FCP */ + 
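The supported and active FC-4 type attributes in this hunk are 32-byte bitmaps of FC-4 type codes, and the values written into them (0x40 in byte 0 for ELS = type 1, 0x80 in byte 1 for FCP = type 8, and 0x80 in byte 4 for CT = type 32, the last of which follows just below) imply an MSB-first layout: type N lives in byte N/8 under mask 0x80 >> (N % 8). A small sketch under that assumption, with a hypothetical set_fc4_type() helper:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Mark FC-4 type 'type' in a 32-byte (256-bit) MSB-first bitmap, as the
 * FC4Types assignments in this hunk imply. */
static void set_fc4_type(uint8_t map[32], unsigned int type)
{
	map[type / 8] |= 0x80 >> (type % 8);
}

int main(void)
{
	uint8_t map[32];

	memset(map, 0, sizeof(map));
	set_fc4_type(map, 1);   /* ELS */
	set_fc4_type(map, 8);   /* FCP */
	set_fc4_type(map, 32);  /* CT  */
	assert(map[0] == 0x40 && map[1] == 0x80 && map[4] == 0x80);
	return 0;
}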
ae->un.FC4Types[4] = 0x80; /* Type 32 - CT */ + pab->ab.EntryCnt++; + size += FOURBYTES + 32; + if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto port_out; + + /* #257 Port attribute entry */ + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ad->AttrType = cpu_to_be16(RPRT_PORT_STATE); + ae->un.PortState = 0; + ad->AttrLen = cpu_to_be16(FOURBYTES + 4); + pab->ab.EntryCnt++; + size += FOURBYTES + 4; + if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto port_out; + + /* #258 Port attribute entry */ + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ad->AttrType = cpu_to_be16(RPRT_DISC_PORT); + ae->un.PortState = lpfc_find_map_node(vport); + ae->un.PortState = cpu_to_be32(ae->un.PortState); + ad->AttrLen = cpu_to_be16(FOURBYTES + 4); + pab->ab.EntryCnt++; + size += FOURBYTES + 4; + if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto port_out; + + /* #259 Port attribute entry */ + ad = (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size); + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ad->AttrType = cpu_to_be16(RPRT_PORT_ID); + ae->un.PortId = cpu_to_be32(vport->fc_myDID); + ad->AttrLen = cpu_to_be16(FOURBYTES + 4); + pab->ab.EntryCnt++; + size += FOURBYTES + 4; +port_out: + pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt); /* Total size */ size = GID_REQUEST_SZ - 4 + size; } break; + case SLI_MGMT_GHAT: + case SLI_MGMT_GRPL: + rsp_size = FC_MAX_NS_RSP; case SLI_MGMT_DHBA: - CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DHBA); - CtReq->CommandResponse.bits.Size = 0; - pe = (PORT_ENTRY *) & CtReq->un.PortID; - memcpy((uint8_t *) & pe->PortName, - (uint8_t *) & vport->fc_sparam.portName, - sizeof (struct lpfc_name)); - size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name); + case SLI_MGMT_DHAT: + pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID; + memcpy((uint8_t *)&pe->PortName, + (uint8_t *)&vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name); break; + case SLI_MGMT_GPAT: + case SLI_MGMT_GPAS: + rsp_size = FC_MAX_NS_RSP; case SLI_MGMT_DPRT: - CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DPRT); - CtReq->CommandResponse.bits.Size = 0; - pe = (PORT_ENTRY *) & CtReq->un.PortID; - memcpy((uint8_t *) & pe->PortName, - (uint8_t *) & vport->fc_sparam.portName, - sizeof (struct lpfc_name)); - size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name); + case SLI_MGMT_DPA: + pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID; + memcpy((uint8_t *)&pe->PortName, + (uint8_t *)&vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name); + break; + case SLI_MGMT_GRHL: + size = GID_REQUEST_SZ - 4; break; + default: + lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, + "0298 FDMI cmdcode x%x not supported\n", + cmdcode); + goto fdmi_cmd_free_bmpvirt; } + CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size); - bpl = (struct ulp_bde64 *) bmp->virt; - bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) ); - bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) ); + bpl = (struct ulp_bde64 *)bmp->virt; + bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys)); + bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys)); bpl->tus.f.bdeFlags = 0; bpl->tus.f.bdeSize = size; - bpl->tus.w = le32_to_cpu(bpl->tus.w); - - cmpl = lpfc_cmpl_ct_cmd_fdmi; - /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count + 
/* + * The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count * to hold ndlp reference for the corresponding callback function. */ - if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0)) + if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, 0)) return 0; - /* Decrement ndlp reference count to release ndlp reference held + /* + * Decrement ndlp reference count to release ndlp reference held * for the failed command's callback function. */ lpfc_nlp_put(ndlp); +fdmi_cmd_free_bmpvirt: lpfc_mbuf_free(phba, bmp->virt, bmp->phys); fdmi_cmd_free_bmp: kfree(bmp); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 5633e7dadc08..513edcb0c2da 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2007-2014 Emulex. All rights reserved. * + * Copyright (C) 2007-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index c66088d0fd2a..851e8efe364e 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2014 Emulex. All rights reserved. * + * Copyright (C) 2004-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -2243,8 +2243,7 @@ lpfc_adisc_done(struct lpfc_vport *vport) */ if (vport->port_state < LPFC_VPORT_READY) { /* If we get here, there is nothing to ADISC */ - if (vport->port_type == LPFC_PHYSICAL_PORT) - lpfc_issue_clear_la(phba, vport); + lpfc_issue_clear_la(phba, vport); if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { vport->num_disc_nodes = 0; /* go thru NPR list, issue ELS PLOGIs */ @@ -3338,7 +3337,11 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* FLOGI retry policy */ retry = 1; /* retry FLOGI forever */ - maxretry = 0; + if (phba->link_flag != LS_LOOPBACK_MODE) + maxretry = 0; + else + maxretry = 2; + if (cmdiocb->retry >= 100) delay = 5000; else if (cmdiocb->retry >= 32) @@ -3701,6 +3704,11 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); if (ndlp) { + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, + "0006 rpi%x DID:%x flg:%x %d map:%x %p\n", + ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, + atomic_read(&ndlp->kref.refcount), + ndlp->nlp_usg_map, ndlp); if (NLP_CHK_NODE_ACT(ndlp)) { lpfc_nlp_put(ndlp); /* This is the end of the default RPI cleanup logic for @@ -5198,7 +5206,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, port_state = vport->port_state; vport->fc_flag |= FC_PT2PT; vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); - vport->port_state = LPFC_FLOGI; spin_unlock_irq(shost->host_lock); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3311 Rcv Flogi PS x%x new PS x%x " @@ -7173,7 +7180,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) return; } - if (vport->cfg_fdmi_on) { + if (vport->cfg_fdmi_on & LPFC_FDMI_SUPPORT) { /* If this is the first time, allocate an ndlp and initialize * it. 
Otherwise, make sure the node is enabled and then do the * login. diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 5452f1f4220e..2500f15d437f 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2014 Emulex. All rights reserved. * + * Copyright (C) 2004-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -3439,6 +3439,11 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) pmb->context1 = NULL; pmb->context2 = NULL; + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n", + ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, + atomic_read(&ndlp->kref.refcount), + ndlp->nlp_usg_map, ndlp); if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; @@ -3855,6 +3860,11 @@ out: ndlp->nlp_flag |= NLP_RPI_REGISTERED; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "0003 rpi:%x DID:%x flg:%x %d map%x %p\n", + ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, + atomic_read(&ndlp->kref.refcount), + ndlp->nlp_usg_map, ndlp); if (vport->port_state < LPFC_VPORT_READY) { /* Link up discovery requires Fabric registration. */ @@ -4250,8 +4260,15 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap; spin_unlock_irqrestore(&phba->ndlp_lock, flags); - if (vport->phba->sli_rev == LPFC_SLI_REV4) + if (vport->phba->sli_rev == LPFC_SLI_REV4) { ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0008 rpi:%x DID:%x flg:%x refcnt:%d " + "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_flag, + atomic_read(&ndlp->kref.refcount), + ndlp->nlp_usg_map, ndlp); + } if (state != NLP_STE_UNUSED_NODE) @@ -4276,9 +4293,12 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) return; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); - if (vport->phba->sli_rev == LPFC_SLI_REV4) + if (vport->phba->sli_rev == LPFC_SLI_REV4) { lpfc_cleanup_vports_rrqs(vport, ndlp); - lpfc_nlp_put(ndlp); + lpfc_unreg_rpi(vport, ndlp); + } else { + lpfc_nlp_put(ndlp); + } return; } @@ -4515,7 +4535,17 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) mbox->context1 = ndlp; mbox->mbox_cmpl = lpfc_nlp_logo_unreg; } else { - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + if (phba->sli_rev == LPFC_SLI_REV4 && + (!(vport->load_flag & FC_UNLOADING)) && + (bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_2)) { + mbox->context1 = lpfc_nlp_get(ndlp); + mbox->mbox_cmpl = + lpfc_sli4_unreg_rpi_cmpl_clr; + } else + mbox->mbox_cmpl = + lpfc_sli_def_mbox_cmpl; } rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); @@ -4741,6 +4771,11 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* For this case we need to cleanup the default rpi * allocated by the firmware. 
*/ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n", + ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, + atomic_read(&ndlp->kref.refcount), + ndlp->nlp_usg_map, ndlp); if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) { rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID, @@ -5070,8 +5105,7 @@ lpfc_disc_start(struct lpfc_vport *vport) !(vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_RSCN_MODE) && (phba->sli_rev < LPFC_SLI_REV4)) { - if (vport->port_type == LPFC_PHYSICAL_PORT) - lpfc_issue_clear_la(phba, vport); + lpfc_issue_clear_la(phba, vport); lpfc_issue_reg_vpi(phba, vport); return; } @@ -5082,8 +5116,7 @@ lpfc_disc_start(struct lpfc_vport *vport) */ if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) { /* If we get here, there is nothing to ADISC */ - if (vport->port_type == LPFC_PHYSICAL_PORT) - lpfc_issue_clear_la(phba, vport); + lpfc_issue_clear_la(phba, vport); if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { vport->num_disc_nodes = 0; @@ -5484,18 +5517,22 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ndlp->nlp_flag |= NLP_RPI_REGISTERED; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); - + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n", + ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, + atomic_read(&ndlp->kref.refcount), + ndlp->nlp_usg_map, ndlp); /* * Start issuing Fabric-Device Management Interface (FDMI) command to * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if * fdmi-on=2 (supporting RPA/hostnmae) */ - if (vport->cfg_fdmi_on == 1) - lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); - else + if (vport->cfg_fdmi_on & LPFC_FDMI_REG_DELAY) mod_timer(&vport->fc_fdmitmo, jiffies + msecs_to_jiffies(1000 * 60)); + else + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); /* decrement the node reference count held for this callback * function. @@ -5650,6 +5687,13 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, INIT_LIST_HEAD(&ndlp->nlp_listp); if (vport->phba->sli_rev == LPFC_SLI_REV4) { ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0007 rpi:%x DID:%x flg:%x refcnt:%d " + "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_flag, + atomic_read(&ndlp->kref.refcount), + ndlp->nlp_usg_map, ndlp); + ndlp->active_rrqs_xri_bitmap = mempool_alloc(vport->phba->active_rrq_pool, GFP_KERNEL); @@ -5684,9 +5728,9 @@ lpfc_nlp_release(struct kref *kref) lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "0279 lpfc_nlp_release: ndlp:x%p did %x " - "usgmap:x%x refcnt:%d\n", + "usgmap:x%x refcnt:%d rpi:%x\n", (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map, - atomic_read(&ndlp->kref.refcount)); + atomic_read(&ndlp->kref.refcount), ndlp->nlp_rpi); /* remove ndlp from action. */ lpfc_nlp_remove(ndlp->vport, ndlp); diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 236259252379..37beb9dc1311 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2014 Emulex. All rights reserved. * + * Copyright (C) 2004-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -107,6 +107,7 @@ struct lpfc_sli_ct_request { uint8_t ReasonCode; uint8_t Explanation; uint8_t VendorUnique; +#define LPFC_CT_PREAMBLE 20 /* Size of CTReq + 4 up to here */ union { uint32_t PortID; @@ -170,6 +171,8 @@ struct lpfc_sli_ct_request { } un; }; +#define LPFC_MAX_CT_SIZE (60 * 4096) + #define SLI_CT_REVISION 1 #define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ sizeof(struct gid)) @@ -1007,78 +1010,45 @@ typedef struct _ELS_PKT { /* Structure is in Big Endian format */ } un; } ELS_PKT; -/* - * FDMI - * HBA MAnagement Operations Command Codes - */ -#define SLI_MGMT_GRHL 0x100 /* Get registered HBA list */ -#define SLI_MGMT_GHAT 0x101 /* Get HBA attributes */ -#define SLI_MGMT_GRPL 0x102 /* Get registered Port list */ -#define SLI_MGMT_GPAT 0x110 /* Get Port attributes */ -#define SLI_MGMT_RHBA 0x200 /* Register HBA */ -#define SLI_MGMT_RHAT 0x201 /* Register HBA attributes */ -#define SLI_MGMT_RPRT 0x210 /* Register Port */ -#define SLI_MGMT_RPA 0x211 /* Register Port attributes */ -#define SLI_MGMT_DHBA 0x300 /* De-register HBA */ -#define SLI_MGMT_DPRT 0x310 /* De-register Port */ +/******** FDMI ********/ -/* - * Management Service Subtypes - */ -#define SLI_CT_FDMI_Subtypes 0x10 +/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */ +#define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */ /* - * HBA Management Service Reject Code + * Registered Port List Format */ -#define REJECT_CODE 0x9 /* Unable to perform command request */ +struct lpfc_fdmi_reg_port_list { + uint32_t EntryCnt; + uint32_t pe; /* Variable-length array */ +}; -/* - * HBA Management Service Reject Reason Code - * Please refer to the Reason Codes above - */ -/* - * HBA Attribute Types - */ -#define NODE_NAME 0x1 -#define MANUFACTURER 0x2 -#define SERIAL_NUMBER 0x3 -#define MODEL 0x4 -#define MODEL_DESCRIPTION 0x5 -#define HARDWARE_VERSION 0x6 -#define DRIVER_VERSION 0x7 -#define OPTION_ROM_VERSION 0x8 -#define FIRMWARE_VERSION 0x9 -#define OS_NAME_VERSION 0xa -#define MAX_CT_PAYLOAD_LEN 0xb +/* Definitions for HBA / Port attribute entries */ -/* - * Port Attrubute Types - */ -#define SUPPORTED_FC4_TYPES 0x1 -#define SUPPORTED_SPEED 0x2 -#define PORT_SPEED 0x3 -#define MAX_FRAME_SIZE 0x4 -#define OS_DEVICE_NAME 0x5 -#define HOST_NAME 0x6 - -union AttributesDef { +struct lpfc_fdmi_attr_def { /* Defined in TLV format */ /* Structure is in Big Endian format */ - struct { - uint32_t AttrType:16; - uint32_t AttrLen:16; - } bits; - uint32_t word; + uint32_t AttrType:16; + uint32_t AttrLen:16; + uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */ }; -/* - * HBA Attribute Entry (8 - 260 bytes) - */ -typedef struct { - union AttributesDef ad; +/* Attribute Entry */ +struct lpfc_fdmi_attr_entry { union { uint32_t VendorSpecific; + uint32_t SupportClass; + uint32_t SupportSpeed; + uint32_t PortSpeed; + uint32_t MaxFrameSize; + uint32_t MaxCTPayloadLen; + uint32_t PortState; + uint32_t PortId; + struct lpfc_name NodeName; + struct lpfc_name PortName; + struct lpfc_name FabricName; + uint8_t FC4Types[32]; uint8_t Manufacturer[64]; uint8_t SerialNumber[64]; uint8_t Model[256]; @@ -1087,97 +1057,115 @@ typedef struct { uint8_t DriverVersion[256]; uint8_t OptionROMVersion[256]; uint8_t FirmwareVersion[256]; - struct lpfc_name NodeName; - uint8_t SupportFC4Types[32]; - uint32_t SupportSpeed; - uint32_t PortSpeed; - uint32_t MaxFrameSize; + uint8_t OsHostName[256]; + uint8_t NodeSymName[256]; uint8_t OsDeviceName[256]; uint8_t 
OsNameVersion[256]; - uint32_t MaxCTPayloadLen; uint8_t HostName[256]; } un; -} ATTRIBUTE_ENTRY; +}; + +#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry) /* * HBA Attribute Block */ -typedef struct { - uint32_t EntryCnt; /* Number of HBA attribute entries */ - ATTRIBUTE_ENTRY Entry; /* Variable-length array */ -} ATTRIBUTE_BLOCK; +struct lpfc_fdmi_attr_block { + uint32_t EntryCnt; /* Number of HBA attribute entries */ + struct lpfc_fdmi_attr_entry Entry; /* Variable-length array */ +}; /* * Port Entry */ -typedef struct { +struct lpfc_fdmi_port_entry { struct lpfc_name PortName; -} PORT_ENTRY; +}; /* * HBA Identifier */ -typedef struct { +struct lpfc_fdmi_hba_ident { struct lpfc_name PortName; -} HBA_IDENTIFIER; - -/* - * Registered Port List Format - */ -typedef struct { - uint32_t EntryCnt; - PORT_ENTRY pe; /* Variable-length array */ -} REG_PORT_LIST; +}; /* * Register HBA(RHBA) */ -typedef struct { - HBA_IDENTIFIER hi; - REG_PORT_LIST rpl; /* variable-length array */ -/* ATTRIBUTE_BLOCK ab; */ -} REG_HBA; +struct lpfc_fdmi_reg_hba { + struct lpfc_fdmi_hba_ident hi; + struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */ +/* struct lpfc_fdmi_attr_block ab; */ +}; /* * Register HBA Attributes (RHAT) */ -typedef struct { +struct lpfc_fdmi_reg_hbaattr { struct lpfc_name HBA_PortName; - ATTRIBUTE_BLOCK ab; -} REG_HBA_ATTRIBUTE; + struct lpfc_fdmi_attr_block ab; +}; /* * Register Port Attributes (RPA) */ -typedef struct { +struct lpfc_fdmi_reg_portattr { struct lpfc_name PortName; - ATTRIBUTE_BLOCK ab; -} REG_PORT_ATTRIBUTE; + struct lpfc_fdmi_attr_block ab; +}; /* - * Get Registered HBA List (GRHL) Accept Payload Format + * HBA MAnagement Operations Command Codes */ -typedef struct { - uint32_t HBA__Entry_Cnt; /* Number of Registered HBA Identifiers */ - struct lpfc_name HBA_PortName; /* Variable-length array */ -} GRHL_ACC_PAYLOAD; +#define SLI_MGMT_GRHL 0x100 /* Get registered HBA list */ +#define SLI_MGMT_GHAT 0x101 /* Get HBA attributes */ +#define SLI_MGMT_GRPL 0x102 /* Get registered Port list */ +#define SLI_MGMT_GPAT 0x110 /* Get Port attributes */ +#define SLI_MGMT_GPAS 0x120 /* Get Port Statistics */ +#define SLI_MGMT_RHBA 0x200 /* Register HBA */ +#define SLI_MGMT_RHAT 0x201 /* Register HBA attributes */ +#define SLI_MGMT_RPRT 0x210 /* Register Port */ +#define SLI_MGMT_RPA 0x211 /* Register Port attributes */ +#define SLI_MGMT_DHBA 0x300 /* De-register HBA */ +#define SLI_MGMT_DHAT 0x301 /* De-register HBA attributes */ +#define SLI_MGMT_DPRT 0x310 /* De-register Port */ +#define SLI_MGMT_DPA 0x311 /* De-register Port attributes */ /* - * Get Registered Port List (GRPL) Accept Payload Format + * HBA Attribute Types */ -typedef struct { - uint32_t RPL_Entry_Cnt; /* Number of Registered Port Entries */ - PORT_ENTRY Reg_Port_Entry[1]; /* Variable-length array */ -} GRPL_ACC_PAYLOAD; +#define RHBA_NODENAME 0x1 /* 8 byte WWNN */ +#define RHBA_MANUFACTURER 0x2 /* 4 to 64 byte ASCII string */ +#define RHBA_SERIAL_NUMBER 0x3 /* 4 to 64 byte ASCII string */ +#define RHBA_MODEL 0x4 /* 4 to 256 byte ASCII string */ +#define RHBA_MODEL_DESCRIPTION 0x5 /* 4 to 256 byte ASCII string */ +#define RHBA_HARDWARE_VERSION 0x6 /* 4 to 256 byte ASCII string */ +#define RHBA_DRIVER_VERSION 0x7 /* 4 to 256 byte ASCII string */ +#define RHBA_OPTION_ROM_VERSION 0x8 /* 4 to 256 byte ASCII string */ +#define RHBA_FIRMWARE_VERSION 0x9 /* 4 to 256 byte ASCII string */ +#define RHBA_OS_NAME_VERSION 0xa /* 4 to 256 byte ASCII string */ +#define RHBA_MAX_CT_PAYLOAD_LEN 0xb /* 32-bit 
unsigned int */ +#define RHBA_SYM_NODENAME 0xc /* 4 to 256 byte ASCII string */ /* - * Get Port Attributes (GPAT) Accept Payload Format + * Port Attrubute Types */ - -typedef struct { - ATTRIBUTE_BLOCK pab; -} GPAT_ACC_PAYLOAD; - +#define RPRT_SUPPORTED_FC4_TYPES 0x1 /* 32 byte binary array */ +#define RPRT_SUPPORTED_SPEED 0x2 /* 32-bit unsigned int */ +#define RPRT_PORT_SPEED 0x3 /* 32-bit unsigned int */ +#define RPRT_MAX_FRAME_SIZE 0x4 /* 32-bit unsigned int */ +#define RPRT_OS_DEVICE_NAME 0x5 /* 4 to 256 byte ASCII string */ +#define RPRT_HOST_NAME 0x6 /* 4 to 256 byte ASCII string */ +#define RPRT_NODENAME 0x7 /* 8 byte WWNN */ +#define RPRT_PORTNAME 0x8 /* 8 byte WWNN */ +#define RPRT_SYM_PORTNAME 0x9 /* 4 to 256 byte ASCII string */ +#define RPRT_PORT_TYPE 0xa /* 32-bit unsigned int */ +#define RPRT_SUPPORTED_CLASS 0xb /* 32-bit unsigned int */ +#define RPRT_FABRICNAME 0xc /* 8 byte Fabric WWNN */ +#define RPRT_ACTIVE_FC4_TYPES 0xd /* 32 byte binary array */ +#define RPRT_PORT_STATE 0x101 /* 32-bit unsigned int */ +#define RPRT_DISC_PORT 0x102 /* 32-bit unsigned int */ +#define RPRT_PORT_ID 0x103 /* 32-bit unsigned int */ /* * Begin HBA configuration parameters. diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index f432ec180cf8..1813c45946f4 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2014 Emulex. All rights reserved. * + * Copyright (C) 2009-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -3085,6 +3085,9 @@ struct lpfc_acqe_link { #define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2 #define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3 #define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4 +#define LPFC_ASYNC_LINK_SPEED_20GBPS 0x5 +#define LPFC_ASYNC_LINK_SPEED_25GBPS 0x6 +#define LPFC_ASYNC_LINK_SPEED_40GBPS 0x7 #define lpfc_acqe_link_duplex_SHIFT 16 #define lpfc_acqe_link_duplex_MASK 0x000000FF #define lpfc_acqe_link_duplex_WORD word0 @@ -3166,7 +3169,7 @@ struct lpfc_acqe_fc_la { #define lpfc_acqe_fc_la_speed_SHIFT 24 #define lpfc_acqe_fc_la_speed_MASK 0x000000FF #define lpfc_acqe_fc_la_speed_WORD word0 -#define LPFC_FC_LA_SPEED_UNKOWN 0x0 +#define LPFC_FC_LA_SPEED_UNKNOWN 0x0 #define LPFC_FC_LA_SPEED_1G 0x1 #define LPFC_FC_LA_SPEED_2G 0x2 #define LPFC_FC_LA_SPEED_4G 0x4 @@ -3244,6 +3247,7 @@ struct lpfc_acqe_sli { #define LPFC_SLI_EVENT_TYPE_NVLOG_POST 0x4 #define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5 #define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9 +#define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA }; /* diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 0b2c53af85c7..e8c8c1ecc1f5 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2014 Emulex. All rights reserved. * + * Copyright (C) 2004-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -1330,13 +1330,14 @@ lpfc_offline_eratt(struct lpfc_hba *phba) void lpfc_sli4_offline_eratt(struct lpfc_hba *phba) { + spin_lock_irq(&phba->hbalock); + phba->link_state = LPFC_HBA_ERROR; + spin_unlock_irq(&phba->hbalock); + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); lpfc_offline(phba); - lpfc_sli4_brdreset(phba); lpfc_hba_down_post(phba); - lpfc_sli4_post_status_check(phba); lpfc_unblock_mgmt_io(phba); - phba->link_state = LPFC_HBA_ERROR; } /** @@ -1629,6 +1630,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) uint32_t uerrlo_reg, uemasklo_reg; uint32_t pci_rd_rc1, pci_rd_rc2; bool en_rn_msg = true; + struct temp_event temp_event_data; int rc; /* If the pci channel is offline, ignore possible errors, since @@ -1636,9 +1638,6 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) */ if (pci_channel_offline(phba->pcidev)) return; - /* If resets are disabled then leave the HBA alone and return */ - if (!phba->cfg_enable_hba_reset) - return; if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); switch (if_type) { @@ -1654,6 +1653,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) return; lpfc_sli4_offline_eratt(phba); break; + case LPFC_SLI_INTF_IF_TYPE_2: pci_rd_rc1 = lpfc_readl( phba->sli4_hba.u.if_type2.STATUSregaddr, @@ -1668,15 +1668,27 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { - /* TODO: Register for Overtemp async events. */ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2889 Port Overtemperature event, " - "taking port offline\n"); + "taking port offline Data: x%x x%x\n", + reg_err1, reg_err2); + + temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; + temp_event_data.event_code = LPFC_CRIT_TEMP; + temp_event_data.data = 0xFFFFFFFF; + + shost = lpfc_shost_from_vport(phba->pport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(temp_event_data), + (char *)&temp_event_data, + SCSI_NL_VID_TYPE_PCI + | PCI_VENDOR_ID_EMULEX); + spin_lock_irq(&phba->hbalock); phba->over_temp_state = HBA_OVER_TEMP; spin_unlock_irq(&phba->hbalock); lpfc_sli4_offline_eratt(phba); - break; + return; } if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { @@ -1693,6 +1705,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3145 Port Down: Provisioning\n"); + /* If resets are disabled then leave the HBA alone and return */ + if (!phba->cfg_enable_hba_reset) + return; + /* Check port status register for function reset */ rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, en_rn_msg); @@ -2759,9 +2775,19 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba) list_for_each_entry_safe(ndlp, next_ndlp, &vports[i]->fc_nodes, nlp_listp) { - if (NLP_CHK_NODE_ACT(ndlp)) + if (NLP_CHK_NODE_ACT(ndlp)) { ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(phba); + lpfc_printf_vlog(ndlp->vport, KERN_INFO, + LOG_NODE, + "0009 rpi:%x DID:%x " + "flg:%x map:%x %p\n", + ndlp->nlp_rpi, + ndlp->nlp_DID, + ndlp->nlp_flag, + ndlp->nlp_usg_map, + ndlp); + } } } } @@ -2925,8 +2951,18 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) * RPI. Get a new RPI when the adapter port * comes back online. 
*/ - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { + lpfc_printf_vlog(ndlp->vport, + KERN_INFO, LOG_NODE, + "0011 lpfc_offline: " + "ndlp:x%p did %x " + "usgmap:x%x rpi:%x\n", + ndlp, ndlp->nlp_DID, + ndlp->nlp_usg_map, + ndlp->nlp_rpi); + lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); + } lpfc_unreg_rpi(vports[i], ndlp); } } @@ -3241,12 +3277,17 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) struct Scsi_Host *shost; int error = 0; - if (dev != &phba->pcidev->dev) + if (dev != &phba->pcidev->dev) { shost = scsi_host_alloc(&lpfc_vport_template, sizeof(struct lpfc_vport)); - else - shost = scsi_host_alloc(&lpfc_template, + } else { + if (phba->sli_rev == LPFC_SLI_REV4) + shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport)); + else + shost = scsi_host_alloc(&lpfc_template_s3, + sizeof(struct lpfc_vport)); + } if (!shost) goto out; @@ -3685,6 +3726,11 @@ lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, case LPFC_ASYNC_LINK_SPEED_10GBPS: link_speed = LPFC_LINK_SPEED_10GHZ; break; + case LPFC_ASYNC_LINK_SPEED_20GBPS: + case LPFC_ASYNC_LINK_SPEED_25GBPS: + case LPFC_ASYNC_LINK_SPEED_40GBPS: + link_speed = LPFC_LINK_SPEED_UNKNOWN; + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0483 Invalid link-attention link speed: x%x\n", @@ -3756,46 +3802,55 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, switch (evt_code) { case LPFC_TRAILER_CODE_LINK: switch (speed_code) { - case LPFC_EVT_CODE_LINK_NO_LINK: + case LPFC_ASYNC_LINK_SPEED_ZERO: port_speed = 0; break; - case LPFC_EVT_CODE_LINK_10_MBIT: + case LPFC_ASYNC_LINK_SPEED_10MBPS: port_speed = 10; break; - case LPFC_EVT_CODE_LINK_100_MBIT: + case LPFC_ASYNC_LINK_SPEED_100MBPS: port_speed = 100; break; - case LPFC_EVT_CODE_LINK_1_GBIT: + case LPFC_ASYNC_LINK_SPEED_1GBPS: port_speed = 1000; break; - case LPFC_EVT_CODE_LINK_10_GBIT: + case LPFC_ASYNC_LINK_SPEED_10GBPS: port_speed = 10000; break; + case LPFC_ASYNC_LINK_SPEED_20GBPS: + port_speed = 20000; + break; + case LPFC_ASYNC_LINK_SPEED_25GBPS: + port_speed = 25000; + break; + case LPFC_ASYNC_LINK_SPEED_40GBPS: + port_speed = 40000; + break; default: port_speed = 0; } break; case LPFC_TRAILER_CODE_FC: switch (speed_code) { - case LPFC_EVT_CODE_FC_NO_LINK: + case LPFC_FC_LA_SPEED_UNKNOWN: port_speed = 0; break; - case LPFC_EVT_CODE_FC_1_GBAUD: + case LPFC_FC_LA_SPEED_1G: port_speed = 1000; break; - case LPFC_EVT_CODE_FC_2_GBAUD: + case LPFC_FC_LA_SPEED_2G: port_speed = 2000; break; - case LPFC_EVT_CODE_FC_4_GBAUD: + case LPFC_FC_LA_SPEED_4G: port_speed = 4000; break; - case LPFC_EVT_CODE_FC_8_GBAUD: + case LPFC_FC_LA_SPEED_8G: port_speed = 8000; break; - case LPFC_EVT_CODE_FC_10_GBAUD: + case LPFC_FC_LA_SPEED_10G: port_speed = 10000; break; - case LPFC_EVT_CODE_FC_16_GBAUD: + case LPFC_FC_LA_SPEED_16G: port_speed = 16000; break; default: @@ -4044,18 +4099,21 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) char port_name; char message[128]; uint8_t status; + uint8_t evt_type; + struct temp_event temp_event_data; struct lpfc_acqe_misconfigured_event *misconfigured; + struct Scsi_Host *shost; + + evt_type = bf_get(lpfc_trailer_type, acqe_sli); - /* special case misconfigured event as it contains data for all ports */ - if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != - LPFC_SLI_INTF_IF_TYPE_2) || - (bf_get(lpfc_trailer_type, acqe_sli) != - LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) { + /* Special case Lancer */ + if (bf_get(lpfc_sli_intf_if_type, 
&phba->sli4_hba.sli_intf) != + LPFC_SLI_INTF_IF_TYPE_2) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2901 Async SLI event - Event Data1:x%08x Event Data2:" "x%08x SLI Event Type:%d\n", acqe_sli->event_data1, acqe_sli->event_data2, - bf_get(lpfc_trailer_type, acqe_sli)); + evt_type); return; } @@ -4063,58 +4121,107 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) if (port_name == 0x00) port_name = '?'; /* get port name is empty */ - misconfigured = (struct lpfc_acqe_misconfigured_event *) + switch (evt_type) { + case LPFC_SLI_EVENT_TYPE_OVER_TEMP: + temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; + temp_event_data.event_code = LPFC_THRESHOLD_TEMP; + temp_event_data.data = (uint32_t)acqe_sli->event_data1; + + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3190 Over Temperature:%d Celsius- Port Name %c\n", + acqe_sli->event_data1, port_name); + + shost = lpfc_shost_from_vport(phba->pport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(temp_event_data), + (char *)&temp_event_data, + SCSI_NL_VID_TYPE_PCI + | PCI_VENDOR_ID_EMULEX); + break; + case LPFC_SLI_EVENT_TYPE_NORM_TEMP: + temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; + temp_event_data.event_code = LPFC_NORMAL_TEMP; + temp_event_data.data = (uint32_t)acqe_sli->event_data1; + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3191 Normal Temperature:%d Celsius - Port Name %c\n", + acqe_sli->event_data1, port_name); + + shost = lpfc_shost_from_vport(phba->pport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(temp_event_data), + (char *)&temp_event_data, + SCSI_NL_VID_TYPE_PCI + | PCI_VENDOR_ID_EMULEX); + break; + case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: + misconfigured = (struct lpfc_acqe_misconfigured_event *) &acqe_sli->event_data1; - /* fetch the status for this port */ - switch (phba->sli4_hba.lnk_info.lnk_no) { - case LPFC_LINK_NUMBER_0: - status = bf_get(lpfc_sli_misconfigured_port0, + /* fetch the status for this port */ + switch (phba->sli4_hba.lnk_info.lnk_no) { + case LPFC_LINK_NUMBER_0: + status = bf_get(lpfc_sli_misconfigured_port0, &misconfigured->theEvent); - break; - case LPFC_LINK_NUMBER_1: - status = bf_get(lpfc_sli_misconfigured_port1, + break; + case LPFC_LINK_NUMBER_1: + status = bf_get(lpfc_sli_misconfigured_port1, &misconfigured->theEvent); - break; - case LPFC_LINK_NUMBER_2: - status = bf_get(lpfc_sli_misconfigured_port2, + break; + case LPFC_LINK_NUMBER_2: + status = bf_get(lpfc_sli_misconfigured_port2, &misconfigured->theEvent); - break; - case LPFC_LINK_NUMBER_3: - status = bf_get(lpfc_sli_misconfigured_port3, + break; + case LPFC_LINK_NUMBER_3: + status = bf_get(lpfc_sli_misconfigured_port3, &misconfigured->theEvent); - break; - default: - status = ~LPFC_SLI_EVENT_STATUS_VALID; - break; - } + break; + default: + status = ~LPFC_SLI_EVENT_STATUS_VALID; + break; + } - switch (status) { - case LPFC_SLI_EVENT_STATUS_VALID: - return; /* no message if the sfp is okay */ - case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: - sprintf(message, "Optics faulted/incorrectly installed/not " \ - "installed - Reseat optics, if issue not " - "resolved, replace."); - break; - case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: - sprintf(message, - "Optics of two types installed - Remove one optic or " \ - "install matching pair of optics."); - break; - case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: - sprintf(message, "Incompatible optics - Replace with " \ + switch (status) { + case LPFC_SLI_EVENT_STATUS_VALID: + return; /* no message if the sfp is okay */ + case 
LPFC_SLI_EVENT_STATUS_NOT_PRESENT: + sprintf(message, "Optics faulted/incorrectly " + "installed/not installed - Reseat optics, " + "if issue not resolved, replace."); + break; + case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: + sprintf(message, + "Optics of two types installed - Remove one " + "optic or install matching pair of optics."); + break; + case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: + sprintf(message, "Incompatible optics - Replace with " "compatible optics for card to function."); + break; + default: + /* firmware is reporting a status we don't know about */ + sprintf(message, "Unknown event status x%02x", status); + break; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3176 Misconfigured Physical Port - " + "Port Name %c %s\n", port_name, message); + break; + case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3192 Remote DPort Test Initiated - " + "Event Data1:x%08x Event Data2: x%08x\n", + acqe_sli->event_data1, acqe_sli->event_data2); break; default: - /* firmware is reporting a status we don't know about */ - sprintf(message, "Unknown event status x%02x", status); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3193 Async SLI event - Event Data1:x%08x Event Data2:" + "x%08x SLI Event Type:%d\n", + acqe_sli->event_data1, acqe_sli->event_data2, + evt_type); break; } - - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "3176 Misconfigured Physical Port - " - "Port Name %c %s\n", port_name, message); } /** @@ -5183,6 +5290,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) rc = lpfc_pci_function_reset(phba); if (unlikely(rc)) return -ENODEV; + phba->temp_sensor_support = 1; } /* Create the bootstrap mailbox command */ @@ -7647,6 +7755,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) goto out_destroy_els_rq; } } + + /* + * Configure EQ delay multipier for interrupt coalescing using + * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time. + */ + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; + fcp_eqidx += LPFC_MAX_EQ_DELAY) + lpfc_modify_fcp_eq_delay(phba, fcp_eqidx); return 0; out_destroy_els_rq: @@ -7953,7 +8069,7 @@ wait: * up to 30 seconds. If the port doesn't respond, treat * it as an error. */ - for (rdy_chk = 0; rdy_chk < 3000; rdy_chk++) { + for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { if (lpfc_readl(phba->sli4_hba.u.if_type2. STATUSregaddr, ®_data.word0)) { rc = -ENODEV; diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 06241f590c1e..816f596cda60 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 5cc1103d811e..4cb9882af157 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -276,6 +276,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd; + uint64_t nlp_portwwn = 0; uint32_t *lp; IOCB_t *icmd; struct serv_parm *sp; @@ -332,6 +333,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, NULL); return 0; } + + nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn); if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) { /* Reject this request because invalid parameters */ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; @@ -367,7 +370,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; - /* no need to reg_login if we are already in one of these states */ + /* if already logged in, do implicit logout */ switch (ndlp->nlp_state) { case NLP_STE_NPR_NODE: if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) @@ -376,8 +379,26 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, case NLP_STE_PRLI_ISSUE: case NLP_STE_UNMAPPED_NODE: case NLP_STE_MAPPED_NODE: - lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); - return 1; + /* lpfc_plogi_confirm_nport skips fabric did, handle it here */ + if (!(ndlp->nlp_type & NLP_FABRIC)) { + lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, + ndlp, NULL); + return 1; + } + if (nlp_portwwn != 0 && + nlp_portwwn != wwn_to_u64(sp->portName.u.wwn)) + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0143 PLOGI recv'd from DID: x%x " + "WWPN changed: old %llx new %llx\n", + ndlp->nlp_DID, + (unsigned long long)nlp_portwwn, + (unsigned long long) + wwn_to_u64(sp->portName.u.wwn)); + + ndlp->nlp_prev_state = ndlp->nlp_state; + /* rport needs to be unregistered first */ + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + break; } /* Check for Nport to NPort pt2pt protocol */ diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 4f9222eb2266..cb73cf9e9ba5 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2014 Emulex. All rights reserved. * + * Copyright (C) 2004-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -1130,6 +1130,25 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) } /** + * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB + * @data: A pointer to the immediate command data portion of the IOCB. + * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. + * + * The routine copies the entire FCP command from @fcp_cmnd to @data while + * byte swapping the data to big endian format for transmission on the wire. + **/ +static void +lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) +{ + int i, j; + + for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); + i += sizeof(uint32_t), j++) { + ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); + } +} + +/** * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be mapped. 
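The lpfc_scsi.c change above moves the lpfc_fcpcmd_to_iocb() helper ahead of lpfc_scsi_prep_dma_buf_s3() so the SLI-3 DMA-prep path can fill in the IOCB immediate command data directly (the later hunks drop the old guarded call in lpfc_scsi_prep_cmnd()). The helper copies the FCP command one 32-bit word at a time, converting each word to big endian for the wire. A minimal user-space sketch of that copy pattern, with htonl() standing in for the kernel's cpu_to_be32() and a hypothetical 32-byte command size, not the driver code itself:

```c
#include <arpa/inet.h>	/* htonl(): stand-in for the kernel's cpu_to_be32() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CMND_SIZE 32	/* hypothetical fixed command size for the sketch */

/* Copy a command block into an IOCB-style buffer one 32-bit word at a time,
 * emitting each word in big-endian (wire) order. On a little-endian host this
 * swaps the bytes within every word; on a big-endian host it is a plain copy. */
static void cmnd_to_iocb(uint8_t *data, const uint8_t *cmnd)
{
	uint32_t word;
	size_t i, j;

	for (i = 0, j = 0; i < CMND_SIZE; i += sizeof(uint32_t), j++) {
		memcpy(&word, cmnd + i, sizeof(word));	/* avoid unaligned access */
		word = htonl(word);			/* CPU order -> big endian */
		memcpy(data + j * sizeof(word), &word, sizeof(word));
	}
}

int main(void)
{
	uint8_t cmnd[CMND_SIZE] = { 0x2a, 0x00, 0x00, 0x10 };	/* arbitrary bytes */
	uint8_t iocb[CMND_SIZE] = { 0 };

	cmnd_to_iocb(iocb, cmnd);
	printf("iocb[0..3] = %02x %02x %02x %02x\n",
	       iocb[0], iocb[1], iocb[2], iocb[3]);
	return 0;
}
```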
@@ -1264,6 +1283,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) * we need to set word 4 of IOCB here */ iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); + lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); return 0; } @@ -4127,24 +4147,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, } /** - * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB - * @data: A pointer to the immediate command data portion of the IOCB. - * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. - * - * The routine copies the entire FCP command from @fcp_cmnd to @data while - * byte swapping the data to big endian format for transmission on the wire. - **/ -static void -lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) -{ - int i, j; - for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); - i += sizeof(uint32_t), j++) { - ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); - } -} - -/** * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: The scsi command which needs to send. @@ -4223,9 +4225,6 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, fcp_cmnd->fcpCntl3 = 0; phba->fc4ControlRequests++; } - if (phba->sli_rev == 3 && - !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) - lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); /* * Finish initializing those IOCB fields that are independent * of the scsi_cmnd request_buffer @@ -5118,9 +5117,10 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) int status; rdata = lpfc_rport_data_from_scsi_device(cmnd->device); - if (!rdata) { + if (!rdata || !rdata->pnode) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, - "0798 Device Reset rport failure: rdata x%p\n", rdata); + "0798 Device Reset rport failure: rdata x%p\n", + rdata); return FAILED; } pnode = rdata->pnode; @@ -5202,10 +5202,12 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) if (status == FAILED) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0722 Target Reset rport failure: rdata x%p\n", rdata); - spin_lock_irq(shost->host_lock); - pnode->nlp_flag &= ~NLP_NPR_ADISC; - pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - spin_unlock_irq(shost->host_lock); + if (pnode) { + spin_lock_irq(shost->host_lock); + pnode->nlp_flag &= ~NLP_NPR_ADISC; + pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + spin_unlock_irq(shost->host_lock); + } lpfc_reset_flush_io_context(vport, tgt_id, lun_id, LPFC_CTX_TGT); return FAST_IO_FAIL; @@ -5857,6 +5859,31 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, return false; } +struct scsi_host_template lpfc_template_s3 = { + .module = THIS_MODULE, + .name = LPFC_DRIVER_NAME, + .info = lpfc_info, + .queuecommand = lpfc_queuecommand, + .eh_abort_handler = lpfc_abort_handler, + .eh_device_reset_handler = lpfc_device_reset_handler, + .eh_target_reset_handler = lpfc_target_reset_handler, + .eh_bus_reset_handler = lpfc_bus_reset_handler, + .slave_alloc = lpfc_slave_alloc, + .slave_configure = lpfc_slave_configure, + .slave_destroy = lpfc_slave_destroy, + .scan_finished = lpfc_scan_finished, + .this_id = -1, + .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, + .cmd_per_lun = LPFC_CMD_PER_LUN, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = lpfc_hba_attrs, + .max_sectors = 0xFFFF, + .vendor_id = LPFC_NL_VENDOR_ID, + .change_queue_depth = scsi_change_queue_depth, + .use_blk_tags = 1, + .track_queue_depth = 1, +}; + struct 
scsi_host_template lpfc_template = { .module = THIS_MODULE, .name = LPFC_DRIVER_NAME, diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 0389ac1e7b83..474e30cdee6e 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2014 Emulex. All rights reserved. * + * Copyright (C) 2004-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 207a43d952fa..56f73682d4bd 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2014 Emulex. All rights reserved. * + * Copyright (C) 2004-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -918,12 +918,16 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1; ndlp = lpfc_cmd->rdata->pnode; } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && - !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) + !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) { ndlp = piocbq->context_un.ndlp; - else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) - ndlp = piocbq->context_un.ndlp; - else + } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) { + if (piocbq->iocb_flag & LPFC_IO_LOOPBACK) + ndlp = NULL; + else + ndlp = piocbq->context_un.ndlp; + } else { ndlp = piocbq->context1; + } list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); start_sglq = sglq; @@ -2213,6 +2217,46 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) else mempool_free(pmb, phba->mbox_mem_pool); } + /** + * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler + * @phba: Pointer to HBA context object. + * @pmb: Pointer to mailbox object. + * + * This function is the unreg rpi mailbox completion handler. It + * frees the memory resources associated with the completed mailbox + * command. An additional refrenece is put on the ndlp to prevent + * lpfc_nlp_release from freeing the rpi bit in the bitmask before + * the unreg mailbox command completes, this routine puts the + * reference back. + * + **/ +void +lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct lpfc_nodelist *ndlp; + + ndlp = pmb->context1; + if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { + if (phba->sli_rev == LPFC_SLI_REV4 && + (bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_2)) { + if (ndlp) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "0010 UNREG_LOGIN vpi:%x " + "rpi:%x DID:%x map:%x %p\n", + vport->vpi, ndlp->nlp_rpi, + ndlp->nlp_DID, + ndlp->nlp_usg_map, ndlp); + + lpfc_nlp_put(ndlp); + } + } + } + + mempool_free(pmb, phba->mbox_mem_pool); +} /** * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware @@ -12842,7 +12886,7 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) * fails this function will return -ENXIO. 
**/ int -lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq) +lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq) { struct lpfc_mbx_modify_eq_delay *eq_delay; LPFC_MBOXQ_t *mbox; @@ -12959,11 +13003,8 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) bf_set(lpfc_eq_context_size, &eq_create->u.request.context, LPFC_EQE_SIZE); bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); - /* Calculate delay multiper from maximum interrupt per second */ - if (imax > LPFC_DMULT_CONST) - dmult = 0; - else - dmult = LPFC_DMULT_CONST/imax - 1; + /* don't setup delay multiplier using EQ_CREATE */ + dmult = 0; bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, dmult); switch (eq->entry_count) { @@ -15662,14 +15703,14 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) struct lpfc_rpi_hdr *rpi_hdr; unsigned long iflag; - max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; - rpi_limit = phba->sli4_hba.next_rpi; - /* * Fetch the next logical rpi. Because this index is logical, * the driver starts at 0 each time. */ spin_lock_irqsave(&phba->hbalock, iflag); + max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; + rpi_limit = phba->sli4_hba.next_rpi; + rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); if (rpi >= rpi_limit) rpi = LPFC_RPI_ALLOC_ERROR; @@ -15678,6 +15719,9 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) phba->sli4_hba.max_cfg_param.rpi_used++; phba->sli4_hba.rpi_count++; } + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0001 rpi:%x max:%x lim:%x\n", + (int) rpi, max_rpi, rpi_limit); /* * Don't try to allocate more rpi header regions if the device limit diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 4a01452415cf..7fe99ff80846 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2014 Emulex. All rights reserved. * + * Copyright (C) 2004-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -80,6 +80,7 @@ struct lpfc_iocbq { #define LPFC_IO_OAS 0x10000 /* OAS FCP IO */ #define LPFC_IO_FOF 0x20000 /* FOF FCP IO */ +#define LPFC_IO_LOOPBACK 0x40000 /* Loopback IO */ uint32_t drvrTimeout; /* driver timeout in seconds */ uint32_t fcp_wqidx; /* index to FCP work queue */ diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 22ceb2b05ba1..6eca3b8124d3 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2014 Emulex. All rights reserved. * + * Copyright (C) 2009-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -671,7 +671,7 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, uint32_t); void lpfc_sli4_queue_free(struct lpfc_queue *); int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t); -int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t); +int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint32_t); int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t, uint32_t); int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 89413add2252..c37bb9f91c3b 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2014 Emulex. All rights reserved. * + * Copyright (C) 2004-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "10.4.8000.0." +#define LPFC_DRIVER_VERSION "10.5.0.0." #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ @@ -30,4 +30,4 @@ #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ LPFC_DRIVER_VERSION -#define LPFC_COPYRIGHT "Copyright(c) 2004-2014 Emulex. All rights reserved." +#define LPFC_COPYRIGHT "Copyright(c) 2004-2015 Emulex. All rights reserved." diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index 1e85c07e3b62..d64a769b8155 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -483,7 +483,6 @@ static struct platform_driver mac_scsi_driver = { .remove = __exit_p(mac_scsi_remove), .driver = { .name = DRV_MODULE_NAME, - .owner = THIS_MODULE, }, }; diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 113e6c9826a1..33f60c92e20e 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -18,6 +18,9 @@ config SCSI_QLA_FC 2322, 6322 ql2322_fw.bin 24xx, 54xx ql2400_fw.bin 25xx ql2500_fw.bin + 2031 ql2600_fw.bin + 8031 ql8300_fw.bin + 27xx ql2700_fw.bin Upon request, the driver caches the firmware image until the driver is unloaded. 
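The Kconfig help text above now also lists the firmware images for the 2031, 8031 and 27xx ISPs, matching the MODULE_FIRMWARE() entries added to qla_os.c later in this diff. As a rough sketch of the caching behaviour the help text describes (request the blob once and hold the struct firmware reference until the driver unloads), the snippet below uses the generic kernel firmware-loader API; the function names and the choice of ql2700_fw.bin are illustrative only, not qla2xxx code:

```c
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

/* Cached image, held until module unload (as the Kconfig help text describes). */
static const struct firmware *cached_fw;

static int example_load_fw(struct device *dev)
{
	int rc;

	if (cached_fw)			/* already cached from an earlier reset */
		return 0;

	rc = request_firmware(&cached_fw, "ql2700_fw.bin", dev);
	if (rc)
		dev_err(dev, "failed to load ql2700_fw.bin (%d)\n", rc);
	return rc;
}

static void example_unload_fw(void)
{
	release_firmware(cached_fw);	/* no-op when cached_fw is NULL */
	cached_fw = NULL;
}

/* Lets userspace tooling know this module may need the image at runtime. */
MODULE_FIRMWARE("ql2700_fw.bin");
```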
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index d77fe43793b6..0e6ee3ca30e6 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -11,9 +11,9 @@ * ---------------------------------------------------------------------- * | Level | Last Value Used | Holes | * ---------------------------------------------------------------------- - * | Module Init and Probe | 0x017d | 0x0144,0x0146 | + * | Module Init and Probe | 0x017f | 0x0146 | * | | | 0x015b-0x0160 | - * | | | 0x016e-0x0170 | + * | | | 0x016e-0x0170 | * | Mailbox commands | 0x118d | 0x1115-0x1116 | * | | | 0x111a-0x111b | * | Device Discovery | 0x2016 | 0x2020-0x2022, | @@ -60,7 +60,7 @@ * | | | 0xb13c-0xb140 | * | | | 0xb149 | * | MultiQ | 0xc00c | | - * | Misc | 0xd213 | 0xd011-0xd017 | + * | Misc | 0xd300 | 0xd016-0xd017 | * | | | 0xd021,0xd024 | * | | | 0xd025,0xd029 | * | | | 0xd02a,0xd02e | diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 5f6b2960cccb..e86201d3b8c6 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2163,7 +2163,7 @@ struct ct_fdmi_hba_attr { uint8_t node_name[WWN_SIZE]; uint8_t manufacturer[64]; uint8_t serial_num[32]; - uint8_t model[16]; + uint8_t model[16+1]; uint8_t model_desc[80]; uint8_t hw_version[32]; uint8_t driver_version[32]; @@ -2184,9 +2184,9 @@ struct ct_fdmiv2_hba_attr { uint16_t len; union { uint8_t node_name[WWN_SIZE]; - uint8_t manufacturer[32]; + uint8_t manufacturer[64]; uint8_t serial_num[32]; - uint8_t model[16]; + uint8_t model[16+1]; uint8_t model_desc[80]; uint8_t hw_version[16]; uint8_t driver_version[32]; @@ -2252,7 +2252,7 @@ struct ct_fdmiv2_port_attr { uint32_t cur_speed; uint32_t max_frame_size; uint8_t os_dev_name[32]; - uint8_t host_name[32]; + uint8_t host_name[256]; uint8_t node_name[WWN_SIZE]; uint8_t port_name[WWN_SIZE]; uint8_t port_sym_name[128]; @@ -2283,7 +2283,7 @@ struct ct_fdmi_port_attr { uint32_t cur_speed; uint32_t max_frame_size; uint8_t os_dev_name[32]; - uint8_t host_name[32]; + uint8_t host_name[256]; } a; }; @@ -3132,7 +3132,8 @@ struct qla_hw_data { IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ IS_QLA8044(ha) || IS_QLA27XX(ha)) -#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) +#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ + IS_QLA27XX(ha)) #define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled) #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ IS_QLA27XX(ha)) @@ -3300,6 +3301,8 @@ struct qla_hw_data { #define RISC_RDY_AFT_RESET 3 #define RISC_SRAM_DUMP_CMPL 4 #define RISC_EXT_MEM_DUMP_CMPL 5 +#define ISP_MBX_RDY 6 +#define ISP_SOFT_RESET_CMPL 7 int fw_dump_reading; int prev_minidump_failed; dma_addr_t eft_dma; @@ -3587,6 +3590,7 @@ typedef struct scsi_qla_host { #define VP_BIND_NEEDED 2 #define VP_DELETE_NEEDED 3 #define VP_SCR_NEEDED 4 /* State Change Request registration */ +#define VP_CONFIG_OK 5 /* Flag to cfg VP, if FW is ready */ atomic_t vp_state; #define VP_OFFLINE 0 #define VP_ACTIVE 1 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 5bb57c5282c9..285cb204f300 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1121,7 +1121,7 @@ qla81xx_reset_mpi(scsi_qla_host_t *vha) * * Returns 0 on success. 
*/ -static inline void +static inline int qla24xx_reset_risc(scsi_qla_host_t *vha) { unsigned long flags = 0; @@ -1130,6 +1130,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) uint32_t cnt, d2; uint16_t wd; static int abts_cnt; /* ISP abort retry counts */ + int rval = QLA_SUCCESS; spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1142,26 +1143,57 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) udelay(10); } + if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE)) + set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e, + "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n", + RD_REG_DWORD(®->hccr), + RD_REG_DWORD(®->ctrl_status), + (RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE)); + WRT_REG_DWORD(®->ctrl_status, CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); udelay(100); + /* Wait for firmware to complete NVRAM accesses. */ d2 = (uint32_t) RD_REG_WORD(®->mailbox0); - for (cnt = 10000 ; cnt && d2; cnt--) { - udelay(5); - d2 = (uint32_t) RD_REG_WORD(®->mailbox0); + for (cnt = 10000; RD_REG_WORD(®->mailbox0) != 0 && + rval == QLA_SUCCESS; cnt--) { barrier(); + if (cnt) + udelay(5); + else + rval = QLA_FUNCTION_TIMEOUT; } + if (rval == QLA_SUCCESS) + set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags); + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f, + "HCCR: 0x%x, MailBox0 Status 0x%x\n", + RD_REG_DWORD(®->hccr), + RD_REG_DWORD(®->mailbox0)); + /* Wait for soft-reset to complete. */ d2 = RD_REG_DWORD(®->ctrl_status); - for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) { - udelay(5); - d2 = RD_REG_DWORD(®->ctrl_status); + for (cnt = 0; cnt < 6000000; cnt++) { barrier(); + if ((RD_REG_DWORD(®->ctrl_status) & + CSRX_ISP_SOFT_RESET) == 0) + break; + + udelay(5); } + if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_ISP_SOFT_RESET)) + set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags); + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d, + "HCCR: 0x%x, Soft Reset status: 0x%x\n", + RD_REG_DWORD(®->hccr), + RD_REG_DWORD(®->ctrl_status)); /* If required, do an MPI FW reset now */ if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) { @@ -1190,16 +1222,32 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) RD_REG_DWORD(®->hccr); d2 = (uint32_t) RD_REG_WORD(®->mailbox0); - for (cnt = 6000000 ; cnt && d2; cnt--) { - udelay(5); - d2 = (uint32_t) RD_REG_WORD(®->mailbox0); + for (cnt = 6000000; RD_REG_WORD(®->mailbox0) != 0 && + rval == QLA_SUCCESS; cnt--) { barrier(); + if (cnt) + udelay(5); + else + rval = QLA_FUNCTION_TIMEOUT; } + if (rval == QLA_SUCCESS) + set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e, + "Host Risc 0x%x, mailbox0 0x%x\n", + RD_REG_DWORD(®->hccr), + RD_REG_WORD(®->mailbox0)); spin_unlock_irqrestore(&ha->hardware_lock, flags); + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f, + "Driver in %s mode\n", + IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling"); + if (IS_NOPOLLING_TYPE(ha)) ha->isp_ops->enable_intrs(ha); + + return rval; } static void @@ -2243,8 +2291,11 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) rval = QLA_SUCCESS; - /* 20 seconds for loop down. 
*/ - min_wait = 20; + /* Time to wait for loop down */ + if (IS_P3P_TYPE(ha)) + min_wait = 30; + else + min_wait = 20; /* * Firmware should take at most one RATOV to login, plus 5 seconds for diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index a04a1b1f7f32..6dc14cd782b2 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -756,11 +756,21 @@ skip_rio: /* * In case of loop down, restore WWPN from * NVRAM in case of FA-WWPN capable ISP + * Restore for Physical Port only */ - if (ha->flags.fawwpn_enabled) { - void *wwpn = ha->init_cb->port_name; + if (!vha->vp_idx) { + if (ha->flags.fawwpn_enabled) { + void *wwpn = ha->init_cb->port_name; + memcpy(vha->port_name, wwpn, WWN_SIZE); + fc_host_port_name(vha->host) = + wwn_to_u64(vha->port_name); + ql_dbg(ql_dbg_init + ql_dbg_verbose, + vha, 0x0144, "LOOP DOWN detected," + "restore WWPN %016llx\n", + wwn_to_u64(vha->port_name)); + } - memcpy(vha->port_name, wwpn, WWN_SIZE); + clear_bit(VP_CONFIG_OK, &vha->vp_flags); } vha->device_flags |= DFLG_NO_CABLE; @@ -947,6 +957,7 @@ skip_rio: set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(VP_CONFIG_OK, &vha->vp_flags); qlt_async_event(mb[0], vha, mb); break; diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 72971daa2552..02b1c1c5355b 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -33,7 +33,7 @@ static int qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) { - int rval; + int rval, i; unsigned long flags = 0; device_reg_t *reg; uint8_t abort_active; @@ -43,10 +43,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) uint16_t __iomem *optr; uint32_t cnt; uint32_t mboxes; + uint16_t __iomem *mbx_reg; unsigned long wait_time; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__); if (ha->pdev->error_state > pci_channel_io_frozen) { @@ -376,6 +378,18 @@ mbx_done: ql_dbg(ql_dbg_disc, base_vha, 0x1020, "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n", mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command); + + ql_dbg(ql_dbg_disc, vha, 0x1115, + "host status: 0x%x, flags:0x%lx, intr ctrl reg:0x%x, intr status:0x%x\n", + RD_REG_DWORD(®->isp24.host_status), + ha->fw_dump_cap_flags, + RD_REG_DWORD(®->isp24.ictrl), + RD_REG_DWORD(®->isp24.istatus)); + + mbx_reg = ®->isp24.mailbox0; + for (i = 0; i < 6; i++) + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x1116, + "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++)); } else { ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); } @@ -2838,7 +2852,7 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - if (!IS_QLA2031(vha->hw)) + if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, @@ -2846,7 +2860,11 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) mcp->mb[0] = MBC_WRITE_SERDES; mcp->mb[1] = addr; - mcp->mb[2] = data & 0xff; + if (IS_QLA2031(vha->hw)) + mcp->mb[2] = data & 0xff; + else + mcp->mb[2] = data; + mcp->mb[3] = 0; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; @@ -2872,7 +2890,7 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - if (!IS_QLA2031(vha->hw)) + if (!IS_QLA2031(vha->hw) && 
!IS_QLA27XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, @@ -2887,7 +2905,10 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); - *data = mcp->mb[1] & 0xff; + if (IS_QLA2031(vha->hw)) + *data = mcp->mb[1] & 0xff; + else + *data = mcp->mb[1]; if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1186, diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index ca3804e34833..cc94192511cf 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -306,19 +306,25 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha) static int qla2x00_do_dpc_vp(scsi_qla_host_t *vha) { + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012, "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags); qla2x00_do_work(vha); - if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { - /* VP acquired. complete port configuration */ - ql_dbg(ql_dbg_dpc, vha, 0x4014, - "Configure VP scheduled.\n"); - qla24xx_configure_vp(vha); - ql_dbg(ql_dbg_dpc, vha, 0x4015, - "Configure VP end.\n"); - return 0; + /* Check if Fw is ready to configure VP first */ + if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) { + if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { + /* VP acquired. complete port configuration */ + ql_dbg(ql_dbg_dpc, vha, 0x4014, + "Configure VP scheduled.\n"); + qla24xx_configure_vp(vha); + ql_dbg(ql_dbg_dpc, vha, 0x4015, + "Configure VP end.\n"); + return 0; + } } if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) { diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 5319b3cb219e..7462dd70b150 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -5834,3 +5834,6 @@ MODULE_FIRMWARE(FW_FILE_ISP2300); MODULE_FIRMWARE(FW_FILE_ISP2322); MODULE_FIRMWARE(FW_FILE_ISP24XX); MODULE_FIRMWARE(FW_FILE_ISP25XX); +MODULE_FIRMWARE(FW_FILE_ISP2031); +MODULE_FIRMWARE(FW_FILE_ISP8031); +MODULE_FIRMWARE(FW_FILE_ISP27XX); diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index b656a05613e8..028e8c8a7de9 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -1718,13 +1718,16 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha) uint16_t orig_led_cfg[6]; uint32_t led_10_value, led_43_value; - if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha)) return; if (!ha->beacon_blink_led) return; - if (IS_QLA2031(ha)) { + if (IS_QLA27XX(ha)) { + qla2x00_write_ram_word(vha, 0x1003, 0x40000230); + qla2x00_write_ram_word(vha, 0x1004, 0x40000230); + } else if (IS_QLA2031(ha)) { led_select_value = qla83xx_select_led_port(ha); qla83xx_wr_reg(vha, led_select_value, 0x40000230); @@ -1811,7 +1814,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha) return QLA_FUNCTION_FAILED; } - if (IS_QLA2031(ha)) + if (IS_QLA2031(ha) || IS_QLA27XX(ha)) goto skip_gpio; spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1848,7 +1851,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha) ha->beacon_blink_led = 0; - if (IS_QLA2031(ha)) + if (IS_QLA2031(ha) || IS_QLA27XX(ha)) goto set_fw_options; if (IS_QLA8031(ha) || IS_QLA81XX(ha)) diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index a8c0c7362e48..962cb89fe0ae 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -190,7 +190,7 @@ static inline 
void qla27xx_write_reg(__iomem struct device_reg_24xx *reg, uint offset, uint32_t data, void *buf) { - __iomem void *window = reg + offset; + __iomem void *window = (void __iomem *)reg + offset; if (buf) { WRT_REG_DWORD(window, data); @@ -219,6 +219,8 @@ qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf) { if (buf) ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY; + ql_dbg(ql_dbg_misc + ql_dbg_verbose, NULL, 0xd011, + "Skipping entry %d\n", ent->hdr.entry_type); } static int @@ -784,6 +786,13 @@ qla27xx_walk_template(struct scsi_qla_host *vha, ql_dbg(ql_dbg_misc, vha, 0xd01b, "%s: len=%lx\n", __func__, *len); + + if (buf) { + ql_log(ql_log_warn, vha, 0xd015, + "Firmware dump saved to temp buffer (%ld/%p)\n", + vha->host_no, vha->hw->fw_dump); + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); + } } static void @@ -938,6 +947,10 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked) ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n"); else if (!vha->hw->fw_dump_template) ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n"); + else if (vha->hw->fw_dumped) + ql_log(ql_log_warn, vha, 0xd300, + "Firmware has been previously dumped (%p)," + " -- ignoring request\n", vha->hw->fw_dump); else qla27xx_execute_fwdt_template(vha); diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index d88b86214ec5..2ed9ab90a455 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.07.00.16-k" +#define QLA2XXX_VERSION "8.07.00.18-k" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 7 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index c9c3b579eece..3833bf59fb66 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -972,18 +972,24 @@ EXPORT_SYMBOL(scsi_report_opcode); * Description: Gets a reference to the scsi_device and increments the use count * of the underlying LLDD module. You must hold host_lock of the * parent Scsi_Host or already have a reference when calling this. + * + * This will fail if a device is deleted or cancelled, or when the LLD module + * is in the process of being unloaded. 
*/ int scsi_device_get(struct scsi_device *sdev) { - if (sdev->sdev_state == SDEV_DEL) - return -ENXIO; + if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL) + goto fail; if (!get_device(&sdev->sdev_gendev)) - return -ENXIO; - /* We can fail try_module_get if we're doing SCSI operations - * from module exit (like cache flush) */ - __module_get(sdev->host->hostt->module); - + goto fail; + if (!try_module_get(sdev->host->hostt->module)) + goto fail_put_device; return 0; + +fail_put_device: + put_device(&sdev->sdev_gendev); +fail: + return -ENXIO; } EXPORT_SYMBOL(scsi_device_get); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 9c0a520d933c..60aae01caa89 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1570,16 +1570,15 @@ EXPORT_SYMBOL(scsi_add_device); void scsi_rescan_device(struct device *dev) { - if (!dev->driver) - return; - - if (try_module_get(dev->driver->owner)) { + device_lock(dev); + if (dev->driver && try_module_get(dev->driver->owner)) { struct scsi_driver *drv = to_scsi_driver(dev->driver); if (drv->rescan) drv->rescan(dev); module_put(dev->driver->owner); } + device_unlock(dev); } EXPORT_SYMBOL(scsi_rescan_device); diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 5d6f348eb3d8..24eaaf66af71 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -265,6 +265,7 @@ static const struct { { FC_PORTSPEED_40GBIT, "40 Gbit" }, { FC_PORTSPEED_50GBIT, "50 Gbit" }, { FC_PORTSPEED_100GBIT, "100 Gbit" }, + { FC_PORTSPEED_25GBIT, "25 Gbit" }, { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" }, }; fc_bitfield_name_search(port_speed, fc_port_speed_names) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 6b78476d04bb..dcc42446f58a 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -564,10 +564,12 @@ static int sd_major(int major_idx) } } -static struct scsi_disk *__scsi_disk_get(struct gendisk *disk) +static struct scsi_disk *scsi_disk_get(struct gendisk *disk) { struct scsi_disk *sdkp = NULL; + mutex_lock(&sd_ref_mutex); + if (disk->private_data) { sdkp = scsi_disk(disk); if (scsi_device_get(sdkp->device) == 0) @@ -575,27 +577,6 @@ static struct scsi_disk *__scsi_disk_get(struct gendisk *disk) else sdkp = NULL; } - return sdkp; -} - -static struct scsi_disk *scsi_disk_get(struct gendisk *disk) -{ - struct scsi_disk *sdkp; - - mutex_lock(&sd_ref_mutex); - sdkp = __scsi_disk_get(disk); - mutex_unlock(&sd_ref_mutex); - return sdkp; -} - -static struct scsi_disk *scsi_disk_get_from_dev(struct device *dev) -{ - struct scsi_disk *sdkp; - - mutex_lock(&sd_ref_mutex); - sdkp = dev_get_drvdata(dev); - if (sdkp) - sdkp = __scsi_disk_get(sdkp->disk); mutex_unlock(&sd_ref_mutex); return sdkp; } @@ -610,8 +591,6 @@ static void scsi_disk_put(struct scsi_disk *sdkp) mutex_unlock(&sd_ref_mutex); } - - static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd, unsigned int dix, unsigned int dif) { @@ -1525,12 +1504,9 @@ static int sd_sync_cache(struct scsi_disk *sdkp) static void sd_rescan(struct device *dev) { - struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); + struct scsi_disk *sdkp = dev_get_drvdata(dev); - if (sdkp) { - revalidate_disk(sdkp->disk); - scsi_disk_put(sdkp); - } + revalidate_disk(sdkp->disk); } @@ -2235,11 +2211,11 @@ got_data: { char cap_str_2[10], cap_str_10[10]; - u64 sz = (u64)sdkp->capacity << ilog2(sector_size); - string_get_size(sz, STRING_UNITS_2, cap_str_2, - sizeof(cap_str_2)); - string_get_size(sz, STRING_UNITS_10, 
cap_str_10, + string_get_size(sdkp->capacity, sector_size, + STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); + string_get_size(sdkp->capacity, sector_size, + STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); if (sdkp->first_scan || old_capacity != sdkp->capacity) { @@ -3149,13 +3125,13 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start) */ static void sd_shutdown(struct device *dev) { - struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); + struct scsi_disk *sdkp = dev_get_drvdata(dev); if (!sdkp) return; /* this can happen */ if (pm_runtime_suspended(dev)) - goto exit; + return; if (sdkp->WCE && sdkp->media_present) { sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); @@ -3166,14 +3142,11 @@ static void sd_shutdown(struct device *dev) sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); sd_start_stop_device(sdkp, 0); } - -exit: - scsi_disk_put(sdkp); } static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) { - struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); + struct scsi_disk *sdkp = dev_get_drvdata(dev); int ret = 0; if (!sdkp) @@ -3199,7 +3172,6 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) } done: - scsi_disk_put(sdkp); return ret; } @@ -3215,18 +3187,13 @@ static int sd_suspend_runtime(struct device *dev) static int sd_resume(struct device *dev) { - struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); - int ret = 0; + struct scsi_disk *sdkp = dev_get_drvdata(dev); if (!sdkp->device->manage_start_stop) - goto done; + return 0; sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); - ret = sd_start_stop_device(sdkp, 1); - -done: - scsi_disk_put(sdkp); - return ret; + return sd_start_stop_device(sdkp, 1); } /** diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index efc6e446b6c8..d9dad90344d5 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -308,11 +308,16 @@ enum storvsc_request_type { * This is the end of Protocol specific defines. */ -static int storvsc_ringbuffer_size = (20 * PAGE_SIZE); +static int storvsc_ringbuffer_size = (256 * PAGE_SIZE); +static u32 max_outstanding_req_per_channel; + +static int storvsc_vcpus_per_sub_channel = 4; module_param(storvsc_ringbuffer_size, int, S_IRUGO); MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)"); +module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO); +MODULE_PARM_DESC(vcpus_per_sub_channel, "Ratio of VCPUs to subchannels"); /* * Timeout in seconds for all devices managed by this driver. */ @@ -320,7 +325,6 @@ static int storvsc_timeout = 180; static int msft_blist_flags = BLIST_TRY_VPD_PAGES; -#define STORVSC_MAX_IO_REQUESTS 200 static void storvsc_on_channel_callback(void *context); @@ -347,7 +351,10 @@ struct storvsc_cmd_request { /* Synchronize the request/response if needed */ struct completion wait_event; - struct hv_multipage_buffer data_buffer; + struct vmbus_channel_packet_multipage_buffer mpb; + struct vmbus_packet_mpb_array *payload; + u32 payload_sz; + struct vstor_packet vstor_packet; }; @@ -373,6 +380,10 @@ struct storvsc_device { unsigned char path_id; unsigned char target_id; + /* + * Max I/O, the device can support. 
+ */ + u32 max_transfer_bytes; /* Used for vsc/vsp channel reset process */ struct storvsc_cmd_request init_request; struct storvsc_cmd_request reset_request; @@ -618,19 +629,6 @@ cleanup: return NULL; } -/* Disgusting wrapper functions */ -static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx) -{ - void *addr = kmap_atomic(sg_page(sgl + idx)); - return (unsigned long)addr; -} - -static inline void sg_kunmap_atomic(unsigned long addr) -{ - kunmap_atomic((void *)addr); -} - - /* Assume the original sgl has enough room */ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, struct scatterlist *bounce_sgl, @@ -645,32 +643,38 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, unsigned long bounce_addr = 0; unsigned long dest_addr = 0; unsigned long flags; + struct scatterlist *cur_dest_sgl; + struct scatterlist *cur_src_sgl; local_irq_save(flags); - + cur_dest_sgl = orig_sgl; + cur_src_sgl = bounce_sgl; for (i = 0; i < orig_sgl_count; i++) { - dest_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset; + dest_addr = (unsigned long) + kmap_atomic(sg_page(cur_dest_sgl)) + + cur_dest_sgl->offset; dest = dest_addr; - destlen = orig_sgl[i].length; + destlen = cur_dest_sgl->length; if (bounce_addr == 0) - bounce_addr = sg_kmap_atomic(bounce_sgl,j); + bounce_addr = (unsigned long)kmap_atomic( + sg_page(cur_src_sgl)); while (destlen) { - src = bounce_addr + bounce_sgl[j].offset; - srclen = bounce_sgl[j].length - bounce_sgl[j].offset; + src = bounce_addr + cur_src_sgl->offset; + srclen = cur_src_sgl->length - cur_src_sgl->offset; copylen = min(srclen, destlen); memcpy((void *)dest, (void *)src, copylen); total_copied += copylen; - bounce_sgl[j].offset += copylen; + cur_src_sgl->offset += copylen; destlen -= copylen; dest += copylen; - if (bounce_sgl[j].offset == bounce_sgl[j].length) { + if (cur_src_sgl->offset == cur_src_sgl->length) { /* full */ - sg_kunmap_atomic(bounce_addr); + kunmap_atomic((void *)bounce_addr); j++; /* @@ -684,21 +688,27 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, /* * We are done; cleanup and return. 
*/ - sg_kunmap_atomic(dest_addr - orig_sgl[i].offset); + kunmap_atomic((void *)(dest_addr - + cur_dest_sgl->offset)); local_irq_restore(flags); return total_copied; } /* if we need to use another bounce buffer */ - if (destlen || i != orig_sgl_count - 1) - bounce_addr = sg_kmap_atomic(bounce_sgl,j); + if (destlen || i != orig_sgl_count - 1) { + cur_src_sgl = sg_next(cur_src_sgl); + bounce_addr = (unsigned long) + kmap_atomic( + sg_page(cur_src_sgl)); + } } else if (destlen == 0 && i == orig_sgl_count - 1) { /* unmap the last bounce that is < PAGE_SIZE */ - sg_kunmap_atomic(bounce_addr); + kunmap_atomic((void *)bounce_addr); } } - sg_kunmap_atomic(dest_addr - orig_sgl[i].offset); + kunmap_atomic((void *)(dest_addr - cur_dest_sgl->offset)); + cur_dest_sgl = sg_next(cur_dest_sgl); } local_irq_restore(flags); @@ -719,48 +729,62 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, unsigned long bounce_addr = 0; unsigned long src_addr = 0; unsigned long flags; + struct scatterlist *cur_src_sgl; + struct scatterlist *cur_dest_sgl; local_irq_save(flags); + cur_src_sgl = orig_sgl; + cur_dest_sgl = bounce_sgl; + for (i = 0; i < orig_sgl_count; i++) { - src_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset; + src_addr = (unsigned long) + kmap_atomic(sg_page(cur_src_sgl)) + + cur_src_sgl->offset; src = src_addr; - srclen = orig_sgl[i].length; + srclen = cur_src_sgl->length; if (bounce_addr == 0) - bounce_addr = sg_kmap_atomic(bounce_sgl,j); + bounce_addr = (unsigned long) + kmap_atomic(sg_page(cur_dest_sgl)); while (srclen) { /* assume bounce offset always == 0 */ - dest = bounce_addr + bounce_sgl[j].length; - destlen = PAGE_SIZE - bounce_sgl[j].length; + dest = bounce_addr + cur_dest_sgl->length; + destlen = PAGE_SIZE - cur_dest_sgl->length; copylen = min(srclen, destlen); memcpy((void *)dest, (void *)src, copylen); total_copied += copylen; - bounce_sgl[j].length += copylen; + cur_dest_sgl->length += copylen; srclen -= copylen; src += copylen; - if (bounce_sgl[j].length == PAGE_SIZE) { + if (cur_dest_sgl->length == PAGE_SIZE) { /* full..move to next entry */ - sg_kunmap_atomic(bounce_addr); + kunmap_atomic((void *)bounce_addr); + bounce_addr = 0; j++; + } - /* if we need to use another bounce buffer */ - if (srclen || i != orig_sgl_count - 1) - bounce_addr = sg_kmap_atomic(bounce_sgl,j); - - } else if (srclen == 0 && i == orig_sgl_count - 1) { - /* unmap the last bounce that is < PAGE_SIZE */ - sg_kunmap_atomic(bounce_addr); + /* if we need to use another bounce buffer */ + if (srclen && bounce_addr == 0) { + cur_dest_sgl = sg_next(cur_dest_sgl); + bounce_addr = (unsigned long) + kmap_atomic( + sg_page(cur_dest_sgl)); } + } - sg_kunmap_atomic(src_addr - orig_sgl[i].offset); + kunmap_atomic((void *)(src_addr - cur_src_sgl->offset)); + cur_src_sgl = sg_next(cur_src_sgl); } + if (bounce_addr) + kunmap_atomic((void *)bounce_addr); + local_irq_restore(flags); return total_copied; @@ -970,6 +994,8 @@ static int storvsc_channel_init(struct hv_device *device) STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL) process_sub_channels = true; } + stor_device->max_transfer_bytes = + vstor_packet->storage_channel_properties.max_transfer_bytes; memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION; @@ -1080,6 +1106,8 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) struct Scsi_Host *host; struct storvsc_device *stor_dev; struct hv_device *dev = host_dev->dev; + u32 payload_sz = cmd_request->payload_sz; + 
void *payload = cmd_request->payload; stor_dev = get_in_stor_device(dev); host = stor_dev->host; @@ -1109,10 +1137,14 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) sense_hdr.ascq); scsi_set_resid(scmnd, - cmd_request->data_buffer.len - + cmd_request->payload->range.len - vm_srb->data_transfer_length); scmnd->scsi_done(scmnd); + + if (payload_sz > + sizeof(struct vmbus_channel_packet_multipage_buffer)) + kfree(payload); } static void storvsc_on_io_completion(struct hv_device *device, @@ -1314,7 +1346,7 @@ static int storvsc_dev_remove(struct hv_device *device) } static int storvsc_do_io(struct hv_device *device, - struct storvsc_cmd_request *request) + struct storvsc_cmd_request *request) { struct storvsc_device *stor_device; struct vstor_packet *vstor_packet; @@ -1346,19 +1378,20 @@ static int storvsc_do_io(struct hv_device *device, vstor_packet->vm_srb.data_transfer_length = - request->data_buffer.len; + request->payload->range.len; vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB; - if (request->data_buffer.len) { - ret = vmbus_sendpacket_multipagebuffer(outgoing_channel, - &request->data_buffer, + if (request->payload->range.len) { + + ret = vmbus_sendpacket_mpb_desc(outgoing_channel, + request->payload, request->payload_sz, vstor_packet, (sizeof(struct vstor_packet) - vmscsi_size_delta), (unsigned long)request); } else { - ret = vmbus_sendpacket(device->channel, vstor_packet, + ret = vmbus_sendpacket(outgoing_channel, vstor_packet, (sizeof(struct vstor_packet) - vmscsi_size_delta), (unsigned long)request, @@ -1376,7 +1409,6 @@ static int storvsc_do_io(struct hv_device *device, static int storvsc_device_configure(struct scsi_device *sdevice) { - scsi_change_queue_depth(sdevice, STORVSC_MAX_IO_REQUESTS); blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE); @@ -1526,6 +1558,10 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) struct scatterlist *sgl; unsigned int sg_count = 0; struct vmscsi_request *vm_srb; + struct scatterlist *cur_sgl; + struct vmbus_packet_mpb_array *payload; + u32 payload_sz; + u32 length; if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) { /* @@ -1579,46 +1615,71 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length); - cmd_request->data_buffer.len = scsi_bufflen(scmnd); - if (scsi_sg_count(scmnd)) { - sgl = (struct scatterlist *)scsi_sglist(scmnd); - sg_count = scsi_sg_count(scmnd); + sgl = (struct scatterlist *)scsi_sglist(scmnd); + sg_count = scsi_sg_count(scmnd); + + length = scsi_bufflen(scmnd); + payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb; + payload_sz = sizeof(cmd_request->mpb); + if (sg_count) { /* check if we need to bounce the sgl */ if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) { cmd_request->bounce_sgl = - create_bounce_buffer(sgl, scsi_sg_count(scmnd), - scsi_bufflen(scmnd), + create_bounce_buffer(sgl, sg_count, + length, vm_srb->data_in); if (!cmd_request->bounce_sgl) return SCSI_MLQUEUE_HOST_BUSY; cmd_request->bounce_sgl_count = - ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >> - PAGE_SHIFT; + ALIGN(length, PAGE_SIZE) >> PAGE_SHIFT; if (vm_srb->data_in == WRITE_TYPE) copy_to_bounce_buffer(sgl, - cmd_request->bounce_sgl, - scsi_sg_count(scmnd)); + cmd_request->bounce_sgl, sg_count); sgl = cmd_request->bounce_sgl; sg_count = cmd_request->bounce_sgl_count; } - cmd_request->data_buffer.offset = sgl[0].offset; - for (i = 0; i < sg_count; i++) - 
cmd_request->data_buffer.pfn_array[i] = - page_to_pfn(sg_page((&sgl[i]))); + if (sg_count > MAX_PAGE_BUFFER_COUNT) { + + payload_sz = (sg_count * sizeof(void *) + + sizeof(struct vmbus_packet_mpb_array)); + payload = kmalloc(payload_sz, GFP_ATOMIC); + if (!payload) { + if (cmd_request->bounce_sgl_count) + destroy_bounce_buffer( + cmd_request->bounce_sgl, + cmd_request->bounce_sgl_count); + + return SCSI_MLQUEUE_DEVICE_BUSY; + } + } + + payload->range.len = length; + payload->range.offset = sgl[0].offset; + + cur_sgl = sgl; + for (i = 0; i < sg_count; i++) { + payload->range.pfn_array[i] = + page_to_pfn(sg_page((cur_sgl))); + cur_sgl = sg_next(cur_sgl); + } } else if (scsi_sglist(scmnd)) { - cmd_request->data_buffer.offset = + payload->range.len = length; + payload->range.offset = virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1); - cmd_request->data_buffer.pfn_array[0] = + payload->range.pfn_array[0] = virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT; } + cmd_request->payload = payload; + cmd_request->payload_sz = payload_sz; + /* Invokes the vsc to start an IO */ ret = storvsc_do_io(dev, cmd_request); @@ -1646,12 +1707,8 @@ static struct scsi_host_template scsi_driver = { .eh_timed_out = storvsc_eh_timed_out, .slave_configure = storvsc_device_configure, .cmd_per_lun = 255, - .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS, .this_id = -1, - /* no use setting to 0 since ll_blk_rw reset it to 1 */ - /* currently 32 */ - .sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT, - .use_clustering = DISABLE_CLUSTERING, + .use_clustering = ENABLE_CLUSTERING, /* Make sure we dont get a sg segment crosses a page boundary */ .dma_boundary = PAGE_SIZE-1, .no_write_same = 1, @@ -1686,6 +1743,7 @@ static int storvsc_probe(struct hv_device *device, const struct hv_vmbus_device_id *dev_id) { int ret; + int num_cpus = num_online_cpus(); struct Scsi_Host *host; struct hv_host_device *host_dev; bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false); @@ -1694,6 +1752,7 @@ static int storvsc_probe(struct hv_device *device, int max_luns_per_target; int max_targets; int max_channels; + int max_sub_channels = 0; /* * Based on the windows host we are running on, @@ -1719,12 +1778,18 @@ static int storvsc_probe(struct hv_device *device, max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET; max_targets = STORVSC_MAX_TARGETS; max_channels = STORVSC_MAX_CHANNELS; + /* + * On Windows8 and above, we support sub-channels for storage. + * The number of sub-channels offerred is based on the number of + * VCPUs in the guest. + */ + max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel); break; } - if (dev_id->driver_data == SFC_GUID) - scsi_driver.can_queue = (STORVSC_MAX_IO_REQUESTS * - STORVSC_FC_MAX_TARGETS); + scsi_driver.can_queue = (max_outstanding_req_per_channel * + (max_sub_channels + 1)); + host = scsi_host_alloc(&scsi_driver, sizeof(struct hv_host_device)); if (!host) @@ -1780,6 +1845,12 @@ static int storvsc_probe(struct hv_device *device, /* max cmd length */ host->max_cmd_len = STORVSC_MAX_CMD_LEN; + /* + * set the table size based on the info we got + * from the host. 
+ */ + host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT); + /* Register the HBA and start the scsi bus scan */ ret = scsi_add_host(host, &device->device); if (ret != 0) @@ -1837,7 +1908,6 @@ static struct hv_driver storvsc_drv = { static int __init storvsc_drv_init(void) { - u32 max_outstanding_req_per_channel; /* * Divide the ring buffer data size (which is 1 page less @@ -1852,10 +1922,6 @@ static int __init storvsc_drv_init(void) vmscsi_size_delta, sizeof(u64))); - if (max_outstanding_req_per_channel < - STORVSC_MAX_IO_REQUESTS) - return -EINVAL; - return vmbus_driver_register(&storvsc_drv); } diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 2a906d1d34ba..22a42836d193 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -676,7 +676,6 @@ static struct platform_driver sun3_scsi_driver = { .remove = __exit_p(sun3_scsi_remove), .driver = { .name = DRV_MODULE_NAME, - .owner = THIS_MODULE, }, }; diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 9217af9bf734..6652a8171de6 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -214,8 +214,6 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) struct ufs_qcom_host *host = hba->priv; struct phy *phy = host->generic_phy; int ret = 0; - u8 major; - u16 minor, step; bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B) ? true : false; @@ -224,8 +222,6 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) /* provide 1ms delay to let the reset pulse propagate */ usleep_range(1000, 1100); - ufs_qcom_get_controller_revision(hba, &major, &minor, &step); - ufs_qcom_phy_save_controller_version(phy, major, minor, step); ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B); if (ret) { dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n", @@ -698,16 +694,24 @@ out: */ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) { - u8 major; - u16 minor, step; + struct ufs_qcom_host *host = hba->priv; - ufs_qcom_get_controller_revision(hba, &major, &minor, &step); + if (host->hw_ver.major == 0x1) + hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS; - /* - * TBD - * here we should be advertising controller quirks according to - * controller version. 
- */ + if (host->hw_ver.major >= 0x2) { + if (!ufs_qcom_cap_qunipro(host)) + /* Legacy UniPro mode still need following quirks */ + hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS; + } +} + +static void ufs_qcom_set_caps(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = hba->priv; + + if (host->hw_ver.major >= 0x2) + host->caps = UFS_QCOM_CAP_QUNIPRO; } static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host, @@ -929,6 +933,13 @@ static int ufs_qcom_init(struct ufs_hba *hba) if (err) goto out_host_free; + ufs_qcom_get_controller_revision(hba, &host->hw_ver.major, + &host->hw_ver.minor, &host->hw_ver.step); + + /* update phy revision information before calling phy_init() */ + ufs_qcom_phy_save_controller_version(host->generic_phy, + host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step); + phy_init(host->generic_phy); err = phy_power_on(host->generic_phy); if (err) @@ -938,6 +949,7 @@ static int ufs_qcom_init(struct ufs_hba *hba) if (err) goto out_disable_phy; + ufs_qcom_set_caps(hba); ufs_qcom_advertise_quirks(hba); hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING; diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h index 9a6febd007df..db2c0a00e846 100644 --- a/drivers/scsi/ufs/ufs-qcom.h +++ b/drivers/scsi/ufs/ufs-qcom.h @@ -151,7 +151,23 @@ struct ufs_qcom_bus_vote { struct device_attribute max_bus_bw; }; +/* Host controller hardware version: major.minor.step */ +struct ufs_hw_version { + u16 step; + u16 minor; + u8 major; +}; struct ufs_qcom_host { + + /* + * Set this capability if host controller supports the QUniPro mode + * and if driver wants the Host controller to operate in QUniPro mode. + * Note: By default this capability will be kept enabled if host + * controller supports the QUniPro mode. + */ + #define UFS_QCOM_CAP_QUNIPRO UFS_BIT(0) + u32 caps; + struct phy *generic_phy; struct ufs_hba *hba; struct ufs_qcom_bus_vote bus_vote; @@ -161,10 +177,20 @@ struct ufs_qcom_host { struct clk *rx_l1_sync_clk; struct clk *tx_l1_sync_clk; bool is_lane_clks_enabled; + + struct ufs_hw_version hw_ver; }; #define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba) #define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba) #define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba) +static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host) +{ + if (host->caps & UFS_QCOM_CAP_QUNIPRO) + return true; + else + return false; +} + #endif /* UFS_QCOM_H_ */ diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 2aa85e398f76..648a44675880 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -183,6 +183,7 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); +static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); static irqreturn_t ufshcd_intr(int irq, void *__hba); static int ufshcd_config_pwr_mode(struct ufs_hba *hba, @@ -972,6 +973,8 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) ufshcd_hold(hba, false); mutex_lock(&hba->uic_cmd_mutex); + ufshcd_add_delay_before_dme_cmd(hba); + spin_lock_irqsave(hba->host->host_lock, flags); ret = __ufshcd_send_uic_cmd(hba, uic_cmd); spin_unlock_irqrestore(hba->host->host_lock, flags); @@ -2058,6 +2061,37 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba) return ret; 
} +static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) +{ + #define MIN_DELAY_BEFORE_DME_CMDS_US 1000 + unsigned long min_sleep_time_us; + + if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) + return; + + /* + * last_dme_cmd_tstamp will be 0 only for 1st call to + * this function + */ + if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { + min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US; + } else { + unsigned long delta = + (unsigned long) ktime_to_us( + ktime_sub(ktime_get(), + hba->last_dme_cmd_tstamp)); + + if (delta < MIN_DELAY_BEFORE_DME_CMDS_US) + min_sleep_time_us = + MIN_DELAY_BEFORE_DME_CMDS_US - delta; + else + return; /* no more delay required */ + } + + /* allow sleep for extra 50us if needed */ + usleep_range(min_sleep_time_us, min_sleep_time_us + 50); +} + /** * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET * @hba: per adapter instance @@ -2157,6 +2191,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) mutex_lock(&hba->uic_cmd_mutex); init_completion(&uic_async_done); + ufshcd_add_delay_before_dme_cmd(hba); spin_lock_irqsave(hba->host->host_lock, flags); hba->uic_async_done = &uic_async_done; diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 4a574aa45855..b47ff07698e8 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -366,6 +366,7 @@ struct ufs_init_prefetch { * @saved_err: sticky error mask * @saved_uic_err: sticky UIC error mask * @dev_cmd: ufs device management command information + * @last_dme_cmd_tstamp: time stamp of the last completed DME command * @auto_bkops_enabled: to track whether bkops is enabled in device * @vreg_info: UFS device voltage regulator information * @clk_list_head: UFS host controller clocks list node head @@ -416,6 +417,13 @@ struct ufs_hba { unsigned int irq; bool is_irq_enabled; + /* + * delay before each dme command is required as the unipro + * layer has shown instabilities + */ + #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS UFS_BIT(0) + + unsigned int quirks; /* Deviations from standard UFSHCI spec. */ wait_queue_head_t tm_wq; wait_queue_head_t tm_tag_wq; @@ -446,6 +454,7 @@ struct ufs_hba { /* Device management request data */ struct ufs_dev_cmd dev_cmd; + ktime_t last_dme_cmd_tstamp; /* Keeps information of the UFS device connected to this host */ struct ufs_dev_info dev_info; |
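The new UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS quirk shown above records the completion time of the last DME command in last_dme_cmd_tstamp and, before issuing the next one, sleeps only for whatever part of the 1000 us minimum gap has not yet elapsed (the very first command, with a zero timestamp, gets the full delay). A small user-space sketch of that gap calculation, with clock_gettime() and nanosleep() standing in for the kernel's ktime helpers and usleep_range(), and illustrative names throughout:

```c
#include <stdio.h>
#include <time.h>

#define MIN_DELAY_BEFORE_CMDS_US 1000UL	/* mirrors MIN_DELAY_BEFORE_DME_CMDS_US */

static unsigned long last_cmd_us;	/* 0 until the first command completes */

static unsigned long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long)ts.tv_sec * 1000000UL +
	       (unsigned long)ts.tv_nsec / 1000UL;
}

static void sleep_us(unsigned long us)
{
	struct timespec ts = {
		.tv_sec  = us / 1000000UL,
		.tv_nsec = (long)(us % 1000000UL) * 1000L,
	};

	nanosleep(&ts, NULL);	/* the kernel code uses usleep_range(t, t + 50) */
}

/* Sleep only for the part of the minimum gap that has not yet elapsed. */
static void delay_before_cmd(void)
{
	unsigned long wait_us = MIN_DELAY_BEFORE_CMDS_US;

	if (last_cmd_us) {
		unsigned long delta = now_us() - last_cmd_us;

		if (delta >= MIN_DELAY_BEFORE_CMDS_US)
			return;			/* gap already satisfied */
		wait_us = MIN_DELAY_BEFORE_CMDS_US - delta;
	}
	sleep_us(wait_us);
}

int main(void)
{
	delay_before_cmd();		/* first call: full 1000 us delay */
	last_cmd_us = now_us();		/* pretend a command just completed */
	delay_before_cmd();		/* back-to-back call: sleeps the remainder */
	printf("two commands issued at least %lu us apart\n",
	       MIN_DELAY_BEFORE_CMDS_US);
	return 0;
}
```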