Diffstat (limited to 'drivers')
120 files changed, 2968 insertions, 1352 deletions
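Most of the block and ide changes below follow one pattern: the native and 32-bit compat ioctl paths are funnelled through a single handler, with blkdev_compat_ptr_ioctl() supplying the compat_ptr() conversion and in_compat_syscall() guarding the few commands whose argument layout differs between 32-bit and 64-bit userspace. The sketch below restates that pattern outside any real driver; the foo_* names and the FOOIOC_READ command are illustrative placeholders, while the helpers and block_device_operations fields are the kernel interfaces the patches use.

/*
 * Minimal sketch of the compat_ioctl pattern used throughout this series.
 * foo_* identifiers and FOOIOC_READ are made up for illustration only.
 */
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define FOOIOC_READ	0x4600		/* hypothetical command number */

struct foo_request {			/* native (64-bit) layout */
	int		nframes;
	void __user	*buf;
};

static int foo_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct foo_request req;

	if (cmd != FOOIOC_READ)
		return -ENOTTY;

#ifdef CONFIG_COMPAT
	/*
	 * blkdev_compat_ptr_ioctl() has already applied compat_ptr() to
	 * arg; only the layout of the pointed-to structure differs here.
	 */
	if (in_compat_syscall()) {
		struct compat_foo_request {
			compat_int_t	nframes;
			compat_caddr_t	buf;
		} req32;

		if (copy_from_user(&req32, argp, sizeof(req32)))
			return -EFAULT;

		req.nframes = req32.nframes;
		req.buf = compat_ptr(req32.buf);
	} else
#endif
	{
		if (copy_from_user(&req, argp, sizeof(req)))
			return -EFAULT;
	}

	/* ... carry out the transfer using req.nframes and req.buf ... */
	return 0;
}

static const struct block_device_operations foo_bdops = {
	.owner		= THIS_MODULE,
	.ioctl		= foo_ioctl,
	/*
	 * The generic helper converts the pointer argument and calls
	 * .ioctl; it resolves to NULL when CONFIG_COMPAT is disabled,
	 * which is why several hunks below assign it without an #ifdef.
	 */
	.compat_ioctl	= blkdev_compat_ptr_ioctl,
};

Commands whose argument is a plain pointer to a layout-compatible structure need nothing beyond the .compat_ioctl assignment; only layout-sensitive commands (such as the cdrom_read_audio and HDIO_GET_32BIT cases below) need the in_compat_syscall() branch.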
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 58e09ffe8b9c..eb2eb599e602 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -17,6 +17,7 @@ * - http://www.t13.org/ */ +#include <linux/compat.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/blkdev.h> @@ -761,6 +762,10 @@ static int ata_ioc32(struct ata_port *ap) return 0; } +/* + * This handles both native and compat commands, so anything added + * here must have a compatible argument, or check in_compat_syscall() + */ int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, unsigned int cmd, void __user *arg) { @@ -773,6 +778,10 @@ int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, spin_lock_irqsave(ap->lock, flags); val = ata_ioc32(ap); spin_unlock_irqrestore(ap->lock, flags); +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) + return put_user(val, (compat_ulong_t __user *)arg); +#endif return put_user(val, (unsigned long __user *)arg); case HDIO_SET_32BIT: diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c index 20736aaa0e69..f7bd0f4db13d 100644 --- a/drivers/base/attribute_container.c +++ b/drivers/base/attribute_container.c @@ -236,6 +236,109 @@ attribute_container_remove_device(struct device *dev, mutex_unlock(&attribute_container_mutex); } +static int +do_attribute_container_device_trigger_safe(struct device *dev, + struct attribute_container *cont, + int (*fn)(struct attribute_container *, + struct device *, struct device *), + int (*undo)(struct attribute_container *, + struct device *, struct device *)) +{ + int ret; + struct internal_container *ic, *failed; + struct klist_iter iter; + + if (attribute_container_no_classdevs(cont)) + return fn(cont, dev, NULL); + + klist_for_each_entry(ic, &cont->containers, node, &iter) { + if (dev == ic->classdev.parent) { + ret = fn(cont, dev, &ic->classdev); + if (ret) { + failed = ic; + klist_iter_exit(&iter); + goto fail; + } + } + } + return 0; + +fail: + if (!undo) + return ret; + + /* Attempt to undo the work partially done. */ + klist_for_each_entry(ic, &cont->containers, node, &iter) { + if (ic == failed) { + klist_iter_exit(&iter); + break; + } + if (dev == ic->classdev.parent) + undo(cont, dev, &ic->classdev); + } + return ret; +} + +/** + * attribute_container_device_trigger_safe - execute a trigger for each + * matching classdev or fail all of them. + * + * @dev: The generic device to run the trigger for + * @fn the function to execute for each classdev. + * @undo A function to undo the work previously done in case of error + * + * This function is a safe version of + * attribute_container_device_trigger. It stops on the first error and + * undo the partial work that has been done, on previous classdev. It + * is guaranteed that either they all succeeded, or none of them + * succeeded. 
+ */ +int +attribute_container_device_trigger_safe(struct device *dev, + int (*fn)(struct attribute_container *, + struct device *, + struct device *), + int (*undo)(struct attribute_container *, + struct device *, + struct device *)) +{ + struct attribute_container *cont, *failed = NULL; + int ret = 0; + + mutex_lock(&attribute_container_mutex); + + list_for_each_entry(cont, &attribute_container_list, node) { + + if (!cont->match(cont, dev)) + continue; + + ret = do_attribute_container_device_trigger_safe(dev, cont, + fn, undo); + if (ret) { + failed = cont; + break; + } + } + + if (ret && !WARN_ON(!undo)) { + list_for_each_entry(cont, &attribute_container_list, node) { + + if (failed == cont) + break; + + if (!cont->match(cont, dev)) + continue; + + do_attribute_container_device_trigger_safe(dev, cont, + undo, NULL); + } + } + + mutex_unlock(&attribute_container_mutex); + return ret; + +} + /** * attribute_container_device_trigger - execute a trigger for each matching classdev * diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c index 5ed86ded6e6b..ccc86206e508 100644 --- a/drivers/base/transport_class.c +++ b/drivers/base/transport_class.c @@ -30,6 +30,10 @@ #include <linux/attribute_container.h> #include <linux/transport_class.h> +static int transport_remove_classdev(struct attribute_container *cont, + struct device *dev, + struct device *classdev); + /** * transport_class_register - register an initial transport class * @@ -172,10 +176,11 @@ static int transport_add_class_device(struct attribute_container *cont, * routine is simply a trigger point used to add the device to the * system and register attributes for it. */ - -void transport_add_device(struct device *dev) +int transport_add_device(struct device *dev) { - attribute_container_device_trigger(dev, transport_add_class_device); + return attribute_container_device_trigger_safe(dev, + transport_add_class_device, + transport_remove_classdev); } EXPORT_SYMBOL_GPL(transport_add_device); diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index bd19f8af950b..7b32fb673375 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -329,6 +329,7 @@ static const struct block_device_operations aoe_bdops = { .open = aoeblk_open, .release = aoeblk_release, .ioctl = aoeblk_ioctl, + .compat_ioctl = blkdev_compat_ptr_ioctl, .getgeo = aoeblk_getgeo, .owner = THIS_MODULE, }; diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 485865fd0412..cd3612e4e2e1 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3879,6 +3879,9 @@ static int fd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int { int drive = (long)bdev->bd_disk->private_data; switch (cmd) { + case CDROMEJECT: /* CD-ROM eject */ + case 0x6470: /* SunOS floppy eject */ + case FDMSGON: case FDMSGOFF: case FDSETEMSGTRESH: diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 636bfea2de6f..117cfc8cd05a 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -275,6 +275,9 @@ static const struct block_device_operations pcd_bdops = { .open = pcd_block_open, .release = pcd_block_release, .ioctl = pcd_block_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = blkdev_compat_ptr_ioctl, +#endif .check_events = pcd_block_check_events, }; diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 6f9ad3fc716f..c0967507d085 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -874,6 +874,7 @@ static const struct block_device_operations
pd_fops = { .open = pd_open, .release = pd_release, .ioctl = pd_ioctl, + .compat_ioctl = pd_ioctl, .getgeo = pd_getgeo, .check_events = pd_check_events, .revalidate_disk= pd_revalidate diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 6b7d4cab3687..bb09f21ce21a 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -276,6 +276,7 @@ static const struct block_device_operations pf_fops = { .open = pf_open, .release = pf_release, .ioctl = pf_ioctl, + .compat_ioctl = pf_ioctl, .getgeo = pf_getgeo, .check_events = pf_check_events, }; diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 861fc65a1b75..5f970a7d32c0 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2663,28 +2663,6 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, return ret; } -#ifdef CONFIG_COMPAT -static int pkt_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) -{ - switch (cmd) { - /* compatible */ - case CDROMEJECT: - case CDROMMULTISESSION: - case CDROMREADTOCENTRY: - case SCSI_IOCTL_SEND_COMMAND: - return pkt_ioctl(bdev, mode, cmd, (unsigned long)compat_ptr(arg)); - - - /* FIXME: no handler so far */ - case CDROM_LAST_WRITTEN: - /* handled in compat_blkdev_driver_ioctl */ - case CDROM_SEND_PACKET: - default: - return -ENOIOCTLCMD; - } -} -#endif - static unsigned int pkt_check_events(struct gendisk *disk, unsigned int clearing) { @@ -2706,9 +2684,7 @@ static const struct block_device_operations pktcdvd_ops = { .open = pkt_open, .release = pkt_close, .ioctl = pkt_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = pkt_compat_ioctl, -#endif + .compat_ioctl = blkdev_compat_ptr_ioctl, .check_events = pkt_check_events, }; diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 571612e233fe..39aeebc6837d 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -171,6 +171,7 @@ static const struct block_device_operations vdc_fops = { .owner = THIS_MODULE, .getgeo = vdc_getgeo, .ioctl = vdc_ioctl, + .compat_ioctl = blkdev_compat_ptr_ioctl, }; static void vdc_blk_queue_start(struct vdc_port *port) diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 7ffd719d89de..fbbf18ac1d5d 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -405,6 +405,9 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) static const struct block_device_operations virtblk_fops = { .ioctl = virtblk_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = blkdev_compat_ptr_ioctl, +#endif .owner = THIS_MODULE, .getgeo = virtblk_getgeo, }; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index c02be06c5299..57d50c5ba309 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -2632,6 +2632,7 @@ static const struct block_device_operations xlvbd_block_fops = .release = blkif_release, .getgeo = blkif_getgeo, .ioctl = blkif_ioctl, + .compat_ioctl = blkdev_compat_ptr_ioctl, }; diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index eebdcbef0578..faca0f346fff 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -3017,9 +3017,31 @@ static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi, struct cdrom_read_audio ra; int lba; - if (copy_from_user(&ra, (struct cdrom_read_audio __user *)arg, - sizeof(ra))) - return -EFAULT; +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) { + struct compat_cdrom_read_audio { + union cdrom_addr addr; + u8 addr_format; + 
compat_int_t nframes; + compat_caddr_t buf; + } ra32; + + if (copy_from_user(&ra32, arg, sizeof(ra32))) + return -EFAULT; + + ra = (struct cdrom_read_audio) { + .addr = ra32.addr, + .addr_format = ra32.addr_format, + .nframes = ra32.nframes, + .buf = compat_ptr(ra32.buf), + }; + } else +#endif + { + if (copy_from_user(&ra, (struct cdrom_read_audio __user *)arg, + sizeof(ra))) + return -EFAULT; + } if (ra.addr_format == CDROM_MSF) lba = msf_to_lba(ra.addr.msf.minute, @@ -3271,9 +3293,10 @@ static noinline int mmc_ioctl_cdrom_last_written(struct cdrom_device_info *cdi, ret = cdrom_get_last_written(cdi, &last); if (ret) return ret; - if (copy_to_user((long __user *)arg, &last, sizeof(last))) - return -EFAULT; - return 0; + if (in_compat_syscall()) + return put_user(last, (__s32 __user *)arg); + + return put_user(last, (long __user *)arg); } static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 5b21dc421c94..886b2638c730 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -518,6 +518,9 @@ static const struct block_device_operations gdrom_bdops = { .release = gdrom_bdops_release, .check_events = gdrom_bdops_check_events, .ioctl = gdrom_bdops_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = blkdev_compat_ptr_ioctl, +#endif }; static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id) diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 9d117936bee1..dcf8b51b47fd 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -25,6 +25,7 @@ #define IDECD_VERSION "5.00" +#include <linux/compat.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> @@ -1710,6 +1711,41 @@ static int idecd_ioctl(struct block_device *bdev, fmode_t mode, return ret; } +static int idecd_locked_compat_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info); + void __user *argp = compat_ptr(arg); + int err; + + switch (cmd) { + case CDROMSETSPINDOWN: + return idecd_set_spindown(&info->devinfo, (unsigned long)argp); + case CDROMGETSPINDOWN: + return idecd_get_spindown(&info->devinfo, (unsigned long)argp); + default: + break; + } + + err = generic_ide_ioctl(info->drive, bdev, cmd, arg); + if (err == -EINVAL) + err = cdrom_ioctl(&info->devinfo, bdev, mode, cmd, + (unsigned long)argp); + + return err; +} + +static int idecd_compat_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + int ret; + + mutex_lock(&ide_cd_mutex); + ret = idecd_locked_compat_ioctl(bdev, mode, cmd, arg); + mutex_unlock(&ide_cd_mutex); + + return ret; +} static unsigned int idecd_check_events(struct gendisk *disk, unsigned int clearing) @@ -1732,6 +1768,8 @@ static const struct block_device_operations idecd_ops = { .open = idecd_open, .release = idecd_release, .ioctl = idecd_ioctl, + .compat_ioctl = IS_ENABLED(CONFIG_COMPAT) ?
+ idecd_compat_ioctl : NULL, .check_events = idecd_check_events, .revalidate_disk = idecd_revalidate_disk }; diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 197912af5c2f..1d3407d7e095 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -794,4 +794,5 @@ const struct ide_disk_ops ide_ata_disk_ops = { .set_doorlock = ide_disk_set_doorlock, .do_request = ide_do_rw_disk, .ioctl = ide_disk_ioctl, + .compat_ioctl = ide_disk_ioctl, }; diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 1ea2f9e82bf8..1fe1f9d37a51 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -19,6 +19,7 @@ #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> +#include <linux/compat.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/mm.h> @@ -546,4 +547,7 @@ const struct ide_disk_ops ide_atapi_disk_ops = { .set_doorlock = ide_set_media_lock, .do_request = ide_floppy_do_request, .ioctl = ide_floppy_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ide_floppy_compat_ioctl, +#endif }; diff --git a/drivers/ide/ide-floppy.h b/drivers/ide/ide-floppy.h index 13c9b4b6d75e..8505a5f58f4e 100644 --- a/drivers/ide/ide-floppy.h +++ b/drivers/ide/ide-floppy.h @@ -26,6 +26,8 @@ void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *); /* ide-floppy_ioctl.c */ int ide_floppy_ioctl(ide_drive_t *, struct block_device *, fmode_t, unsigned int, unsigned long); +int ide_floppy_compat_ioctl(ide_drive_t *, struct block_device *, fmode_t, + unsigned int, unsigned long); #ifdef CONFIG_IDE_PROC_FS /* ide-floppy_proc.c */ diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c index 40a2ebe34e1d..39a790ac6cc3 100644 --- a/drivers/ide/ide-floppy_ioctl.c +++ b/drivers/ide/ide-floppy_ioctl.c @@ -5,6 +5,7 @@ #include <linux/kernel.h> #include <linux/ide.h> +#include <linux/compat.h> #include <linux/cdrom.h> #include <linux/mutex.h> @@ -302,3 +303,37 @@ out: mutex_unlock(&ide_floppy_ioctl_mutex); return err; } + +#ifdef CONFIG_COMPAT +int ide_floppy_compat_ioctl(ide_drive_t *drive, struct block_device *bdev, + fmode_t mode, unsigned int cmd, unsigned long arg) +{ + struct ide_atapi_pc pc; + void __user *argp = compat_ptr(arg); + int err; + + mutex_lock(&ide_floppy_ioctl_mutex); + if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) { + err = ide_floppy_lockdoor(drive, &pc, arg, cmd); + goto out; + } + + err = ide_floppy_format_ioctl(drive, &pc, mode, cmd, argp); + if (err != -ENOTTY) + goto out; + + /* + * skip SCSI_IOCTL_SEND_COMMAND (deprecated) + * and CDROM_SEND_PACKET (legacy) ioctls + */ + if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND) + err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp); + + if (err == -ENOTTY) + err = generic_ide_ioctl(drive, bdev, cmd, arg); + +out: + mutex_unlock(&ide_floppy_ioctl_mutex); + return err; +} +#endif diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c index dba9ad5c97b3..1bb99b556393 100644 --- a/drivers/ide/ide-gd.c +++ b/drivers/ide/ide-gd.c @@ -341,11 +341,28 @@ static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode, return drive->disk_ops->ioctl(drive, bdev, mode, cmd, arg); } +#ifdef CONFIG_COMPAT +static int ide_gd_compat_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj); + ide_drive_t *drive = idkp->drive; + + if (!drive->disk_ops->compat_ioctl) + return -ENOIOCTLCMD; + + return drive->disk_ops->compat_ioctl(drive, bdev, mode, cmd, arg); +} 
+#endif + static const struct block_device_operations ide_gd_ops = { .owner = THIS_MODULE, .open = ide_gd_unlocked_open, .release = ide_gd_release, .ioctl = ide_gd_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ide_gd_compat_ioctl, +#endif .getgeo = ide_gd_getgeo, .check_events = ide_gd_check_events, .unlock_native_capacity = ide_gd_unlock_native_capacity, diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c index d48c17003874..09491098047b 100644 --- a/drivers/ide/ide-ioctls.c +++ b/drivers/ide/ide-ioctls.c @@ -3,11 +3,20 @@ * IDE ioctls handling. */ +#include <linux/compat.h> #include <linux/export.h> #include <linux/hdreg.h> #include <linux/ide.h> #include <linux/slab.h> +static int put_user_long(long val, unsigned long arg) +{ + if (in_compat_syscall()) + return put_user(val, (compat_long_t __user *)compat_ptr(arg)); + + return put_user(val, (long __user *)arg); +} + static const struct ide_ioctl_devset ide_ioctl_settings[] = { { HDIO_GET_32BIT, HDIO_SET_32BIT, &ide_devset_io_32bit }, { HDIO_GET_KEEPSETTINGS, HDIO_SET_KEEPSETTINGS, &ide_devset_keepsettings }, @@ -37,7 +46,7 @@ read_val: mutex_lock(&ide_setting_mtx); err = ds->get(drive); mutex_unlock(&ide_setting_mtx); - return err >= 0 ? put_user(err, (long __user *)arg) : err; + return err >= 0 ? put_user_long(err, arg) : err; set_val: if (bdev != bdev->bd_contains) @@ -56,7 +65,7 @@ set_val: EXPORT_SYMBOL_GPL(ide_setting_ioctl); static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd, - unsigned long arg) + void __user *argp) { u16 *id = NULL; int size = (cmd == HDIO_GET_IDENTITY) ? (ATA_ID_WORDS * 2) : 142; @@ -77,7 +86,7 @@ static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd, memcpy(id, drive->id, size); ata_id_to_hd_driveid(id); - if (copy_to_user((void __user *)arg, id, size)) + if (copy_to_user(argp, id, size)) rc = -EFAULT; kfree(id); @@ -87,10 +96,10 @@ out: static int ide_get_nice_ioctl(ide_drive_t *drive, unsigned long arg) { - return put_user((!!(drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) + return put_user_long((!!(drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) << IDE_NICE_DSC_OVERLAP) | (!!(drive->dev_flags & IDE_DFLAG_NICE1) - << IDE_NICE_1), (long __user *)arg); + << IDE_NICE_1), arg); } static int ide_set_nice_ioctl(ide_drive_t *drive, unsigned long arg) @@ -115,7 +124,7 @@ static int ide_set_nice_ioctl(ide_drive_t *drive, unsigned long arg) return 0; } -static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg) +static int ide_cmd_ioctl(ide_drive_t *drive, void __user *argp) { u8 *buf = NULL; int bufsize = 0, err = 0; @@ -123,7 +132,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg) struct ide_cmd cmd; struct ide_taskfile *tf = &cmd.tf; - if (NULL == (void *) arg) { + if (NULL == argp) { struct request *rq; rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0); @@ -135,7 +144,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg) return err; } - if (copy_from_user(args, (void __user *)arg, 4)) + if (copy_from_user(args, argp, 4)) return -EFAULT; memset(&cmd, 0, sizeof(cmd)); @@ -181,19 +190,18 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg) args[1] = tf->error; args[2] = tf->nsect; abort: - if (copy_to_user((void __user *)arg, &args, 4)) + if (copy_to_user(argp, &args, 4)) err = -EFAULT; if (buf) { - if (copy_to_user((void __user *)(arg + 4), buf, bufsize)) + if (copy_to_user((argp + 4), buf, bufsize)) err = -EFAULT; kfree(buf); } return err; } -static int ide_task_ioctl(ide_drive_t *drive, unsigned long arg) +static int
ide_task_ioctl(ide_drive_t *drive, void __user *p) { - void __user *p = (void __user *)arg; int err = 0; u8 args[7]; struct ide_cmd cmd; @@ -237,6 +245,10 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev, unsigned int cmd, unsigned long arg) { int err; + void __user *argp = (void __user *)arg; + + if (in_compat_syscall()) + argp = compat_ptr(arg); err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_ioctl_settings); if (err != -EOPNOTSUPP) @@ -247,7 +259,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev, case HDIO_GET_IDENTITY: if (bdev != bdev->bd_contains) return -EINVAL; - return ide_get_identity_ioctl(drive, cmd, arg); + return ide_get_identity_ioctl(drive, cmd, argp); case HDIO_GET_NICE: return ide_get_nice_ioctl(drive, arg); case HDIO_SET_NICE: @@ -258,6 +270,9 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev, case HDIO_DRIVE_TASKFILE: if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; + /* missing compat handler for HDIO_DRIVE_TASKFILE */ + if (in_compat_syscall()) + return -ENOTTY; if (drive->media == ide_disk) return ide_taskfile_ioctl(drive, arg); return -ENOMSG; @@ -265,11 +280,11 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev, case HDIO_DRIVE_CMD: if (!capable(CAP_SYS_RAWIO)) return -EACCES; - return ide_cmd_ioctl(drive, arg); + return ide_cmd_ioctl(drive, argp); case HDIO_DRIVE_TASK: if (!capable(CAP_SYS_RAWIO)) return -EACCES; - return ide_task_ioctl(drive, arg); + return ide_task_ioctl(drive, argp); case HDIO_DRIVE_RESET: if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -277,7 +292,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev, case HDIO_GET_BUSSTATE: if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (put_user(BUSSTATE_ON, (long __user *)arg)) + if (put_user_long(BUSSTATE_ON, arg)) return -EFAULT; return 0; case HDIO_SET_BUSSTATE: diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 3e7482695f77..6f26634b22bb 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -1945,11 +1945,22 @@ static int idetape_ioctl(struct block_device *bdev, fmode_t mode, return err; } +static int idetape_compat_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + if (cmd == 0x0340 || cmd == 0x350) + arg = (unsigned long)compat_ptr(arg); + + return idetape_ioctl(bdev, mode, cmd, arg); +} + static const struct block_device_operations idetape_block_ops = { .owner = THIS_MODULE, .open = idetape_open, .release = idetape_release, .ioctl = idetape_ioctl, + .compat_ioctl = IS_ENABLED(CONFIG_COMPAT) ? + idetape_compat_ioctl : NULL, }; static int ide_tape_probe(ide_drive_t *drive) diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index c25e8a54e869..3170b295a5da 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -134,7 +134,7 @@ static char *blogic_cmd_failure_reason; static void blogic_announce_drvr(struct blogic_adapter *adapter) { blogic_announce("***** BusLogic SCSI Driver Version " blogic_drvr_version " of " blogic_drvr_date " *****\n", adapter); - blogic_announce("Copyright 1995-1998 by Leonard N. Zubkoff " "<lnz@dandelion.com>\n", adapter); + blogic_announce("Copyright 1995-1998 by Leonard N. 
Zubkoff <lnz@dandelion.com>\n", adapter); } @@ -440,7 +440,7 @@ static int blogic_cmd(struct blogic_adapter *adapter, enum blogic_opcode opcode, goto done; } if (blogic_global_options.trace_config) - blogic_notice("blogic_cmd(%02X) Status = %02X: " "(Modify I/O Address)\n", adapter, opcode, statusreg.all); + blogic_notice("blogic_cmd(%02X) Status = %02X: (Modify I/O Address)\n", adapter, opcode, statusreg.all); result = 0; goto done; } @@ -716,23 +716,23 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter) pci_addr = base_addr1 = pci_resource_start(pci_device, 1); if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) { - blogic_err("BusLogic: Base Address0 0x%X not I/O for " "MultiMaster Host Adapter\n", NULL, base_addr0); - blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr); + blogic_err("BusLogic: Base Address0 0x%lX not I/O for MultiMaster Host Adapter\n", NULL, base_addr0); + blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr); continue; } if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) { - blogic_err("BusLogic: Base Address1 0x%X not Memory for " "MultiMaster Host Adapter\n", NULL, base_addr1); - blogic_err("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, bus, device, pci_addr); + blogic_err("BusLogic: Base Address1 0x%lX not Memory for MultiMaster Host Adapter\n", NULL, base_addr1); + blogic_err("at PCI Bus %d Device %d PCI Address 0x%lX\n", NULL, bus, device, pci_addr); continue; } if (irq_ch == 0) { - blogic_err("BusLogic: IRQ Channel %d invalid for " "MultiMaster Host Adapter\n", NULL, irq_ch); - blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr); + blogic_err("BusLogic: IRQ Channel %d invalid for MultiMaster Host Adapter\n", NULL, irq_ch); + blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr); continue; } if (blogic_global_options.trace_probe) { - blogic_notice("BusLogic: PCI MultiMaster Host Adapter " "detected at\n", NULL); - blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, bus, device, io_addr, pci_addr); + blogic_notice("BusLogic: PCI MultiMaster Host Adapter detected at\n", NULL); + blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX\n", NULL, bus, device, io_addr, pci_addr); } /* Issue the Inquire PCI Host Adapter Information command to determine @@ -818,7 +818,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter) nonpr_mmcount++; mmcount++; } else - blogic_warn("BusLogic: Too many Host Adapters " "detected\n", NULL); + blogic_warn("BusLogic: Too many Host Adapters detected\n", NULL); } /* If the AutoSCSI "Use Bus And Device # For PCI Scanning Seq." 
@@ -956,23 +956,23 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter) pci_addr = base_addr1 = pci_resource_start(pci_device, 1); #ifdef CONFIG_SCSI_FLASHPOINT if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) { - blogic_err("BusLogic: Base Address0 0x%X not I/O for " "FlashPoint Host Adapter\n", NULL, base_addr0); - blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr); + blogic_err("BusLogic: Base Address0 0x%lX not I/O for FlashPoint Host Adapter\n", NULL, base_addr0); + blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr); continue; } if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) { - blogic_err("BusLogic: Base Address1 0x%X not Memory for " "FlashPoint Host Adapter\n", NULL, base_addr1); - blogic_err("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, bus, device, pci_addr); + blogic_err("BusLogic: Base Address1 0x%lX not Memory for FlashPoint Host Adapter\n", NULL, base_addr1); + blogic_err("at PCI Bus %d Device %d PCI Address 0x%lX\n", NULL, bus, device, pci_addr); continue; } if (irq_ch == 0) { - blogic_err("BusLogic: IRQ Channel %d invalid for " "FlashPoint Host Adapter\n", NULL, irq_ch); - blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr); + blogic_err("BusLogic: IRQ Channel %d invalid for FlashPoint Host Adapter\n", NULL, irq_ch); + blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr); continue; } if (blogic_global_options.trace_probe) { - blogic_notice("BusLogic: FlashPoint Host Adapter " "detected at\n", NULL); - blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, bus, device, io_addr, pci_addr); + blogic_notice("BusLogic: FlashPoint Host Adapter detected at\n", NULL); + blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX\n", NULL, bus, device, io_addr, pci_addr); } if (blogic_probeinfo_count < BLOGIC_MAX_ADAPTERS) { struct blogic_probeinfo *probeinfo = @@ -987,11 +987,11 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter) probeinfo->pci_device = pci_dev_get(pci_device); fpcount++; } else - blogic_warn("BusLogic: Too many Host Adapters " "detected\n", NULL); + blogic_warn("BusLogic: Too many Host Adapters detected\n", NULL); #else - blogic_err("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", NULL, bus, device); - blogic_err("BusLogic: I/O Address 0x%X PCI Address 0x%X, irq %d, " "but FlashPoint\n", NULL, io_addr, pci_addr, irq_ch); - blogic_err("BusLogic: support was omitted in this kernel " "configuration.\n", NULL); + blogic_err("BusLogic: FlashPoint Host Adapter detected at PCI Bus %d Device %d\n", NULL, bus, device); + blogic_err("BusLogic: I/O Address 0x%lX PCI Address 0x%lX, irq %d, but FlashPoint\n", NULL, io_addr, pci_addr, irq_ch); + blogic_err("BusLogic: support was omitted in this kernel configuration.\n", NULL); #endif } /* @@ -1099,9 +1099,9 @@ static bool blogic_failure(struct blogic_adapter *adapter, char *msg) if (adapter->adapter_bus_type == BLOGIC_PCI_BUS) { blogic_err("While configuring BusLogic PCI Host Adapter at\n", adapter); - blogic_err("Bus %d Device %d I/O Address 0x%X PCI Address 0x%X:\n", adapter, adapter->bus, adapter->dev, adapter->io_addr, adapter->pci_addr); + blogic_err("Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX:\n", adapter, adapter->bus, adapter->dev, adapter->io_addr, adapter->pci_addr); } else - blogic_err("While configuring BusLogic Host 
Adapter at " "I/O Address 0x%X:\n", adapter, adapter->io_addr); + blogic_err("While configuring BusLogic Host Adapter at I/O Address 0x%lX:\n", adapter, adapter->io_addr); blogic_err("%s FAILED - DETACHING\n", adapter, msg); if (blogic_cmd_failure_reason != NULL) blogic_err("ADDITIONAL FAILURE INFO - %s\n", adapter, @@ -1129,13 +1129,13 @@ static bool __init blogic_probe(struct blogic_adapter *adapter) fpinfo->present = false; if (!(FlashPoint_ProbeHostAdapter(fpinfo) == 0 && fpinfo->present)) { - blogic_err("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", adapter, adapter->bus, adapter->dev); - blogic_err("BusLogic: I/O Address 0x%X PCI Address 0x%X, " "but FlashPoint\n", adapter, adapter->io_addr, adapter->pci_addr); + blogic_err("BusLogic: FlashPoint Host Adapter detected at PCI Bus %d Device %d\n", adapter, adapter->bus, adapter->dev); + blogic_err("BusLogic: I/O Address 0x%lX PCI Address 0x%lX, but FlashPoint\n", adapter, adapter->io_addr, adapter->pci_addr); blogic_err("BusLogic: Probe Function failed to validate it.\n", adapter); return false; } if (blogic_global_options.trace_probe) - blogic_notice("BusLogic_Probe(0x%X): FlashPoint Found\n", adapter, adapter->io_addr); + blogic_notice("BusLogic_Probe(0x%lX): FlashPoint Found\n", adapter, adapter->io_addr); /* Indicate the Host Adapter Probe completed successfully. */ @@ -1152,7 +1152,7 @@ static bool __init blogic_probe(struct blogic_adapter *adapter) intreg.all = blogic_rdint(adapter); georeg.all = blogic_rdgeom(adapter); if (blogic_global_options.trace_probe) - blogic_notice("BusLogic_Probe(0x%X): Status 0x%02X, Interrupt 0x%02X, " "Geometry 0x%02X\n", adapter, adapter->io_addr, statusreg.all, intreg.all, georeg.all); + blogic_notice("BusLogic_Probe(0x%lX): Status 0x%02X, Interrupt 0x%02X, Geometry 0x%02X\n", adapter, adapter->io_addr, statusreg.all, intreg.all, georeg.all); if (statusreg.all == 0 || statusreg.sr.diag_active || statusreg.sr.cmd_param_busy || statusreg.sr.rsvd || statusreg.sr.cmd_invalid || intreg.ir.rsvd != 0) @@ -1231,7 +1231,7 @@ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset) udelay(100); } if (blogic_global_options.trace_hw_reset) - blogic_notice("BusLogic_HardwareReset(0x%X): Diagnostic Active, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all); + blogic_notice("BusLogic_HardwareReset(0x%lX): Diagnostic Active, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all); if (timeout < 0) return false; /* @@ -1251,7 +1251,7 @@ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset) udelay(100); } if (blogic_global_options.trace_hw_reset) - blogic_notice("BusLogic_HardwareReset(0x%X): Diagnostic Completed, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all); + blogic_notice("BusLogic_HardwareReset(0x%lX): Diagnostic Completed, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all); if (timeout < 0) return false; /* @@ -1267,7 +1267,7 @@ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset) udelay(100); } if (blogic_global_options.trace_hw_reset) - blogic_notice("BusLogic_HardwareReset(0x%X): Host Adapter Ready, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all); + blogic_notice("BusLogic_HardwareReset(0x%lX): Host Adapter Ready, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all); if (timeout < 0) return false; /* @@ -1323,7 +1323,7 @@ static bool __init blogic_checkadapter(struct blogic_adapter *adapter) Provide tracing information if requested and return. 
*/ if (blogic_global_options.trace_probe) - blogic_notice("BusLogic_Check(0x%X): MultiMaster %s\n", adapter, + blogic_notice("BusLogic_Check(0x%lX): MultiMaster %s\n", adapter, adapter->io_addr, (result ? "Found" : "Not Found")); return result; @@ -1836,7 +1836,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter) int tgt_id; blogic_info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n", adapter, adapter->model, blogic_adapter_busnames[adapter->adapter_bus_type], (adapter->wide ? " Wide" : ""), (adapter->differential ? " Differential" : ""), (adapter->ultra ? " Ultra" : "")); - blogic_info(" Firmware Version: %s, I/O Address: 0x%X, " "IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge")); + blogic_info(" Firmware Version: %s, I/O Address: 0x%lX, IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge")); if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) { blogic_info(" DMA Channel: ", adapter); if (adapter->dma_ch > 0) @@ -1844,7 +1844,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter) else blogic_info("None, ", adapter); if (adapter->bios_addr > 0) - blogic_info("BIOS Address: 0x%X, ", adapter, + blogic_info("BIOS Address: 0x%lX, ", adapter, adapter->bios_addr); else blogic_info("BIOS Address: None, ", adapter); @@ -1852,7 +1852,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter) blogic_info(" PCI Bus: %d, Device: %d, Address: ", adapter, adapter->bus, adapter->dev); if (adapter->pci_addr > 0) - blogic_info("0x%X, ", adapter, adapter->pci_addr); + blogic_info("0x%lX, ", adapter, adapter->pci_addr); else blogic_info("Unassigned, ", adapter); } @@ -1932,10 +1932,10 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter) blogic_info(" Disconnect/Reconnect: %s, Tagged Queuing: %s\n", adapter, discon_msg, tagq_msg); if (blogic_multimaster_type(adapter)) { - blogic_info(" Scatter/Gather Limit: %d of %d segments, " "Mailboxes: %d\n", adapter, adapter->drvr_sglimit, adapter->adapter_sglimit, adapter->mbox_count); - blogic_info(" Driver Queue Depth: %d, " "Host Adapter Queue Depth: %d\n", adapter, adapter->drvr_qdepth, adapter->adapter_qdepth); + blogic_info(" Scatter/Gather Limit: %d of %d segments, Mailboxes: %d\n", adapter, adapter->drvr_sglimit, adapter->adapter_sglimit, adapter->mbox_count); + blogic_info(" Driver Queue Depth: %d, Host Adapter Queue Depth: %d\n", adapter, adapter->drvr_qdepth, adapter->adapter_qdepth); } else - blogic_info(" Driver Queue Depth: %d, " "Scatter/Gather Limit: %d segments\n", adapter, adapter->drvr_qdepth, adapter->drvr_sglimit); + blogic_info(" Driver Queue Depth: %d, Scatter/Gather Limit: %d segments\n", adapter, adapter->drvr_qdepth, adapter->drvr_sglimit); blogic_info(" Tagged Queue Depth: ", adapter); common_tagq_depth = true; for (tgt_id = 1; tgt_id < adapter->maxdev; tgt_id++) @@ -2717,7 +2717,7 @@ static void blogic_scan_inbox(struct blogic_adapter *adapter) then there is most likely a bug in the Host Adapter firmware. 
*/ - blogic_warn("Illegal CCB #%ld status %d in " "Incoming Mailbox\n", adapter, ccb->serial, ccb->status); + blogic_warn("Illegal CCB #%ld status %d in Incoming Mailbox\n", adapter, ccb->serial, ccb->status); } } next_inbox->comp_code = BLOGIC_INBOX_FREE; @@ -2752,7 +2752,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter) if (ccb->opcode == BLOGIC_BDR) { int tgt_id = ccb->tgt_id; - blogic_warn("Bus Device Reset CCB #%ld to Target " "%d Completed\n", adapter, ccb->serial, tgt_id); + blogic_warn("Bus Device Reset CCB #%ld to Target %d Completed\n", adapter, ccb->serial, tgt_id); blogic_inc_count(&adapter->tgt_stats[tgt_id].bdr_done); adapter->tgt_flags[tgt_id].tagq_active = false; adapter->cmds_since_rst[tgt_id] = 0; @@ -2829,7 +2829,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter) if (blogic_global_options.trace_err) { int i; blogic_notice("CCB #%ld Target %d: Result %X Host " - "Adapter Status %02X " "Target Status %02X\n", adapter, ccb->serial, ccb->tgt_id, command->result, ccb->adapter_status, ccb->tgt_status); + "Adapter Status %02X Target Status %02X\n", adapter, ccb->serial, ccb->tgt_id, command->result, ccb->adapter_status, ccb->tgt_status); blogic_notice("CDB ", adapter); for (i = 0; i < ccb->cdblen; i++) blogic_notice(" %02X", adapter, ccb->cdb[i]); @@ -3203,12 +3203,12 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command, */ if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, ccb)) { spin_unlock_irq(adapter->scsi_host->host_lock); - blogic_warn("Unable to write Outgoing Mailbox - " "Pausing for 1 second\n", adapter); + blogic_warn("Unable to write Outgoing Mailbox - Pausing for 1 second\n", adapter); blogic_delay(1); spin_lock_irq(adapter->scsi_host->host_lock); if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, ccb)) { - blogic_warn("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", adapter); + blogic_warn("Still unable to write Outgoing Mailbox - Host Adapter Dead?\n", adapter); blogic_dealloc_ccb(ccb, 1); command->result = DID_ERROR << 16; command->scsi_done(command); @@ -3443,8 +3443,8 @@ static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev, if (diskparam->cylinders != saved_cyl) blogic_warn("Adopting Geometry %d/%d from Partition Table\n", adapter, diskparam->heads, diskparam->sectors); } else if (part_end_head > 0 || part_end_sector > 0) { - blogic_warn("Warning: Partition Table appears to " "have Geometry %d/%d which is\n", adapter, part_end_head + 1, part_end_sector); - blogic_warn("not compatible with current BusLogic " "Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors); + blogic_warn("Warning: Partition Table appears to have Geometry %d/%d which is\n", adapter, part_end_head + 1, part_end_sector); + blogic_warn("not compatible with current BusLogic Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors); } } kfree(buf); @@ -3689,7 +3689,7 @@ static int __init blogic_parseopts(char *options) blogic_probe_options.probe134 = true; break; default: - blogic_err("BusLogic: Invalid Driver Options " "(invalid I/O Address 0x%X)\n", NULL, io_addr); + blogic_err("BusLogic: Invalid Driver Options (invalid I/O Address 0x%lX)\n", NULL, io_addr); return 0; } } else if (blogic_parse(&options, "NoProbeISA")) @@ -3710,7 +3710,7 @@ static int __init blogic_parseopts(char *options) for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) { unsigned short qdepth = simple_strtoul(options, &options, 0); if (qdepth > BLOGIC_MAX_TAG_DEPTH) { - 
blogic_err("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, qdepth); + blogic_err("BusLogic: Invalid Driver Options (invalid Queue Depth %d)\n", NULL, qdepth); return 0; } drvr_opts->qdepth[tgt_id] = qdepth; @@ -3719,12 +3719,12 @@ static int __init blogic_parseopts(char *options) else if (*options == ']') break; else { - blogic_err("BusLogic: Invalid Driver Options " "(',' or ']' expected at '%s')\n", NULL, options); + blogic_err("BusLogic: Invalid Driver Options (',' or ']' expected at '%s')\n", NULL, options); return 0; } } if (*options != ']') { - blogic_err("BusLogic: Invalid Driver Options " "(']' expected at '%s')\n", NULL, options); + blogic_err("BusLogic: Invalid Driver Options (']' expected at '%s')\n", NULL, options); return 0; } else options++; @@ -3732,7 +3732,7 @@ static int __init blogic_parseopts(char *options) unsigned short qdepth = simple_strtoul(options, &options, 0); if (qdepth == 0 || qdepth > BLOGIC_MAX_TAG_DEPTH) { - blogic_err("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, qdepth); + blogic_err("BusLogic: Invalid Driver Options (invalid Queue Depth %d)\n", NULL, qdepth); return 0; } drvr_opts->common_qdepth = qdepth; @@ -3778,7 +3778,7 @@ static int __init blogic_parseopts(char *options) unsigned short bus_settle_time = simple_strtoul(options, &options, 0); if (bus_settle_time > 5 * 60) { - blogic_err("BusLogic: Invalid Driver Options " "(invalid Bus Settle Time %d)\n", NULL, bus_settle_time); + blogic_err("BusLogic: Invalid Driver Options (invalid Bus Settle Time %d)\n", NULL, bus_settle_time); return 0; } drvr_opts->bus_settle_time = bus_settle_time; @@ -3803,14 +3803,14 @@ static int __init blogic_parseopts(char *options) if (*options == ',') options++; else if (*options != ';' && *options != '\0') { - blogic_err("BusLogic: Unexpected Driver Option '%s' " "ignored\n", NULL, options); + blogic_err("BusLogic: Unexpected Driver Option '%s' ignored\n", NULL, options); *options = '\0'; } } if (!(blogic_drvr_options_count == 0 || blogic_probeinfo_count == 0 || blogic_drvr_options_count == blogic_probeinfo_count)) { - blogic_err("BusLogic: Invalid Driver Options " "(all or no I/O Addresses must be specified)\n", NULL); + blogic_err("BusLogic: Invalid Driver Options (all or no I/O Addresses must be specified)\n", NULL); return 0; } /* @@ -3864,7 +3864,7 @@ static int __init blogic_setup(char *str) (void) get_options(str, ARRAY_SIZE(ints), ints); if (ints[0] != 0) { - blogic_err("BusLogic: Obsolete Command Line Entry " "Format Ignored\n", NULL); + blogic_err("BusLogic: Obsolete Command Line Entry Format Ignored\n", NULL); return 0; } if (str == NULL || *str == '\0') diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c index a9d40d3b90ef..4190a025381a 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_core.c +++ b/drivers/scsi/aic7xxx/aic7xxx_core.c @@ -2314,7 +2314,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, * At some speeds, we only support * ST transfers. 
*/ - if ((syncrate->sxfr_u2 & ST_SXFR) != 0) + if ((syncrate->sxfr_u2 & ST_SXFR) != 0) *ppr_options &= ~MSG_EXT_PPR_DT_REQ; break; } diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index f5781e31f57c..d022407e5645 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -54,6 +54,9 @@ static struct scsi_host_template aic94xx_sht = { .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif .track_queue_depth = 1, }; diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index 76751d6c7f0d..ed5f4a6ae270 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c @@ -872,6 +872,10 @@ static long ch_ioctl_compat(struct file * file, unsigned int cmd, unsigned long arg) { scsi_changer *ch = file->private_data; + int retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd, + file->f_flags & O_NDELAY); + if (retval) + return retval; switch (cmd) { case CHIOGPARAMS: @@ -883,7 +887,7 @@ static long ch_ioctl_compat(struct file * file, case CHIOINITELEM: case CHIOSVOLTAG: /* compatible */ - return ch_ioctl(file, cmd, arg); + return ch_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); case CHIOGSTATUS32: { struct changer_element_status32 ces32; @@ -898,8 +902,7 @@ static long ch_ioctl_compat(struct file * file, return ch_gstatus(ch, ces32.ces_type, data); } default: - // return scsi_ioctl_compat(ch->device, cmd, (void*)arg); - return -ENOIOCTLCMD; + return scsi_compat_ioctl(ch->device, cmd, compat_ptr(arg)); } } diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 469d0bc9f5fe..00cf33573136 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -1383,7 +1383,7 @@ csio_device_reset(struct device *dev, return -EINVAL; /* Delete NPIV lnodes */ - csio_lnodes_exit(hw, 1); + csio_lnodes_exit(hw, 1); /* Block upper IOs */ csio_lnodes_block_request(hw); diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index bb88995a12c7..89afa31e33cb 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c @@ -243,8 +243,6 @@ static void esp_set_all_config3(struct esp *esp, u8 val) /* Reset the ESP chip, _not_ the SCSI bus. */ static void esp_reset_esp(struct esp *esp) { - u8 family_code, version; - /* Now reset the ESP chip */ scsi_esp_cmd(esp, ESP_CMD_RC); scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); @@ -257,14 +255,19 @@ static void esp_reset_esp(struct esp *esp) */ esp->max_period = ((35 * esp->ccycle) / 1000); if (esp->rev == FAST) { - version = esp_read8(ESP_UID); - family_code = (version & 0xf8) >> 3; - if (family_code == 0x02) + u8 family_code = ESP_FAMILY(esp_read8(ESP_UID)); + + if (family_code == ESP_UID_F236) { esp->rev = FAS236; - else if (family_code == 0x0a) + } else if (family_code == ESP_UID_HME) { esp->rev = FASHME; /* Version is usually '5'. 
*/ - else + } else if (family_code == ESP_UID_FSC) { + esp->rev = FSC; + /* Enable Active Negation */ + esp_write8(ESP_CONFIG4_RADE, ESP_CFG4); + } else { esp->rev = FAS100A; + } esp->min_period = ((4 * esp->ccycle) / 1000); } else { esp->min_period = ((5 * esp->ccycle) / 1000); @@ -308,7 +311,7 @@ static void esp_reset_esp(struct esp *esp) case FAS236: case PCSCSI: - /* Fast 236, AM53c974 or HME */ + case FSC: esp_write8(esp->config2, ESP_CFG2); if (esp->rev == FASHME) { u8 cfg3 = esp->target[0].esp_config3; @@ -2373,10 +2376,11 @@ static const char *esp_chip_names[] = { "ESP100A", "ESP236", "FAS236", + "AM53C974", + "53CF9x-2", "FAS100A", "FAST", "FASHME", - "AM53C974", }; static struct scsi_transport_template *esp_transport_template; diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h index 91b32f2a1a1b..446a3d18c022 100644 --- a/drivers/scsi/esp_scsi.h +++ b/drivers/scsi/esp_scsi.h @@ -78,12 +78,14 @@ #define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */ #define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */ -/* ESP config register 4 read-write, found only on am53c974 chips */ -#define ESP_CONFIG4_RADE 0x04 /* Active negation */ -#define ESP_CONFIG4_RAE 0x08 /* Active negation on REQ and ACK */ -#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature */ -#define ESP_CONFIG4_GE0 0x40 /* Glitch eater bit 0 */ -#define ESP_CONFIG4_GE1 0x80 /* Glitch eater bit 1 */ +/* ESP config register 4 read-write */ +#define ESP_CONFIG4_BBTE 0x01 /* Back-to-back transfers (fsc) */ +#define ESP_CONGIG4_TEST 0x02 /* Transfer counter test mode (fsc) */ +#define ESP_CONFIG4_RADE 0x04 /* Active negation (am53c974/fsc) */ +#define ESP_CONFIG4_RAE 0x08 /* Act. negation REQ/ACK (am53c974) */ +#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature (am53c974) */ +#define ESP_CONFIG4_GE0 0x40 /* Glitch eater bit 0 (am53c974) */ +#define ESP_CONFIG4_GE1 0x80 /* Glitch eater bit 1 (am53c974) */ #define ESP_CONFIG_GE_12NS (0) #define ESP_CONFIG_GE_25NS (ESP_CONFIG_GE1) @@ -209,10 +211,15 @@ #define ESP_TEST_TS 0x04 /* Tristate test mode */ /* ESP unique ID register read-only, found on fas236+fas100a only */ +#define ESP_UID_FAM 0xf8 /* ESP family bitmask */ + +#define ESP_FAMILY(uid) (((uid) & ESP_UID_FAM) >> 3) + +/* Values for the ESP family bits */ #define ESP_UID_F100A 0x00 /* ESP FAS100A */ #define ESP_UID_F236 0x02 /* ESP FAS236 */ -#define ESP_UID_REV 0x07 /* ESP revision */ -#define ESP_UID_FAM 0xf8 /* ESP family */ +#define ESP_UID_HME 0x0a /* FAS HME */ +#define ESP_UID_FSC 0x14 /* NCR/Symbios Logic 53CF9x-2 */ /* ESP fifo flags register read-only */ /* Note that the following implies a 16 byte FIFO on the ESP. */ @@ -257,15 +264,17 @@ struct esp_cmd_priv { }; #define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp)) +/* NOTE: this enum is ordered based on chip features! 
*/ enum esp_rev { - ESP100 = 0x00, /* NCR53C90 - very broken */ - ESP100A = 0x01, /* NCR53C90A */ - ESP236 = 0x02, - FAS236 = 0x03, - FAS100A = 0x04, - FAST = 0x05, - FASHME = 0x06, - PCSCSI = 0x07, /* AM53c974 */ + ESP100, /* NCR53C90 - very broken */ + ESP100A, /* NCR53C90A */ + ESP236, + FAS236, + PCSCSI, /* AM53c974 */ + FSC, /* NCR/Symbios Logic 53CF9x-2 */ + FAS100A, + FAST, + FASHME, }; struct esp_cmd_entry { diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 233c73e01246..2bdd64648ef0 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -180,10 +180,10 @@ struct hisi_sas_port { struct hisi_sas_cq { struct hisi_hba *hisi_hba; - const struct cpumask *pci_irq_mask; - struct tasklet_struct tasklet; + const struct cpumask *irq_mask; int rd_point; int id; + int irq_no; }; struct hisi_sas_dq { @@ -627,7 +627,7 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba); extern void hisi_sas_rst_work_handler(struct work_struct *work); extern void hisi_sas_sync_rst_work_handler(struct work_struct *work); -extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba); +extern void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba); extern void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no); extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, enum hisi_sas_phy_event event); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 03588ec3c394..9a6deb21fe4d 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -163,13 +163,11 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) { - unsigned long flags; - if (hisi_hba->hw->slot_index_alloc || slot_idx >= HISI_SAS_UNRESERVED_IPTT) { - spin_lock_irqsave(&hisi_hba->lock, flags); + spin_lock(&hisi_hba->lock); hisi_sas_slot_index_clear(hisi_hba, slot_idx); - spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock(&hisi_hba->lock); } } @@ -185,12 +183,11 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, { int index; void *bitmap = hisi_hba->slot_index_tags; - unsigned long flags; if (scsi_cmnd) return scsi_cmnd->request->tag; - spin_lock_irqsave(&hisi_hba->lock, flags); + spin_lock(&hisi_hba->lock); index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, hisi_hba->last_slot_index + 1); if (index >= hisi_hba->slot_index_count) { @@ -198,13 +195,13 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, hisi_hba->slot_index_count, HISI_SAS_UNRESERVED_IPTT); if (index >= hisi_hba->slot_index_count) { - spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock(&hisi_hba->lock); return -SAS_QUEUE_FULL; } } hisi_sas_slot_index_set(hisi_hba, index); hisi_hba->last_slot_index = index; - spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock(&hisi_hba->lock); return index; } @@ -220,7 +217,6 @@ static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot) { - unsigned long flags; int device_id = slot->device_id; struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; @@ -247,9 +243,9 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, } } - spin_lock_irqsave(&sas_dev->lock, flags); + spin_lock(&sas_dev->lock); 
list_del_init(&slot->entry); - spin_unlock_irqrestore(&sas_dev->lock, flags); + spin_unlock(&sas_dev->lock); memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); @@ -489,14 +485,14 @@ static int hisi_sas_task_prep(struct sas_task *task, slot_idx = rc; slot = &hisi_hba->slot_info[slot_idx]; - spin_lock_irqsave(&dq->lock, flags); + spin_lock(&dq->lock); wr_q_index = dq->wr_point; dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; list_add_tail(&slot->delivery, &dq->list); - spin_unlock_irqrestore(&dq->lock, flags); - spin_lock_irqsave(&sas_dev->lock, flags); + spin_unlock(&dq->lock); + spin_lock(&sas_dev->lock); list_add_tail(&slot->entry, &sas_dev->list); - spin_unlock_irqrestore(&sas_dev->lock, flags); + spin_unlock(&sas_dev->lock); dlvry_queue = dq->id; dlvry_queue_slot = wr_q_index; @@ -562,7 +558,6 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, { u32 rc; u32 pass = 0; - unsigned long flags; struct hisi_hba *hisi_hba; struct device *dev; struct domain_device *device = task->dev; @@ -606,9 +601,9 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, dev_err(dev, "task exec: failed[%d]!\n", rc); if (likely(pass)) { - spin_lock_irqsave(&dq->lock, flags); + spin_lock(&dq->lock); hisi_hba->hw->start_delivery(dq); - spin_unlock_irqrestore(&dq->lock, flags); + spin_unlock(&dq->lock); } return rc; @@ -659,12 +654,11 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) { struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct hisi_sas_device *sas_dev = NULL; - unsigned long flags; int last = hisi_hba->last_dev_id; int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; int i; - spin_lock_irqsave(&hisi_hba->lock, flags); + spin_lock(&hisi_hba->lock); for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { int queue = i % hisi_hba->queue_count; @@ -684,7 +678,7 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) i++; } hisi_hba->last_dev_id = i; - spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock(&hisi_hba->lock); return sas_dev; } @@ -1233,10 +1227,10 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; /* - * flush tasklet to avoid free'ing task + * sync irq to avoid free'ing task * before using task in IO completion */ - tasklet_kill(&cq->tasklet); + synchronize_irq(cq->irq_no); slot->task = NULL; } @@ -1626,11 +1620,11 @@ static int hisi_sas_abort_task(struct sas_task *task) if (slot) { /* - * flush tasklet to avoid free'ing task + * sync irq to avoid free'ing task * before using task in IO completion */ cq = &hisi_hba->cq[slot->dlvry_queue]; - tasklet_kill(&cq->tasklet); + synchronize_irq(cq->irq_no); } spin_unlock_irqrestore(&task->task_state_lock, flags); rc = TMF_RESP_FUNC_COMPLETE; @@ -1694,10 +1688,10 @@ static int hisi_sas_abort_task(struct sas_task *task) if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && task->lldd_task) { /* - * flush tasklet to avoid free'ing task + * sync irq to avoid free'ing task * before using task in IO completion */ - tasklet_kill(&cq->tasklet); + synchronize_irq(cq->irq_no); slot->task = NULL; } } @@ -1965,14 +1959,14 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, slot_idx = rc; slot = &hisi_hba->slot_info[slot_idx]; - spin_lock_irqsave(&dq->lock, flags); + spin_lock(&dq->lock); wr_q_index = dq->wr_point; dq->wr_point = (dq->wr_point + 1) % 
HISI_SAS_QUEUE_SLOTS; list_add_tail(&slot->delivery, &dq->list); - spin_unlock_irqrestore(&dq->lock, flags); - spin_lock_irqsave(&sas_dev->lock, flags); + spin_unlock(&dq->lock); + spin_lock(&sas_dev->lock); list_add_tail(&slot->entry, &sas_dev->list); - spin_unlock_irqrestore(&sas_dev->lock, flags); + spin_unlock(&sas_dev->lock); dlvry_queue = dq->id; dlvry_queue_slot = wr_q_index; @@ -2001,9 +1995,9 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, spin_unlock_irqrestore(&task->task_state_lock, flags); WRITE_ONCE(slot->ready, 1); /* send abort command to the chip */ - spin_lock_irqsave(&dq->lock, flags); + spin_lock(&dq->lock); hisi_hba->hw->start_delivery(dq); - spin_unlock_irqrestore(&dq->lock, flags); + spin_unlock(&dq->lock); return 0; @@ -2076,10 +2070,10 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; /* - * flush tasklet to avoid free'ing task + * sync irq to avoid free'ing task * before using task in IO completion */ - tasklet_kill(&cq->tasklet); + synchronize_irq(cq->irq_no); slot->task = NULL; } dev_err(dev, "internal task abort: timeout and not done.\n"); @@ -2131,7 +2125,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, case HISI_SAS_INT_ABT_DEV: for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; - const struct cpumask *mask = cq->pci_irq_mask; + const struct cpumask *mask = cq->irq_mask; if (mask && !cpumask_intersects(cpu_online_mask, mask)) continue; @@ -2225,17 +2219,17 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy) } EXPORT_SYMBOL_GPL(hisi_sas_phy_down); -void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba) +void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba) { int i; for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; - tasklet_kill(&cq->tasklet); + synchronize_irq(cq->irq_no); } } -EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets); +EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs); int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type) { @@ -3936,7 +3930,7 @@ void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba) hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), hisi_sas_debugfs_dir); - debugfs_create_file("trigger_dump", 0600, + debugfs_create_file("trigger_dump", 0200, hisi_hba->debugfs_dir, hisi_hba, &hisi_sas_debugfs_trigger_dump_fops); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 3af53cc42bd6..fa25766502a2 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -1772,6 +1772,9 @@ static struct scsi_host_template sht_v1_hw = { .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif .shost_attrs = host_attrs_v1_hw, .host_reset = hisi_sas_host_reset, }; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 61b1e2693b08..e05faf315dcd 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -773,7 +773,6 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev = device->lldd_dev; int sata_idx = sas_dev->sata_idx; int start, end; - unsigned long flags; if (!sata_dev) { /* @@ -797,12 +796,12 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, end = 64 * (sata_idx + 2); } - spin_lock_irqsave(&hisi_hba->lock, flags); + 
spin_lock(&hisi_hba->lock); while (1) { start = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, start); if (start >= end) { - spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock(&hisi_hba->lock); return -SAS_QUEUE_FULL; } /* @@ -814,7 +813,7 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, } set_bit(start, bitmap); - spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock(&hisi_hba->lock); return start; } @@ -843,9 +842,8 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) struct hisi_sas_device *sas_dev = NULL; int i, sata_dev = dev_is_sata(device); int sata_idx = -1; - unsigned long flags; - spin_lock_irqsave(&hisi_hba->lock, flags); + spin_lock(&hisi_hba->lock); if (sata_dev) if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx)) @@ -876,7 +874,7 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) } out: - spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock(&hisi_hba->lock); return sas_dev; } @@ -3111,9 +3109,9 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) return IRQ_HANDLED; } -static void cq_tasklet_v2_hw(unsigned long val) +static irqreturn_t cq_thread_v2_hw(int irq_no, void *p) { - struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val; + struct hisi_sas_cq *cq = p; struct hisi_hba *hisi_hba = cq->hisi_hba; struct hisi_sas_slot *slot; struct hisi_sas_itct *itct; @@ -3181,6 +3179,8 @@ static void cq_tasklet_v2_hw(unsigned long val) /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + + return IRQ_HANDLED; } static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p) @@ -3191,9 +3191,7 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p) hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); - tasklet_schedule(&cq->tasklet); - - return IRQ_HANDLED; + return IRQ_WAKE_THREAD; } static irqreturn_t sata_int_v2_hw(int irq_no, void *p) @@ -3360,18 +3358,18 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) { struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no]; - struct tasklet_struct *t = &cq->tasklet; - irq = irq_map[queue_no + 96]; - rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0, - DRV_NAME " cq", cq); + cq->irq_no = irq_map[queue_no + 96]; + rc = devm_request_threaded_irq(dev, cq->irq_no, + cq_interrupt_v2_hw, + cq_thread_v2_hw, IRQF_ONESHOT, + DRV_NAME " cq", cq); if (rc) { dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n", irq, rc); rc = -ENOENT; goto err_out; } - tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq); } hisi_hba->cq_nvecs = hisi_hba->queue_count; @@ -3432,7 +3430,6 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba) interrupt_disable_v2_hw(hisi_hba); hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); - hisi_sas_kill_tasklets(hisi_hba); hisi_sas_stop_phys(hisi_hba); @@ -3551,6 +3548,9 @@ static struct scsi_host_template sht_v2_hw = { .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif .shost_attrs = host_attrs_v2_hw, .host_reset = hisi_sas_host_reset, }; @@ -3603,11 +3603,6 @@ static int hisi_sas_v2_probe(struct platform_device *pdev) static int hisi_sas_v2_remove(struct platform_device *pdev) { - struct sas_ha_struct *sha = platform_get_drvdata(pdev); - struct hisi_hba *hisi_hba = sha->lldd_ha; - - hisi_sas_kill_tasklets(hisi_hba); - return hisi_sas_remove(pdev); } diff --git 
a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index bf5d5f138437..a2debe0c8185 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -495,6 +495,13 @@ struct hisi_sas_err_record_v3 { #define BASE_VECTORS_V3_HW 16 #define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1) +#define CHNL_INT_STS_MSK 0xeeeeeeee +#define CHNL_INT_STS_PHY_MSK 0xe +#define CHNL_INT_STS_INT0_MSK BIT(1) +#define CHNL_INT_STS_INT1_MSK BIT(2) +#define CHNL_INT_STS_INT2_MSK BIT(3) +#define CHNL_WIDTH 4 + enum { DSM_FUNC_ERR_HANDLE_MSI = 0, }; @@ -1819,19 +1826,19 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) int phy_no = 0; irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) - & 0xeeeeeeee; + & CHNL_INT_STS_MSK; while (irq_msk) { - if (irq_msk & (2 << (phy_no * 4))) + if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH))) handle_chl_int0_v3_hw(hisi_hba, phy_no); - if (irq_msk & (4 << (phy_no * 4))) + if (irq_msk & (CHNL_INT_STS_INT1_MSK << (phy_no * CHNL_WIDTH))) handle_chl_int1_v3_hw(hisi_hba, phy_no); - if (irq_msk & (8 << (phy_no * 4))) + if (irq_msk & (CHNL_INT_STS_INT2_MSK << (phy_no * CHNL_WIDTH))) handle_chl_int2_v3_hw(hisi_hba, phy_no); - irq_msk &= ~(0xe << (phy_no * 4)); + irq_msk &= ~(CHNL_INT_STS_PHY_MSK << (phy_no * CHNL_WIDTH)); phy_no++; } @@ -2299,9 +2306,9 @@ out: return sts; } -static void cq_tasklet_v3_hw(unsigned long val) +static irqreturn_t cq_thread_v3_hw(int irq_no, void *p) { - struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val; + struct hisi_sas_cq *cq = p; struct hisi_hba *hisi_hba = cq->hisi_hba; struct hisi_sas_slot *slot; struct hisi_sas_complete_v3_hdr *complete_queue; @@ -2338,6 +2345,8 @@ static void cq_tasklet_v3_hw(unsigned long val) /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + + return IRQ_HANDLED; } static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) @@ -2348,9 +2357,7 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); - tasklet_schedule(&cq->tasklet); - - return IRQ_HANDLED; + return IRQ_WAKE_THREAD; } static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs) @@ -2365,7 +2372,7 @@ static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs) BASE_VECTORS_V3_HW); if (!mask) goto fallback; - cq->pci_irq_mask = mask; + cq->irq_mask = mask; for_each_cpu(cpu, mask) hisi_hba->reply_map[cpu] = queue; } @@ -2389,6 +2396,8 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) .pre_vectors = BASE_VECTORS_V3_HW, }; + dev_info(dev, "Enable MSI auto-affinity\n"); + min_msi = MIN_AFFINE_VECTORS_V3_HW; hisi_hba->reply_map = devm_kcalloc(dev, nr_cpu_ids, @@ -2441,15 +2450,20 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) goto free_irq_vectors; } - /* Init tasklets for cq only */ + if (hisi_sas_intr_conv) + dev_info(dev, "Enable interrupt converge\n"); + for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; - struct tasklet_struct *t = &cq->tasklet; int nr = hisi_sas_intr_conv ? 16 : 16 + i; - unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : 0; - - rc = devm_request_irq(dev, pci_irq_vector(pdev, nr), - cq_interrupt_v3_hw, irqflags, + unsigned long irqflags = hisi_sas_intr_conv ? 
IRQF_SHARED : + IRQF_ONESHOT; + + cq->irq_no = pci_irq_vector(pdev, nr); + rc = devm_request_threaded_irq(dev, cq->irq_no, + cq_interrupt_v3_hw, + cq_thread_v3_hw, + irqflags, DRV_NAME " cq", cq); if (rc) { dev_err(dev, "could not request cq%d interrupt, rc=%d\n", @@ -2457,8 +2471,6 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) rc = -ENOENT; goto free_irq_vectors; } - - tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq); } return 0; @@ -2534,7 +2546,6 @@ static int disable_host_v3_hw(struct hisi_hba *hisi_hba) interrupt_disable_v3_hw(hisi_hba); hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); - hisi_sas_kill_tasklets(hisi_hba); hisi_sas_stop_phys(hisi_hba); @@ -2910,7 +2921,7 @@ static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba) wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000); - hisi_sas_kill_tasklets(hisi_hba); + hisi_sas_sync_irqs(hisi_hba); } static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba) @@ -3075,6 +3086,9 @@ static struct scsi_host_template sht_v3_hw = { .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif .shost_attrs = host_attrs_v3_hw, .tag_alloc_policy = BLK_TAG_ALLOC_RR, .host_reset = hisi_sas_host_reset, @@ -3309,7 +3323,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) sas_remove_host(sha->core.shost); hisi_sas_v3_destroy_irqs(pdev, hisi_hba); - hisi_sas_kill_tasklets(hisi_hba); pci_release_regions(pdev); pci_disable_device(pdev); hisi_sas_free(hisi_hba); diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 54b8c6f9daf4..d9e94e81da01 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -1877,7 +1877,6 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi) */ struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi; struct ibmvscsis_cmd *cmd, *nxt; - struct iu_entry *iue; long rc = ADAPT_SUCCESS; bool retry = false; @@ -1931,8 +1930,6 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi) */ vscsi->credit += 1; } else { - iue = cmd->iue; - crq->valid = VALID_CMD_RESP_EL; crq->format = cmd->rsp.format; @@ -3796,7 +3793,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd) se_cmd); struct iu_entry *iue = cmd->iue; struct scsi_info *vscsi = cmd->adapter; - char *sd; uint len = 0; int rc; @@ -3804,7 +3800,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd) 1); if (rc) { dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc); - sd = se_cmd->sense_buffer; se_cmd->scsi_sense_length = 18; memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length); /* Logical Unit Communication Time-out asc/ascq = 0x0801 */ diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index 41fd64c9c8e9..1d39628ac947 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c @@ -1640,7 +1640,7 @@ static int initio_state_6(struct initio_host * host) * */ -int initio_state_7(struct initio_host * host) +static int initio_state_7(struct initio_host * host) { int cnt, i; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 079c04bc448a..ae45cbe98ae2 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -6727,6 +6727,9 @@ static struct scsi_host_template driver_template = { .name = "IPR", .info = ipr_ioa_info, .ioctl = ipr_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ipr_ioctl, +#endif .queuecommand = ipr_queuecommand, .eh_abort_handler = ipr_eh_abort, 
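Registering the same handler for both .ioctl and .compat_ioctl, as the sas_ioctl and ipr_ioctl hunks around here do, is only safe when the handler never interprets its argument through a type whose size differs between 32-bit and 64-bit userspace. Where that does not hold, the shared handler has to branch on in_compat_syscall(). A minimal sketch of that branch follows; the command number and function name are illustrative and do not come from any driver in this series.

#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

/* Illustrative only: neither the command number nor the handler name
 * is taken from the drivers touched in this series. */
#define EXAMPLE_GET_COUNT	0x1234

static int example_shared_ioctl(unsigned int cmd, void __user *arg,
				unsigned long count)
{
	switch (cmd) {
	case EXAMPLE_GET_COUNT:
		/* 'unsigned long' is 4 bytes for 32-bit callers, so the
		 * compat path must copy out a compat_ulong_t instead. */
		if (in_compat_syscall())
			return put_user((compat_ulong_t)count,
					(compat_ulong_t __user *)arg);
		return put_user(count, (unsigned long __user *)arg);
	default:
		return -ENOTTY;
	}
}

Handlers that only ever treat the argument as a pointer to a fixed-layout structure need no such branch, which is why the host templates here can simply reuse the native handler for the compat entry point.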
.eh_device_reset_handler = ipr_eh_dev_reset, diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 1727d0c71b12..b48aac8dfcb8 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -168,6 +168,9 @@ static struct scsi_host_template isci_sht = { .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif .shost_attrs = isci_host_attrs, .track_queue_depth = 1, }; diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 0bc63a7ab41c..b5dd1caae5e9 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -887,6 +887,10 @@ free_host: static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct iscsi_session *session = cls_session->dd_data; + + if (WARN_ON_ONCE(session->leadconn)) + return; iscsi_tcp_r2tpool_free(cls_session->dd_data); iscsi_session_teardown(cls_session); diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index e9e00740f7ca..c5a828a041e0 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -137,7 +137,7 @@ static void sas_ata_task_done(struct sas_task *task) } else { ac = sas_to_ata_err(stat); if (ac) { - pr_warn("%s: SAS error %x\n", __func__, stat->stat); + pr_warn("%s: SAS error 0x%x\n", __func__, stat->stat); /* We saw a SAS error. Send a vague error. */ if (!link->sactive) { qc->err_mask = ac; diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index d7302c2052f9..daf951b0b3f5 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -179,7 +179,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev) res = i->dft->lldd_dev_found(dev); if (res) { - pr_warn("driver on host %s cannot handle device %llx, error:%d\n", + pr_warn("driver on host %s cannot handle device %016llx, error:%d\n", dev_name(sas_ha->dev), SAS_ADDR(dev->sas_addr), res); } diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 9fdb9c9fbda4..ab671cdd4cfb 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -500,7 +500,7 @@ static int sas_ex_general(struct domain_device *dev) ex_assign_report_general(dev, rg_resp); if (dev->ex_dev.configuring) { - pr_debug("RG: ex %llx self-configuring...\n", + pr_debug("RG: ex %016llx self-configuring...\n", SAS_ADDR(dev->sas_addr)); schedule_timeout_interruptible(5*HZ); } else @@ -881,7 +881,7 @@ static struct domain_device *sas_ex_discover_end_dev( res = sas_discover_end_dev(child); if (res) { - pr_notice("sas_discover_end_dev() for device %16llx at %016llx:%02d returned 0x%x\n", + pr_notice("sas_discover_end_dev() for device %016llx at %016llx:%02d returned 0x%x\n", SAS_ADDR(child->sas_addr), SAS_ADDR(parent->sas_addr), phy_id, res); goto out_list_del; diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 01f1738ce6df..1f1d01901978 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -107,7 +107,7 @@ static inline void sas_smp_host_handler(struct bsg_job *job, static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err) { - pr_warn("%s: for %s device %16llx returned %d\n", + pr_warn("%s: for %s device %016llx returned %d\n", func, dev->parent ? 
"exp-attached" : "direct-attached", SAS_ADDR(dev->sas_addr), err); diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index 7c86fd248129..19cf418928fa 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c @@ -165,7 +165,7 @@ static void sas_form_port(struct asd_sas_phy *phy) } sas_port_add_phy(port->port, phy->phy); - pr_debug("%s added to %s, phy_mask:0x%x (%16llx)\n", + pr_debug("%s added to %s, phy_mask:0x%x (%016llx)\n", dev_name(&phy->phy->dev), dev_name(&port->port->dev), port->phy_mask, SAS_ADDR(port->attached_sas_addr)); diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index bec83eb8ab87..9e0975e55c27 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -330,7 +330,7 @@ static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd) int_to_scsilun(cmd->device->lun, &lun); - pr_notice("eh: device %llx LUN %llx has the task\n", + pr_notice("eh: device %016llx LUN 0x%llx has the task\n", SAS_ADDR(dev->sas_addr), cmd->device->lun); @@ -615,7 +615,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * reset: tmf_resp = sas_recover_lu(task->dev, cmd); if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { - pr_notice("dev %016llx LU %llx is recovered\n", + pr_notice("dev %016llx LU 0x%llx is recovered\n", SAS_ADDR(task->dev), cmd->device->lun); sas_eh_finish_cmd(cmd); @@ -666,7 +666,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * * of effort could recover from errors. Quite * possibly the HA just disappeared. */ - pr_err("error from device %llx, LUN %llx couldn't be recovered in any way\n", + pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n", SAS_ADDR(task->dev->sas_addr), cmd->device->lun); @@ -851,7 +851,7 @@ int sas_slave_configure(struct scsi_device *scsi_dev) if (scsi_dev->tagged_supported) { scsi_change_queue_depth(scsi_dev, SAS_DEF_QD); } else { - pr_notice("device %llx, LUN %llx doesn't support TCQ\n", + pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n", SAS_ADDR(dev->sas_addr), scsi_dev->lun); scsi_change_queue_depth(scsi_dev, 1); } diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c index 1ded7d85027e..e2d42593ce52 100644 --- a/drivers/scsi/libsas/sas_task.c +++ b/drivers/scsi/libsas/sas_task.c @@ -27,7 +27,7 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task, memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size); if (iu->status != SAM_STAT_CHECK_CONDITION) - dev_warn(dev, "dev %llx sent sense data, but stat(%x) is not CHECK CONDITION\n", + dev_warn(dev, "dev %016llx sent sense data, but stat(0x%x) is not CHECK CONDITION\n", SAS_ADDR(task->dev->sas_addr), iu->status); } else diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 935f98804198..04d73e2be373 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1223,6 +1223,8 @@ struct lpfc_hba { #define LPFC_POLL_HB 1 /* slowpath heartbeat */ #define LPFC_POLL_FASTPATH 0 /* called from fastpath */ #define LPFC_POLL_SLOWPATH 1 /* called from slowpath */ + + char os_host_name[MAXHOSTNAMELEN]; }; static inline struct Scsi_Host * diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 4ff82b36a37a..46f56f30f77e 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -4123,14 +4123,13 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, /* * 
The 'topology' is not a configurable parameter if : * - persistent topology enabled - * - G7 adapters - * - G6 with no private loop support + * - G7/G6 with no private loop support */ - if (((phba->hba_flag & HBA_PERSISTENT_TOPO) || + if ((phba->hba_flag & HBA_PERSISTENT_TOPO || (!phba->sli4_hba.pc_sli4_params.pls && - phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC) || - phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) && + (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC || + phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) && val == 4) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3114 Loop mode not supported\n"); diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index ee353c84a097..25d3dd39bc05 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -180,7 +180,7 @@ int lpfc_issue_gidft(struct lpfc_vport *vport); int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq); int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t); -void lpfc_fdmi_num_disc_check(struct lpfc_vport *); +void lpfc_fdmi_change_check(struct lpfc_vport *vport); void lpfc_delayed_disc_tmo(struct timer_list *); void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 99c9bb249758..58b35a1442c1 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1493,33 +1493,35 @@ int lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, size_t size) { - char fwrev[FW_REV_STR_SIZE]; - int n; + char fwrev[FW_REV_STR_SIZE] = {0}; + char tmp[MAXHOSTNAMELEN] = {0}; - lpfc_decode_firmware_rev(vport->phba, fwrev, 0); + memset(symbol, 0, size); - n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName); - if (size < n) - return n; + scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; - n += scnprintf(symbol + n, size - n, " FV%s", fwrev); - if (size < n) - return n; + lpfc_decode_firmware_rev(vport->phba, fwrev, 0); + scnprintf(tmp, sizeof(tmp), " FV%s", fwrev); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; - n += scnprintf(symbol + n, size - n, " DV%s.", - lpfc_release_version); - if (size < n) - return n; + scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; - n += scnprintf(symbol + n, size - n, " HN:%s.", - init_utsname()->nodename); - if (size < n) - return n; + scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; /* Note :- OS name is "Linux" */ - n += scnprintf(symbol + n, size - n, " OS:%s", - init_utsname()->sysname); - return n; + scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname); + strlcat(symbol, tmp, size); + +buffer_done: + return strnlen(symbol, size); + } static uint32_t @@ -1998,14 +2000,16 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /** - * lpfc_fdmi_num_disc_check - Check how many mapped NPorts we are connected to + * lpfc_fdmi_change_check - Check for changed FDMI parameters * @vport: pointer to a host virtual N_Port data structure. * - * Called from hbeat timeout routine to check if the number of discovered - * ports has changed. If so, re-register thar port Attribute. 
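The lpfc_vport_symbolic_node_name() rework just above drops the hand-counted scnprintf() offsets in favour of formatting each fragment into a scratch buffer and appending it with strlcat(); because strlcat() returns the length of the string it tried to create, a return value greater than or equal to the destination size signals truncation and ends the sequence. A small self-contained sketch of the same append-until-full pattern, with made-up fragment contents and function name:

#include <linux/string.h>
#include <linux/kernel.h>

/* Append formatted fragments to 'dst' and stop at the first one that
 * would not fit.  Returns the resulting string length. */
static size_t example_append_fragments(char *dst, size_t size,
				       const char *model, const char *fwrev)
{
	char tmp[64];

	memset(dst, 0, size);

	scnprintf(tmp, sizeof(tmp), "Model %s", model);
	if (strlcat(dst, tmp, size) >= size)	/* would truncate */
		goto done;

	scnprintf(tmp, sizeof(tmp), " FW %s", fwrev);
	strlcat(dst, tmp, size);
done:
	return strnlen(dst, size);
}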
+ * Check how many mapped NPorts we are connected to + * Check if our hostname changed + * Called from hbeat timeout routine to check if any FDMI parameters + * changed. If so, re-register those Attributes. */ void -lpfc_fdmi_num_disc_check(struct lpfc_vport *vport) +lpfc_fdmi_change_check(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp; @@ -2018,17 +2022,41 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport) if (!(vport->fc_flag & FC_FABRIC)) return; + ndlp = lpfc_findnode_did(vport, FDMI_DID); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + return; + + /* Check if system hostname changed */ + if (strcmp(phba->os_host_name, init_utsname()->nodename)) { + memset(phba->os_host_name, 0, sizeof(phba->os_host_name)); + scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s", + init_utsname()->nodename); + lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); + + /* Since this effects multiple HBA and PORT attributes, we need + * de-register and go thru the whole FDMI registration cycle. + * DHBA -> DPRT -> RHBA -> RPA (physical port) + * DPRT -> RPRT (vports) + */ + if (vport->port_type == LPFC_PHYSICAL_PORT) + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); + else + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); + + /* Since this code path registers all the port attributes + * we can just return without further checking. + */ + return; + } + if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc)) return; + /* Check if the number of mapped NPorts changed */ cnt = lpfc_find_map_node(vport); if (cnt == vport->fdmi_num_disc) return; - ndlp = lpfc_findnode_did(vport, FDMI_DID); - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) - return; - if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, LPFC_FDMI_PORT_ATTR_num_disc); @@ -2616,8 +2644,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; memset(ae, 0, 256); - snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s", - init_utsname()->nodename); + scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s", + vport->phba->os_host_name); len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString)); len += (len & 3) ? 
(4 - (len & 3)) : 4; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index a5ecbce4eda2..819335b16c2e 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -2085,6 +2085,8 @@ static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba, int copied = 0; struct lpfc_dmabuf *dmabuf, *next; + memset(buffer, 0, size); + spin_lock_irq(&phba->hbalock); if (phba->ras_fwlog.state != ACTIVE) { spin_unlock_irq(&phba->hbalock); @@ -2094,10 +2096,15 @@ static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba, list_for_each_entry_safe(dmabuf, next, &phba->ras_fwlog.fwlog_buff_list, list) { + /* Check if copying will go over size and a '\0' char */ + if ((copied + LPFC_RAS_MAX_ENTRY_SIZE) >= (size - 1)) { + memcpy(buffer + copied, dmabuf->virt, + size - copied - 1); + copied += size - copied - 1; + break; + } memcpy(buffer + copied, dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE); copied += LPFC_RAS_MAX_ENTRY_SIZE; - if (size > copied) - break; } return copied; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 85ada3deb47d..dcc8999c6a68 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -28,6 +28,7 @@ #include <linux/kthread.h> #include <linux/interrupt.h> #include <linux/lockdep.h> +#include <linux/utsname.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -3315,6 +3316,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) lpfc_sli4_clear_fcf_rr_bmask(phba); } + /* Prepare for LINK up registrations */ + memset(phba->os_host_name, 0, sizeof(phba->os_host_name)); + scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s", + init_utsname()->nodename); return; out: lpfc_vport_set_state(vport, FC_VPORT_FAILED); diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 25cdcbc2b02f..9a064b96e570 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -3925,6 +3925,9 @@ struct lpfc_mbx_wr_object { #define LPFC_CHANGE_STATUS_FW_RESET 0x02 #define LPFC_CHANGE_STATUS_PORT_MIGRATION 0x04 #define LPFC_CHANGE_STATUS_PCI_RESET 0x05 +#define lpfc_wr_object_csf_SHIFT 8 +#define lpfc_wr_object_csf_MASK 0x00000001 +#define lpfc_wr_object_csf_WORD word5 } response; } u; }; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 6a04fdb3fbf2..5a605773dd0a 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1362,7 +1362,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { lpfc_rcv_seq_check_edtov(vports[i]); - lpfc_fdmi_num_disc_check(vports[i]); + lpfc_fdmi_change_check(vports[i]); } lpfc_destroy_vport_work_array(phba, vports); @@ -8320,14 +8320,6 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) phba->hba_flag |= HBA_PERSISTENT_TOPO; switch (phba->pcidev->device) { case PCI_DEVICE_ID_LANCER_G7_FC: - if (tf || (pt == LINK_FLAGS_LOOP)) { - /* Invalid values from FW - use driver params */ - phba->hba_flag &= ~HBA_PERSISTENT_TOPO; - } else { - /* Prism only supports PT2PT topology */ - phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT; - } - break; case PCI_DEVICE_ID_LANCER_G6_FC: if (!tf) { phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) @@ -10449,6 +10441,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) case LPFC_SLI_INTF_IF_TYPE_6: iounmap(phba->sli4_hba.drbl_regs_memmap_p); iounmap(phba->sli4_hba.conf_regs_memmap_p); + if 
(phba->sli4_hba.dpp_regs_memmap_p) + iounmap(phba->sli4_hba.dpp_regs_memmap_p); break; case LPFC_SLI_INTF_IF_TYPE_1: default: diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index ae4359013846..a024e5a3918f 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -308,7 +308,7 @@ lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox) mb->mbxStatus); mempool_free(login_mbox, phba->mbox_mem_pool); mempool_free(link_mbox, phba->mbox_mem_pool); - lpfc_sli_release_iocbq(phba, save_iocb); + kfree(save_iocb); return; } @@ -325,7 +325,61 @@ lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox) } mempool_free(link_mbox, phba->mbox_mem_pool); - lpfc_sli_release_iocbq(phba, save_iocb); + kfree(save_iocb); +} + +/** + * lpfc_defer_tgt_acc - Progress SLI4 target rcv PLOGI handler + * @phba: Pointer to HBA context object. + * @pmb: Pointer to mailbox object. + * + * This function provides the unreg rpi mailbox completion handler for a tgt. + * The routine frees the memory resources associated with the completed + * mailbox command and transmits the ELS ACC. + * + * This routine is only called if we are SLI4, acting in target + * mode and the remote NPort issues the PLOGI after link up. + **/ +static void +lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; + LPFC_MBOXQ_t *mbox = pmb->context3; + struct lpfc_iocbq *piocb = NULL; + int rc; + + if (mbox) { + pmb->context3 = NULL; + piocb = mbox->context3; + mbox->context3 = NULL; + } + + /* + * Complete the unreg rpi mbx request, and update flags. + * This will also restart any deferred events. + */ + lpfc_nlp_get(ndlp); + lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb); + + if (!piocb) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS, + "4578 PLOGI ACC fail\n"); + if (mbox) + mempool_free(mbox, phba->mbox_mem_pool); + goto out; + } + + rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox); + if (rc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS, + "4579 PLOGI ACC fail %x\n", rc); + if (mbox) + mempool_free(mbox, phba->mbox_mem_pool); + } + kfree(piocb); +out: + lpfc_nlp_put(ndlp); } static int @@ -345,6 +399,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *save_iocb; struct ls_rjt stat; uint32_t vid, flag; + u16 rpi; int rc, defer_acc; memset(&stat, 0, sizeof (struct ls_rjt)); @@ -488,7 +543,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, link_mbox->vport = vport; link_mbox->ctx_ndlp = ndlp; - save_iocb = lpfc_sli_get_iocbq(phba); + save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL); if (!save_iocb) goto out; /* Save info from cmd IOCB used in rsp */ @@ -513,7 +568,36 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, goto out; /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */ - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->nvmet_support && !defer_acc) { + link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!link_mbox) + goto out; + + /* As unique identifiers such as iotag would be overwritten + * with those from the cmdiocb, allocate separate temporary + * storage for the copy. + */ + save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL); + if (!save_iocb) + goto out; + + /* Unreg RPI is required for SLI4. 
*/ + rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; + lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox); + link_mbox->vport = vport; + link_mbox->ctx_ndlp = ndlp; + link_mbox->mbox_cmpl = lpfc_defer_acc_rsp; + + if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && + (!(vport->fc_flag & FC_OFFLINE_MODE))) + ndlp->nlp_flag |= NLP_UNREG_INP; + + /* Save info from cmd IOCB used in rsp */ + memcpy(save_iocb, cmdiocb, sizeof(*save_iocb)); + + /* Delay sending ACC till unreg RPI completes. */ + defer_acc = 1; + } else if (phba->sli_rev == LPFC_SLI_REV4) lpfc_unreg_rpi(vport, ndlp); rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID, @@ -553,6 +637,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if ((vport->port_type == LPFC_NPIV_PORT && vport->cfg_restrict_login)) { + /* no deferred ACC */ + kfree(save_iocb); + /* In order to preserve RPIs, we want to cleanup * the default RPI the firmware created to rcv * this ELS request. The only way to do this is @@ -571,8 +658,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } if (defer_acc) { /* So the order here should be: - * Issue CONFIG_LINK mbox - * CONFIG_LINK cmpl + * SLI3 pt2pt + * Issue CONFIG_LINK mbox + * CONFIG_LINK cmpl + * SLI4 tgt + * Issue UNREG RPI mbx + * UNREG RPI cmpl * Issue PLOGI ACC * PLOGI ACC cmpl * Issue REG_LOGIN mbox @@ -596,10 +687,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, out: if (defer_acc) lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "4577 pt2pt discovery failure: %p %p %p\n", + "4577 discovery failure: %p %p %p\n", save_iocb, link_mbox, login_mbox); - if (save_iocb) - lpfc_sli_release_iocbq(phba, save_iocb); + kfree(save_iocb); if (link_mbox) mempool_free(link_mbox, phba->mbox_mem_pool); if (login_mbox) diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index b138d9fee675..2c7e0b22db2f 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -481,7 +481,7 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) spin_lock(&qp->abts_io_buf_list_lock); list_for_each_entry_safe(psb, next_psb, &qp->lpfc_abts_io_buf_list, list) { - if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) + if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) continue; if (psb->rdata && psb->rdata->pnode && @@ -528,7 +528,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, list_del_init(&psb->list); psb->flags &= ~LPFC_SBUF_XBUSY; psb->status = IOSTAT_SUCCESS; - if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) { + if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) { qp->abts_nvme_io_bufs--; spin_unlock(&qp->abts_io_buf_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 625c046ac4ef..64002b0cb02d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4918,8 +4918,17 @@ static int lpfc_sli4_rb_setup(struct lpfc_hba *phba) { phba->hbq_in_use = 1; - phba->hbqs[LPFC_ELS_HBQ].entry_count = - lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; + /** + * Specific case when the MDS diagnostics is enabled and supported. + * The receive buffer count is truncated to manage the incoming + * traffic. 
+ **/ + if (phba->cfg_enable_mds_diags && phba->mds_diags_support) + phba->hbqs[LPFC_ELS_HBQ].entry_count = + lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1; + else + phba->hbqs[LPFC_ELS_HBQ].entry_count = + lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; phba->hbq_count = 1; lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); /* Initially populate or replenish the HBQs */ @@ -19449,7 +19458,7 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, struct lpfc_mbx_wr_object *wr_object; LPFC_MBOXQ_t *mbox; int rc = 0, i = 0; - uint32_t shdr_status, shdr_add_status, shdr_change_status; + uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf; uint32_t mbox_tmo; struct lpfc_dmabuf *dmabuf; uint32_t written = 0; @@ -19506,6 +19515,16 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, if (check_change_status) { shdr_change_status = bf_get(lpfc_wr_object_change_status, &wr_object->u.response); + + if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET || + shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) { + shdr_csf = bf_get(lpfc_wr_object_csf, + &wr_object->u.response); + if (shdr_csf) + shdr_change_status = + LPFC_CHANGE_STATUS_PCI_RESET; + } + switch (shdr_change_status) { case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): lpfc_printf_log(phba, KERN_INFO, LOG_INIT, diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 9e5ff58edaca..9563c49f36ab 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "12.6.0.2" +#define LPFC_DRIVER_VERSION "12.6.0.3" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index bd8184072bed..83d8c4cb1ad5 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -21,8 +21,8 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "07.710.50.00-rc1" -#define MEGASAS_RELDATE "June 28, 2019" +#define MEGASAS_VERSION "07.713.01.00-rc1" +#define MEGASAS_RELDATE "Dec 27, 2019" #define MEGASAS_MSIX_NAME_LEN 32 @@ -2233,9 +2233,9 @@ enum MR_PD_TYPE { /* JBOD Queue depth definitions */ #define MEGASAS_SATA_QD 32 -#define MEGASAS_SAS_QD 64 +#define MEGASAS_SAS_QD 256 #define MEGASAS_DEFAULT_PD_QD 64 -#define MEGASAS_NVME_QD 32 +#define MEGASAS_NVME_QD 64 #define MR_DEFAULT_NVME_PAGE_SIZE 4096 #define MR_DEFAULT_NVME_PAGE_SHIFT 12 @@ -2640,10 +2640,11 @@ enum MEGASAS_OCR_CAUSE { }; enum DCMD_RETURN_STATUS { - DCMD_SUCCESS = 0, - DCMD_TIMEOUT = 1, - DCMD_FAILED = 2, - DCMD_NOT_FIRED = 3, + DCMD_SUCCESS = 0x00, + DCMD_TIMEOUT = 0x01, + DCMD_FAILED = 0x02, + DCMD_BUSY = 0x03, + DCMD_INIT = 0xff, }; u8 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index c60cd9fc4240..acb82181f70f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1099,7 +1099,7 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); - return DCMD_NOT_FIRED; + return DCMD_INIT; } instance->instancet->issue_dcmd(instance, cmd); @@ -1123,19 +1123,19 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, int timeout) { int ret 
= 0; - cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; + cmd->cmd_status_drv = DCMD_INIT; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); - return DCMD_NOT_FIRED; + return DCMD_INIT; } instance->instancet->issue_dcmd(instance, cmd); if (timeout) { ret = wait_event_timeout(instance->int_cmd_wait_q, - cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); + cmd->cmd_status_drv != DCMD_INIT, timeout * HZ); if (!ret) { dev_err(&instance->pdev->dev, "DCMD(opcode: 0x%x) is timed out, func:%s\n", @@ -1144,10 +1144,9 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance, } } else wait_event(instance->int_cmd_wait_q, - cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS); + cmd->cmd_status_drv != DCMD_INIT); - return (cmd->cmd_status_drv == MFI_STAT_OK) ? - DCMD_SUCCESS : DCMD_FAILED; + return cmd->cmd_status_drv; } /** @@ -1190,19 +1189,19 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); cmd->sync_cmd = 1; - cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; + cmd->cmd_status_drv = DCMD_INIT; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); - return DCMD_NOT_FIRED; + return DCMD_INIT; } instance->instancet->issue_dcmd(instance, cmd); if (timeout) { ret = wait_event_timeout(instance->abort_cmd_wait_q, - cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); + cmd->cmd_status_drv != DCMD_INIT, timeout * HZ); if (!ret) { opcode = cmd_to_abort->frame->dcmd.opcode; dev_err(&instance->pdev->dev, @@ -1212,13 +1211,12 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, } } else wait_event(instance->abort_cmd_wait_q, - cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS); + cmd->cmd_status_drv != DCMD_INIT); cmd->sync_cmd = 0; megasas_return_cmd(instance, cmd); - return (cmd->cmd_status_drv == MFI_STAT_OK) ? 
- DCMD_SUCCESS : DCMD_FAILED; + return cmd->cmd_status_drv; } /** @@ -1887,6 +1885,10 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev, mr_device_priv_data->is_tm_capable = raid->capability.tmCapable; + + if (!raid->flags.isEPD) + sdev->no_write_same = 1; + } else if (instance->use_seqnum_jbod_fp) { pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; @@ -2150,6 +2152,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc void megaraid_sas_kill_hba(struct megasas_instance *instance) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { + dev_warn(&instance->pdev->dev, + "Adapter already dead, skipping kill HBA\n"); + return; + } + /* Set critical error to block I/O & ioctls in case caller didn't */ atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); /* Wait 1 second to ensure IO or ioctls in build have posted */ @@ -2726,7 +2734,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) "reset queue\n", reset_cmd); - reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; + reset_cmd->cmd_status_drv = DCMD_INIT; instance->instancet->fire_cmd(instance, reset_cmd->frame_phys_addr, 0, instance->reg_set); @@ -3416,7 +3424,6 @@ static struct scsi_host_template megasas_template = { .bios_param = megasas_bios_param, .change_queue_depth = scsi_change_queue_depth, .max_segment_size = 0xffffffff, - .no_write_same = 1, }; /** @@ -3432,7 +3439,11 @@ static void megasas_complete_int_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { - cmd->cmd_status_drv = cmd->frame->io.cmd_status; + if (cmd->cmd_status_drv == DCMD_INIT) + cmd->cmd_status_drv = + (cmd->frame->io.cmd_status == MFI_STAT_OK) ? + DCMD_SUCCESS : DCMD_FAILED; + wake_up(&instance->int_cmd_wait_q); } @@ -3451,7 +3462,7 @@ megasas_complete_abort(struct megasas_instance *instance, { if (cmd->sync_cmd) { cmd->sync_cmd = 0; - cmd->cmd_status_drv = 0; + cmd->cmd_status_drv = DCMD_SUCCESS; wake_up(&instance->abort_cmd_wait_q); } } @@ -3727,7 +3738,7 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance) dev_notice(&instance->pdev->dev, "%p synchronous cmd" "on the internal reset queue," "issue it again.\n", cmd); - cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; + cmd->cmd_status_drv = DCMD_INIT; instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 0, instance->reg_set); @@ -4392,7 +4403,8 @@ dcmd_timeout_ocr_possible(struct megasas_instance *instance) { if (instance->adapter_type == MFI_SERIES) return KILL_ADAPTER; else if (instance->unload || - test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) + test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, + &instance->reset_flags)) return IGNORE_TIMEOUT; else return INITIATE_OCR; @@ -7593,6 +7605,7 @@ megasas_resume(struct pci_dev *pdev) struct Scsi_Host *host; struct megasas_instance *instance; int irq_flags = PCI_IRQ_LEGACY; + u32 status_reg; instance = pci_get_drvdata(pdev); @@ -7620,9 +7633,35 @@ megasas_resume(struct pci_dev *pdev) /* * We expect the FW state to be READY */ - if (megasas_transition_to_ready(instance, 0)) - goto fail_ready_state; + if (megasas_transition_to_ready(instance, 0)) { + dev_info(&instance->pdev->dev, + "Failed to transition controller to ready from %s!\n", + __func__); + if (instance->adapter_type != MFI_SERIES) { + status_reg = + instance->instancet->read_fw_status_reg(instance); + if (!(status_reg & MFI_RESET_ADAPTER) || + ((megasas_adp_reset_wait_for_ready + (instance, true, 0)) == FAILED)) + goto fail_ready_state; 
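The megasas_issue_blocked_cmd() and megasas_issue_blocked_abort_cmd() changes above replace the firmware MFI status sentinel with a driver-private one: the issuer sets cmd_status_drv to DCMD_INIT, sleeps until anything moves it off that value, and returns it directly, which lets the reset path pre-load DCMD_BUSY for commands it could not refire. A stripped-down sketch of that handshake follows; the enum values mirror the DCMD_* additions in this series, but the structure, wait queue, and timeout behaviour are illustrative only.

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Values mirror the DCMD_* additions above; names are illustrative. */
enum example_cmd_status {
	EXAMPLE_SUCCESS	= 0x00,
	EXAMPLE_FAILED	= 0x02,
	EXAMPLE_BUSY	= 0x03,		/* pre-set by the reset path */
	EXAMPLE_INIT	= 0xff,		/* sentinel: not completed yet */
};

struct example_cmd {
	enum example_cmd_status status;
};

/*
 * Issue 'cmd' and block until the completion handler (or a reset path)
 * moves 'status' off the EXAMPLE_INIT sentinel, or until the timeout
 * elapses.  The completer is expected to write 'status' and then
 * wake_up() the wait queue.
 */
static int example_issue_blocked(struct example_cmd *cmd,
				 wait_queue_head_t *waitq,
				 unsigned int timeout_secs)
{
	cmd->status = EXAMPLE_INIT;

	/* ... hand the command to the hardware here ... */

	if (!wait_event_timeout(*waitq, cmd->status != EXAMPLE_INIT,
				timeout_secs * HZ))
		return -ETIMEDOUT;

	return cmd->status;
}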
+ } else { + atomic_set(&instance->fw_reset_no_pci_access, 1); + instance->instancet->adp_reset + (instance, instance->reg_set); + atomic_set(&instance->fw_reset_no_pci_access, 0); + + /* waiting for about 30 seconds before retry */ + ssleep(30); + + if (megasas_transition_to_ready(instance, 0)) + goto fail_ready_state; + } + + dev_info(&instance->pdev->dev, + "FW restarted successfully from %s!\n", + __func__); + } if (megasas_set_dma_mask(instance)) goto fail_set_dma_mask; @@ -8036,6 +8075,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, dma_addr_t sense_handle; unsigned long *sense_ptr; u32 opcode = 0; + int ret = DCMD_SUCCESS; memset(kbuff_arr, 0, sizeof(kbuff_arr)); @@ -8176,13 +8216,18 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, * cmd to the SCSI mid-layer */ cmd->sync_cmd = 1; - if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) { + + ret = megasas_issue_blocked_cmd(instance, cmd, 0); + switch (ret) { + case DCMD_INIT: + case DCMD_BUSY: cmd->sync_cmd = 0; dev_err(&instance->pdev->dev, "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n", - __func__, __LINE__, cmd->frame->hdr.cmd, opcode, - cmd->cmd_status_drv); - return -EBUSY; + __func__, __LINE__, cmd->frame->hdr.cmd, opcode, + cmd->cmd_status_drv); + error = -EBUSY; + goto out; } cmd->sync_cmd = 0; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index e301458bcbae..f3b36fd0a0eb 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -364,6 +364,35 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c instance->max_fw_cmds = instance->max_fw_cmds-1; } } + +static inline void +megasas_get_msix_index(struct megasas_instance *instance, + struct scsi_cmnd *scmd, + struct megasas_cmd_fusion *cmd, + u8 data_arms) +{ + int sdev_busy; + + /* nr_hw_queue = 1 for MegaRAID */ + struct blk_mq_hw_ctx *hctx = + scmd->device->request_queue->queue_hw_ctx[0]; + + sdev_busy = atomic_read(&hctx->nr_active); + + if (instance->perf_mode == MR_BALANCED_PERF_MODE && + sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) + cmd->request_desc->SCSIIO.MSIxIndex = + mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) / + MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start); + else if (instance->msix_load_balance) + cmd->request_desc->SCSIIO.MSIxIndex = + (mega_mod64(atomic64_add_return(1, &instance->total_io_count), + instance->msix_vectors)); + else + cmd->request_desc->SCSIIO.MSIxIndex = + instance->reply_map[raw_smp_processor_id()]; +} + /** * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool * @instance: Adapter soft state @@ -1312,7 +1341,9 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) { } if (ret == DCMD_TIMEOUT) - megaraid_sas_kill_hba(instance); + dev_warn(&instance->pdev->dev, + "%s DCMD timed out, continue without JBOD sequence map\n", + __func__); if (ret == DCMD_SUCCESS) instance->pd_seq_map_id++; @@ -1394,7 +1425,9 @@ megasas_get_ld_map_info(struct megasas_instance *instance) ret = megasas_issue_polled(instance, cmd); if (ret == DCMD_TIMEOUT) - megaraid_sas_kill_hba(instance); + dev_warn(&instance->pdev->dev, + "%s DCMD timed out, RAID map is disabled\n", + __func__); megasas_return_cmd(instance, cmd); @@ -2825,19 +2858,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, fp_possible = (io_info.fpOkForIo > 0) ? 
true : false; } - if ((instance->perf_mode == MR_BALANCED_PERF_MODE) && - atomic_read(&scp->device->device_busy) > - (io_info.data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) - cmd->request_desc->SCSIIO.MSIxIndex = - mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) / - MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start); - else if (instance->msix_load_balance) - cmd->request_desc->SCSIIO.MSIxIndex = - (mega_mod64(atomic64_add_return(1, &instance->total_io_count), - instance->msix_vectors)); - else - cmd->request_desc->SCSIIO.MSIxIndex = - instance->reply_map[raw_smp_processor_id()]; + megasas_get_msix_index(instance, scp, cmd, io_info.data_arms); if (instance->adapter_type >= VENTURA_SERIES) { /* FP for Optimal raid level 1. @@ -3158,18 +3179,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; - if ((instance->perf_mode == MR_BALANCED_PERF_MODE) && - atomic_read(&scmd->device->device_busy) > MR_DEVICE_HIGH_IOPS_DEPTH) - cmd->request_desc->SCSIIO.MSIxIndex = - mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) / - MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start); - else if (instance->msix_load_balance) - cmd->request_desc->SCSIIO.MSIxIndex = - (mega_mod64(atomic64_add_return(1, &instance->total_io_count), - instance->msix_vectors)); - else - cmd->request_desc->SCSIIO.MSIxIndex = - instance->reply_map[raw_smp_processor_id()]; + megasas_get_msix_index(instance, scmd, cmd, 1); if (!fp_possible) { /* system pd firmware path */ @@ -4219,7 +4229,8 @@ void megasas_reset_reply_desc(struct megasas_instance *instance) * megasas_refire_mgmt_cmd : Re-fire management commands * @instance: Controller's soft instance */ -static void megasas_refire_mgmt_cmd(struct megasas_instance *instance) +void megasas_refire_mgmt_cmd(struct megasas_instance *instance, + bool return_ioctl) { int j; struct megasas_cmd_fusion *cmd_fusion; @@ -4283,6 +4294,16 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance) break; } + if (return_ioctl && cmd_mfi->sync_cmd && + cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) { + dev_err(&instance->pdev->dev, + "return -EBUSY from %s %d cmd 0x%x opcode 0x%x\n", + __func__, __LINE__, cmd_mfi->frame->hdr.cmd, + le32_to_cpu(cmd_mfi->frame->dcmd.opcode)); + cmd_mfi->cmd_status_drv = DCMD_BUSY; + result = COMPLETE_CMD; + } + switch (result) { case REFIRE_CMD: megasas_fire_cmd_fusion(instance, req_desc); @@ -4298,6 +4319,37 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance) } /* + * megasas_return_polled_cmds: Return polled mode commands back to the pool + * before initiating an OCR. 
+ * @instance: Controller's soft instance + */ +static void +megasas_return_polled_cmds(struct megasas_instance *instance) +{ + int i; + struct megasas_cmd_fusion *cmd_fusion; + struct fusion_context *fusion; + struct megasas_cmd *cmd_mfi; + + fusion = instance->ctrl_context; + + for (i = instance->max_scsi_cmds; i < instance->max_fw_cmds; i++) { + cmd_fusion = fusion->cmd_list[i]; + cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; + + if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) { + if (megasas_dbg_lvl & OCR_DEBUG) + dev_info(&instance->pdev->dev, + "%s %d return cmd 0x%x opcode 0x%x\n", + __func__, __LINE__, cmd_mfi->frame->hdr.cmd, + le32_to_cpu(cmd_mfi->frame->dcmd.opcode)); + cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE; + megasas_return_cmd(instance, cmd_mfi); + } + } +} + +/* * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device * @instance: per adapter struct * @channel: the channel assigned by the OS @@ -4847,6 +4899,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); + set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags); atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING); instance->instancet->disable_intr(instance); megasas_sync_irqs((unsigned long)instance); @@ -4951,7 +5004,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) goto kill_hba; } - megasas_refire_mgmt_cmd(instance); + megasas_refire_mgmt_cmd(instance, + (i == (MEGASAS_FUSION_MAX_RESET_TRIES - 1) + ? 1 : 0)); /* Reset load balance info */ if (fusion->load_balance_info) @@ -4959,8 +5014,16 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) (sizeof(struct LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT)); - if (!megasas_get_map_info(instance)) + if (!megasas_get_map_info(instance)) { megasas_sync_map_info(instance); + } else { + /* + * Return pending polled mode cmds before + * retrying OCR + */ + megasas_return_polled_cmds(instance); + continue; + } megasas_setup_jbod_map(instance); @@ -4987,6 +5050,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) megasas_set_dynamic_target_properties(sdev, is_target_prop); } + status_reg = instance->instancet->read_fw_status_reg + (instance); + abs_state = status_reg & MFI_STATE_MASK; + if (abs_state != MFI_STATE_OPERATIONAL) { + dev_info(&instance->pdev->dev, + "Adapter is not OPERATIONAL, state 0x%x for scsi:%d\n", + abs_state, instance->host->host_no); + goto out; + } atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); dev_info(&instance->pdev->dev, @@ -5046,7 +5118,7 @@ kill_hba: instance->skip_heartbeat_timer_del = 1; retval = FAILED; out: - clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); + clear_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags); mutex_unlock(&instance->reset_mutex); return retval; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index c013c80fe4e6..d57ecc7f88d8 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -89,6 +89,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { #define MEGASAS_FP_CMD_LEN 16 #define MEGASAS_FUSION_IN_RESET 0 +#define MEGASAS_FUSION_OCR_NOT_POSSIBLE 1 #define RAID_1_PEER_CMDS 2 #define JBOD_MAPS_COUNT 2 #define MEGASAS_REDUCE_QD_COUNT 64 @@ -864,9 +865,20 @@ struct MR_LD_RAID { u8 regTypeReqOnRead; __le16 seqNum; - struct { - u32 
ldSyncRequired:1; - u32 reserved:31; +struct { +#ifndef MFI_BIG_ENDIAN + u32 ldSyncRequired:1; + u32 regTypeReqOnReadIsValid:1; + u32 isEPD:1; + u32 enableSLDOnAllRWIOs:1; + u32 reserved:28; +#else + u32 reserved:28; + u32 enableSLDOnAllRWIOs:1; + u32 isEPD:1; + u32 regTypeReqOnReadIsValid:1; + u32 ldSyncRequired:1; +#endif } flags; u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */ diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h index 18b1e31b5eb8..ed3923f8db4f 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2.h @@ -122,6 +122,9 @@ * 08-28-18 02.00.53 Bumped MPI2_HEADER_VERSION_UNIT. * Added MPI2_IOCSTATUS_FAILURE * 12-17-18 02.00.54 Bumped MPI2_HEADER_VERSION_UNIT + * 06-24-19 02.00.55 Bumped MPI2_HEADER_VERSION_UNIT + * 08-01-19 02.00.56 Bumped MPI2_HEADER_VERSION_UNIT + * 10-02-19 02.00.57 Bumped MPI2_HEADER_VERSION_UNIT * -------------------------------------------------------------------------- */ @@ -162,7 +165,7 @@ /* Unit and Dev versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x36) +#define MPI2_HEADER_VERSION_UNIT (0x39) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) @@ -181,6 +184,7 @@ #define MPI2_IOC_STATE_READY (0x10000000) #define MPI2_IOC_STATE_OPERATIONAL (0x20000000) #define MPI2_IOC_STATE_FAULT (0x40000000) +#define MPI2_IOC_STATE_COREDUMP (0x50000000) #define MPI2_IOC_STATE_MASK (0xF0000000) #define MPI2_IOC_STATE_SHIFT (28) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h index 3a6871aecada..43a3bf8ff428 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -249,6 +249,8 @@ * 08-28-18 02.00.46 Added NVMs Write Cache flag to IOUnitPage1 * Added DMDReport Delay Time defines to PCIeIOUnitPage1 * 12-17-18 02.00.47 Swap locations of Slotx2 and Slotx4 in ManPage 7. 
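The MR_LD_RAID flags change just above widens a firmware-shared bit-field and has to spell the bits out twice, once per bit-field order, because C leaves bit-field layout to the ABI. MFI_BIG_ENDIAN is the driver's own switch; the generic kernel idiom for the same thing uses the byte-order macros from <asm/byteorder.h>, roughly as in this illustrative sketch (field names are made up):

#include <asm/byteorder.h>
#include <linux/types.h>

/* Illustrative firmware-shared flags word: the same named bits are
 * declared in both bit-field orders so each name maps to the same
 * wire bit regardless of host convention. */
struct example_fw_flags {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u32	sync_required:1;
	u32	reg_type_valid:1;
	u32	is_epd:1;
	u32	reserved:29;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u32	reserved:29;
	u32	is_epd:1;
	u32	reg_type_valid:1;
	u32	sync_required:1;
#else
#error "unknown bitfield endianness"
#endif
};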
+ * 08-01-19 02.00.49 Add MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID + * Add MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT */ #ifndef MPI2_CNFG_H @@ -891,6 +893,8 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7 { #define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) +#define MPI26_MANPAGE7_FLAG_CONN_LANE_USE_PINOUT (0x00000020) +#define MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID (0x00000010) /* *Generic structure to use for product-specific manufacturing pages @@ -962,9 +966,10 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 { /* IO Unit Page 1 Flags defines */ #define MPI26_IOUNITPAGE1_NVME_WRCACHE_MASK (0x00030000) -#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00000000) -#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00010000) -#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00020000) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT (16) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00000000) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00010000) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00020000) #define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000) #define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000) #define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000) @@ -3931,7 +3936,13 @@ typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 { U32 MaximumDataTransferSize; /*0x0C */ U32 Capabilities; /*0x10 */ U16 NOIOB; /* 0x14 */ - U16 Reserved2; /* 0x16 */ + U16 ShutdownLatency; /* 0x16 */ + U16 VendorID; /* 0x18 */ + U16 DeviceID; /* 0x1A */ + U16 SubsystemVendorID; /* 0x1C */ + U16 SubsystemID; /* 0x1E */ + U8 RevisionID; /* 0x20 */ + U8 Reserved21[3]; /* 0x21 */ } MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2, Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t; diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h index a3f677853098..33b9c3a6fd40 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_image.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h @@ -19,6 +19,10 @@ * 09-07-18 02.06.03 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES * 12-17-18 02.06.04 Addd MPI2_EXT_IMAGE_TYPE_PBLP * Shorten some defines to be compatible with DOS + * 06-24-19 02.06.05 Whitespace adjustments to help with identifier + * checking tool. 
+ * 10-02-19 02.06.06 Added MPI26_IMAGE_HEADER_SIG1_COREDUMP + * Added MPI2_FLASH_REGION_COREDUMP */ #ifndef MPI2_IMAGE_H #define MPI2_IMAGE_H @@ -213,6 +217,8 @@ typedef struct _MPI26_COMPONENT_IMAGE_HEADER { #define MPI26_IMAGE_HEADER_SIG1_NVDATA (0x5444564E) #define MPI26_IMAGE_HEADER_SIG1_GAS_GAUGE (0x20534147) #define MPI26_IMAGE_HEADER_SIG1_PBLP (0x504C4250) +/* little-endian "DUMP" */ +#define MPI26_IMAGE_HEADER_SIG1_COREDUMP (0x504D5544) /**** Definitions for Signature2 field ****/ #define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546) @@ -359,6 +365,7 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA { #define MPI2_FLASH_REGION_MR_NVDATA (0x14) #define MPI2_FLASH_REGION_CPLD (0x15) #define MPI2_FLASH_REGION_PSOC (0x16) +#define MPI2_FLASH_REGION_COREDUMP (0x17) /*ImageRevision */ #define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h index 68ea408cd5c5..e83c7c529dc9 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h @@ -175,6 +175,10 @@ * Moved FW image definitions ionto new mpi2_image,h * 08-14-18 02.00.36 Fixed definition of MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16) * 09-07-18 02.00.37 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES + * 10-02-19 02.00.38 Added MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE + * Added MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED + * Added MPI2_FW_DOWNLOAD_ITYPE_COREDUMP + * Added MPI2_FW_UPLOAD_ITYPE_COREDUMP * -------------------------------------------------------------------------- */ @@ -248,6 +252,7 @@ typedef struct _MPI2_IOC_INIT_REQUEST { /*ConfigurationFlags */ #define MPI26_IOCINIT_CFGFLAGS_NVME_SGL_FORMAT (0x0001) +#define MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE (0x0002) /*minimum depth for a Reply Descriptor Post Queue */ #define MPI2_RDPQ_DEPTH_MIN (16) @@ -377,6 +382,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY { /*ProductID field uses MPI2_FW_HEADER_PID_ */ /*IOCCapabilities */ +#define MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED (0x00200000) #define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000) #define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000) #define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000) @@ -1458,8 +1464,8 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST { /*MPI v2.6 and newer */ #define MPI2_FW_DOWNLOAD_ITYPE_CPLD (0x15) #define MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16) +#define MPI2_FW_DOWNLOAD_ITYPE_COREDUMP (0x17) #define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0) -#define MPI2_FW_DOWNLOAD_ITYPE_TERMINATE (0xFF) /*MPI v2.0 FWDownload TransactionContext Element */ typedef struct _MPI2_FW_DOWNLOAD_TCSGE { diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 45fd8dfb7c40..663782bb790d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -124,7 +124,14 @@ enum mpt3sas_perf_mode { }; static int +_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, + u32 ioc_state, int timeout); +static int _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc); +static void +_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc); +static void +_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc); /** * mpt3sas_base_check_cmd_timeout - Function @@ -609,7 +616,8 @@ _base_fault_reset_work(struct work_struct *work) spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - if (ioc->shost_recovery || ioc->pci_error_recovery) + if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) || + ioc->pci_error_recovery) goto rearm_timer; 
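The CoreDump handling added to the mpt3sas fault-polling work item, which begins just above and continues below, converts the firmware's CoreDumpTOSec value (or a driver default) into a number of polling passes and keeps rearming the poll timer until that budget is spent, only then falling through to the hard reset. A tiny sketch of that bookkeeping, with illustrative names, a made-up default, and the assumption that the poll interval constant is in milliseconds:

#include <linux/types.h>

#define EXAMPLE_POLL_INTERVAL_MS	1000	/* one polling pass per second */
#define EXAMPLE_DEFAULT_BUDGET_SECS	15	/* made-up fallback budget */

/*
 * Return true while the caller should keep rearming its poll timer,
 * false once the per-coredump time budget has been spent.  '*loops'
 * counts how many polling passes have already seen the coredump state.
 */
static bool example_keep_polling(u32 coredump_to_secs, u32 *loops)
{
	u32 budget = coredump_to_secs ? coredump_to_secs :
		     EXAMPLE_DEFAULT_BUDGET_SECS;

	budget /= (EXAMPLE_POLL_INTERVAL_MS / 1000);

	return (*loops)++ < budget;
}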
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); @@ -656,20 +664,64 @@ _base_fault_reset_work(struct work_struct *work) return; /* don't rearm timer */ } - ioc->non_operational_loop = 0; + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { + u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ? + ioc->manu_pg11.CoreDumpTOSec : + MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS; + + timeout /= (FAULT_POLLING_INTERVAL/1000); + + if (ioc->ioc_coredump_loop == 0) { + mpt3sas_print_coredump_info(ioc, + doorbell & MPI2_DOORBELL_DATA_MASK); + /* do not accept any IOs and disable the interrupts */ + spin_lock_irqsave( + &ioc->ioc_reset_in_progress_lock, flags); + ioc->shost_recovery = 1; + spin_unlock_irqrestore( + &ioc->ioc_reset_in_progress_lock, flags); + _base_mask_interrupts(ioc); + _base_clear_outstanding_commands(ioc); + } + + ioc_info(ioc, "%s: CoreDump loop %d.", + __func__, ioc->ioc_coredump_loop); + /* Wait until CoreDump completes or times out */ + if (ioc->ioc_coredump_loop++ < timeout) { + spin_lock_irqsave( + &ioc->ioc_reset_in_progress_lock, flags); + goto rearm_timer; + } + } + + if (ioc->ioc_coredump_loop) { + if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP) + ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d", + __func__, ioc->ioc_coredump_loop); + else + ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d", + __func__, ioc->ioc_coredump_loop); + ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE; + } + ioc->non_operational_loop = 0; if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) { rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); ioc_warn(ioc, "%s: hard reset: %s\n", __func__, rc == 0 ? "success" : "failed"); doorbell = mpt3sas_base_get_iocstate(ioc, 0); - if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) - mpt3sas_base_fault_info(ioc, doorbell & + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + } else if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) + mpt3sas_print_coredump_info(ioc, doorbell & MPI2_DOORBELL_DATA_MASK); if (rc && (doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) return; /* don't rearm timer */ } + ioc->ioc_coredump_loop = 0; spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); rearm_timer: @@ -749,6 +801,49 @@ mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code) } /** + * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state + * @ioc: per adapter object + * @fault_code: fault code + * + * Return nothing. + */ +void +mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code) +{ + ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code); +} + +/** + * mpt3sas_base_wait_for_coredump_completion - Wait until coredump + * completes or times out + * @ioc: per adapter object + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc, + const char *caller) +{ + u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ? + ioc->manu_pg11.CoreDumpTOSec : + MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS; + + int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT, + timeout); + + if (ioc_state) + ioc_err(ioc, + "%s: CoreDump timed out. (ioc_state=0x%x)\n", + caller, ioc_state); + else + ioc_info(ioc, + "%s: CoreDump completed. 
(ioc_state=0x%x)\n", + caller, ioc_state); + + return ioc_state; +} + +/** * mpt3sas_halt_firmware - halt's mpt controller firmware * @ioc: per adapter object * @@ -768,9 +863,14 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc) dump_stack(); doorbell = ioc->base_readl(&ioc->chip->Doorbell); - if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) - mpt3sas_base_fault_info(ioc , doorbell); - else { + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + } else if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + } else { writel(0xC0FFEE00, &ioc->chip->Doorbell); ioc_err(ioc, "Firmware is halted due to command timeout\n"); } @@ -3103,6 +3203,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) */ if (!ioc->combined_reply_queue && ioc->hba_mpi_version_belonged != MPI2_VERSION) { + ioc_info(ioc, + "combined ReplyQueue is off, Enabling msix load balance\n"); ioc->msix_load_balance = true; } @@ -3115,9 +3217,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) r = _base_alloc_irq_vectors(ioc); if (r < 0) { - dfailprintk(ioc, - ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", - r)); + ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r); goto try_ioapic; } @@ -3206,9 +3306,15 @@ _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc) dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state)); if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { - mpt3sas_base_fault_info(ioc, ioc_state & + mpt3sas_print_fault_code(ioc, ioc_state & MPI2_DOORBELL_DATA_MASK); rc = _base_diag_reset(ioc); + } else if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + mpt3sas_base_wait_for_coredump_completion(ioc, __func__); + rc = _base_diag_reset(ioc); } return rc; @@ -3279,7 +3385,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) } if (ioc->chip == NULL) { - ioc_err(ioc, "unable to map adapter memory! or resource not found\n"); + ioc_err(ioc, + "unable to map adapter memory! or resource not found\n"); r = -EINVAL; goto out_fail; } @@ -3318,8 +3425,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) ioc->combined_reply_index_count, sizeof(resource_size_t *), GFP_KERNEL); if (!ioc->replyPostRegisterIndex) { - dfailprintk(ioc, - ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n")); + ioc_err(ioc, + "allocation for replyPostRegisterIndex failed!\n"); r = -ENOMEM; goto out_fail; } @@ -3467,6 +3574,22 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc, } /** + * _base_sdev_nr_inflight_request -get number of inflight requests + * of a request queue. + * @q: request_queue object + * + * returns number of inflight request of a request queue. + */ +inline unsigned long +_base_sdev_nr_inflight_request(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[0]; + + return atomic_read(&hctx->nr_active); +} + + +/** * _base_get_high_iops_msix_index - get the msix index of * high iops queues * @ioc: per adapter object @@ -3485,7 +3608,7 @@ _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc, * reply queues in terms of batch count 16 when outstanding * IOs on the target device is >=8. 
*/ - if (atomic_read(&scmd->device->device_busy) > + if (_base_sdev_nr_inflight_request(scmd->device->request_queue) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH) return base_mod64(( atomic64_add_return(1, &ioc->high_iops_outstanding) / @@ -4264,7 +4387,8 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc) fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, &fwpkg_data_dma, GFP_KERNEL); if (!fwpkg_data) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", + ioc_err(ioc, + "Memory allocation for fwpkg data failed at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -ENOMEM; } @@ -4994,12 +5118,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; } - dinitprintk(ioc, - ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n", - ioc->max_sges_in_main_message, - ioc->max_sges_in_chain_message, - ioc->shost->sg_tablesize, - ioc->chains_needed_per_io)); + ioc_info(ioc, + "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), " + "sge_per_io(%d), chains_per_io(%d)\n", + ioc->max_sges_in_main_message, + ioc->max_sges_in_chain_message, + ioc->shost->sg_tablesize, + ioc->chains_needed_per_io); /* reply post queue, 16 byte align */ reply_post_free_sz = ioc->reply_post_queue_depth * @@ -5109,15 +5234,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * ioc->request_sz); - dinitprintk(ioc, - ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", - ioc->request, ioc->hba_queue_depth, - ioc->request_sz, - (ioc->hba_queue_depth * ioc->request_sz) / 1024)); + ioc_info(ioc, + "request pool(0x%p) - dma(0x%llx): " + "depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->request, (unsigned long long) ioc->request_dma, + ioc->hba_queue_depth, ioc->request_sz, + (ioc->hba_queue_depth * ioc->request_sz) / 1024); - dinitprintk(ioc, - ioc_info(ioc, "request pool: dma(0x%llx)\n", - (unsigned long long)ioc->request_dma)); total_sz += sz; dinitprintk(ioc, @@ -5302,13 +5425,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) goto out; } } - dinitprintk(ioc, - ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n", - ioc->sense, ioc->scsiio_depth, - SCSI_SENSE_BUFFERSIZE, sz / 1024)); - dinitprintk(ioc, - ioc_info(ioc, "sense_dma(0x%llx)\n", - (unsigned long long)ioc->sense_dma)); + ioc_info(ioc, + "sense pool(0x%p)- dma(0x%llx): depth(%d)," + "element_size(%d), pool_size(%d kB)\n", + ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth, + SCSI_SENSE_BUFFERSIZE, sz / 1024); + total_sz += sz; /* reply pool, 4 byte align */ @@ -5386,12 +5508,10 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) ioc_err(ioc, "config page: dma_pool_alloc failed\n"); goto out; } - dinitprintk(ioc, - ioc_info(ioc, "config page(0x%p): size(%d)\n", - ioc->config_page, ioc->config_page_sz)); - dinitprintk(ioc, - ioc_info(ioc, "config_page_dma(0x%llx)\n", - (unsigned long long)ioc->config_page_dma)); + + ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n", + ioc->config_page, (unsigned long long)ioc->config_page_dma, + ioc->config_page_sz); total_sz += ioc->config_page_sz; ioc_info(ioc, "Allocated physical memory: size(%d kB)\n", @@ -5446,6 +5566,8 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout) return 0; if (count && current_state == MPI2_IOC_STATE_FAULT) break; + if (count && current_state == MPI2_IOC_STATE_COREDUMP) 
+ break; usleep_range(1000, 1500); count++; @@ -5547,7 +5669,12 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout) doorbell = ioc->base_readl(&ioc->chip->Doorbell); if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { - mpt3sas_base_fault_info(ioc , doorbell); + mpt3sas_print_fault_code(ioc, doorbell); + return -EFAULT; + } + if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, doorbell); return -EFAULT; } } else if (int_status == 0xFFFFFFFF) @@ -5609,6 +5736,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) { u32 ioc_state; int r = 0; + unsigned long flags; if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) { ioc_err(ioc, "%s: unknown reset_type\n", __func__); @@ -5627,6 +5755,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) r = -EFAULT; goto out; } + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); if (ioc_state) { ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", @@ -5635,6 +5764,26 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) goto out; } out: + if (r != 0) { + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + /* + * Wait for IOC state CoreDump to clear only during + * HBA initialization & release time. + */ + if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 || + ioc->fault_reset_work_q == NULL)) { + spin_unlock_irqrestore( + &ioc->ioc_reset_in_progress_lock, flags); + mpt3sas_print_coredump_info(ioc, ioc_state); + mpt3sas_base_wait_for_coredump_completion(ioc, + __func__); + spin_lock_irqsave( + &ioc->ioc_reset_in_progress_lock, flags); + } + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + } ioc_info(ioc, "message unit reset: %s\n", r == 0 ? 
"SUCCESS" : "FAILED"); return r; @@ -5782,7 +5931,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, mfp = (__le32 *)reply; pr_info("\toffset:data\n"); for (i = 0; i < reply_bytes/4; i++) - pr_info("\t[0x%02x]:%08x\n", i*4, + ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4, le32_to_cpu(mfp[i])); } return 0; @@ -5850,10 +5999,9 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, ioc->ioc_link_reset_in_progress) ioc->ioc_link_reset_in_progress = 0; if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->base_cmds.status, mpi_request, - sizeof(Mpi2SasIoUnitControlRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status, + mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4, + issue_reset); goto issue_host_reset; } if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) @@ -5926,10 +6074,9 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, wait_for_completion_timeout(&ioc->base_cmds.done, msecs_to_jiffies(10000)); if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->base_cmds.status, mpi_request, - sizeof(Mpi2SepRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->base_cmds.status, mpi_request, + sizeof(Mpi2SepRequest_t)/4, issue_reset); goto issue_host_reset; } if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) @@ -6028,9 +6175,15 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout) } if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { - mpt3sas_base_fault_info(ioc, ioc_state & + mpt3sas_print_fault_code(ioc, ioc_state & MPI2_DOORBELL_DATA_MASK); goto issue_diag_reset; + } else if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + ioc_info(ioc, + "%s: Skipping the diag reset here. (ioc_state=0x%x)\n", + __func__, ioc_state); + return -EFAULT; } ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); @@ -6209,6 +6362,12 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma); } + /* + * Set the flag to enable CoreDump state feature in IOC firmware. + */ + mpi_request.ConfigurationFlags |= + cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE); + /* This time stamp specifies number of milliseconds * since epoch ~ midnight January 1, 1970. 
*/ @@ -6220,9 +6379,9 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) int i; mfp = (__le32 *)&mpi_request; - pr_info("\toffset:data\n"); + ioc_info(ioc, "\toffset:data\n"); for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) - pr_info("\t[0x%02x]:%08x\n", i*4, + ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4, le32_to_cpu(mfp[i])); } @@ -6592,8 +6751,11 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) /* wait 100 msec */ msleep(100); - if (count++ > 20) + if (count++ > 20) { + ioc_info(ioc, + "Stop writing magic sequence after 20 retries\n"); goto out; + } host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic); drsprintk(ioc, @@ -6617,8 +6779,11 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic); - if (host_diagnostic == 0xFFFFFFFF) + if (host_diagnostic == 0xFFFFFFFF) { + ioc_info(ioc, + "Invalid host diagnostic register value\n"); goto out; + } if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) break; @@ -6705,16 +6870,33 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) return 0; if (ioc_state & MPI2_DOORBELL_USED) { - dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n")); + ioc_info(ioc, "unexpected doorbell active!\n"); goto issue_diag_reset; } if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { - mpt3sas_base_fault_info(ioc, ioc_state & + mpt3sas_print_fault_code(ioc, ioc_state & MPI2_DOORBELL_DATA_MASK); goto issue_diag_reset; } + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { + /* + * if host reset is invoked while watch dog thread is waiting + * for IOC state to be changed to Fault state then driver has + * to wait here for CoreDump state to clear otherwise reset + * will be issued to the FW and FW move the IOC state to + * reset state without copying the FW logs to coredump region. 
+ */ + if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) { + mpt3sas_print_coredump_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + mpt3sas_base_wait_for_coredump_completion(ioc, + __func__); + } + goto issue_diag_reset; + } + if (type == FORCE_BIG_HAMMER) goto issue_diag_reset; @@ -6958,8 +7140,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL); ioc->reply_queue_count = 1; if (!ioc->cpu_msix_table) { - dfailprintk(ioc, - ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n")); + ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n"); r = -ENOMEM; goto out_free_resources; } @@ -6968,8 +7149,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz, sizeof(resource_size_t *), GFP_KERNEL); if (!ioc->reply_post_host_index) { - dfailprintk(ioc, - ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n")); + ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n"); r = -ENOMEM; goto out_free_resources; } @@ -7195,6 +7375,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) sizeof(struct mpt3sas_facts)); ioc->non_operational_loop = 0; + ioc->ioc_coredump_loop = 0; ioc->got_task_abort_from_ioctl = 0; return 0; @@ -7276,14 +7457,14 @@ static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) } /** - * _base_after_reset_handler - after reset handler + * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands * @ioc: per adapter object */ -static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) +static void +_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc) { - mpt3sas_scsih_after_reset_handler(ioc); - mpt3sas_ctl_after_reset_handler(ioc); - dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__)); + dtmprintk(ioc, + ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__)); if (ioc->transport_cmds.status & MPT3_CMD_PENDING) { ioc->transport_cmds.status |= MPT3_CMD_RESET; mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid); @@ -7317,6 +7498,17 @@ static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) } /** + * _base_clear_outstanding_commands - clear all outstanding commands + * @ioc: per adapter object + */ +static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc) +{ + mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc); + mpt3sas_ctl_clear_outstanding_ioctls(ioc); + _base_clear_outstanding_mpt_commands(ioc); +} + +/** * _base_reset_done_handler - reset done handler * @ioc: per adapter object */ @@ -7474,7 +7666,9 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, MPT3_DIAG_BUFFER_IS_RELEASED))) { is_trigger = 1; ioc_state = mpt3sas_base_get_iocstate(ioc, 0); - if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT || + (ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) is_fault = 1; } _base_pre_reset_handler(ioc); @@ -7483,7 +7677,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, r = _base_make_ioc_ready(ioc, type); if (r) goto out; - _base_after_reset_handler(ioc); + _base_clear_outstanding_commands(ioc); /* If this hard reset is called while port enable is active, then * there is no reason to call make_ioc_operational @@ -7514,9 +7708,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, _base_reset_done_handler(ioc); out: - dtmprintk(ioc, - ioc_info(ioc, "%s: %s\n", - __func__, r == 0 ? 
"SUCCESS" : "FAILED")); + ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED"); spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); ioc->shost_recovery = 0; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 4ebf81ea4d2f..e7197150721f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -76,8 +76,8 @@ #define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" -#define MPT3SAS_DRIVER_VERSION "32.100.00.00" -#define MPT3SAS_MAJOR_VERSION 32 +#define MPT3SAS_DRIVER_VERSION "33.100.00.00" +#define MPT3SAS_MAJOR_VERSION 33 #define MPT3SAS_MINOR_VERSION 100 #define MPT3SAS_BUILD_VERSION 0 #define MPT3SAS_RELEASE_VERSION 00 @@ -90,6 +90,10 @@ #define MPT2SAS_BUILD_VERSION 0 #define MPT2SAS_RELEASE_VERSION 00 +/* CoreDump: Default timeout */ +#define MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS (15) /*15 seconds*/ +#define MPT3SAS_COREDUMP_LOOP_DONE (0xFF) + /* * Set MPT3SAS_SG_DEPTH value based on user input. */ @@ -140,6 +144,7 @@ #define MAX_CHAIN_ELEMT_SZ 16 #define DEFAULT_NUM_FWCHAIN_ELEMTS 8 +#define IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT 6 #define FW_IMG_HDR_READ_TIMEOUT 15 #define IOC_OPERATIONAL_WAIT_COUNT 10 @@ -398,7 +403,10 @@ struct Mpi2ManufacturingPage11_t { u8 HostTraceBufferFlags; /* 4Fh */ u16 HostTraceBufferMaxSizeKB; /* 50h */ u16 HostTraceBufferMinSizeKB; /* 52h */ - __le32 Reserved10[2]; /* 54h - 5Bh */ + u8 CoreDumpTOSec; /* 54h */ + u8 Reserved8; /* 55h */ + u16 Reserved9; /* 56h */ + __le32 Reserved10; /* 58h */ }; /** @@ -589,6 +597,7 @@ static inline void sas_device_put(struct _sas_device *s) * @connector_name: ASCII value of the Connector's name * @serial_number: pointer of serial number string allocated runtime * @access_status: Device's Access Status + * @shutdown_latency: NVMe device's RTD3 Entry Latency * @refcount: reference count for deletion */ struct _pcie_device { @@ -611,6 +620,7 @@ struct _pcie_device { u8 *serial_number; u8 reset_timeout; u8 access_status; + u16 shutdown_latency; struct kref refcount; }; /** @@ -1045,6 +1055,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @cpu_msix_table: table for mapping cpus to msix index * @cpu_msix_table_sz: table size * @total_io_cnt: Gives total IO count, used to load balance the interrupts + * @ioc_coredump_loop: will have non-zero value when FW is in CoreDump state * @high_iops_outstanding: used to load balance the interrupts * within high iops reply queues * @msix_load_balance: Enables load balancing of interrupts across @@ -1073,6 +1084,10 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @event_context: unique id for each logged event * @event_log: event log pointer * @event_masks: events that are masked + * @max_shutdown_latency: timeout value for NVMe shutdown operation, + * which is equal that NVMe drive's RTD3 Entry Latency + * which has reported maximum RTD3 Entry Latency value + * among attached NVMe drives. 
* @facts: static facts data * @prev_fw_facts: previous fw facts data * @pfacts: static port facts data @@ -1231,6 +1246,7 @@ struct MPT3SAS_ADAPTER { u32 ioc_reset_count; MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds; u32 non_operational_loop; + u8 ioc_coredump_loop; atomic64_t total_io_cnt; atomic64_t high_iops_outstanding; bool msix_load_balance; @@ -1283,7 +1299,7 @@ struct MPT3SAS_ADAPTER { u8 tm_custom_handling; u8 nvme_abort_timeout; - + u16 max_shutdown_latency; /* static config pages */ struct mpt3sas_facts facts; @@ -1531,6 +1547,17 @@ void *mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked); void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code); +#define mpt3sas_print_fault_code(ioc, fault_code) \ +do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \ + mpt3sas_base_fault_info(ioc, fault_code); } while (0) + +void mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code); +#define mpt3sas_print_coredump_info(ioc, fault_code) \ +do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \ + mpt3sas_base_coredump_info(ioc, fault_code); } while (0) + +int mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc, + const char *caller); int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, Mpi2SasIoUnitControlReply_t *mpi_reply, Mpi2SasIoUnitControlRequest_t *mpi_request); @@ -1552,6 +1579,11 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc); u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc, u8 status, void *mpi_request, int sz); +#define mpt3sas_check_cmd_timeout(ioc, status, mpi_request, sz, issue_reset) \ +do { ioc_err(ioc, "In func: %s\n", __func__); \ + issue_reset = mpt3sas_base_check_cmd_timeout(ioc, \ + status, mpi_request, sz); } while (0) + int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count); /* scsih shared API */ @@ -1560,7 +1592,8 @@ struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply); void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc); -void mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_scsih_clear_outstanding_scsi_tm_commands( + struct MPT3SAS_ADAPTER *ioc); void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc); int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, @@ -1694,7 +1727,7 @@ void mpt3sas_ctl_exit(ushort hbas_to_enumerate); u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply); void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc); -void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc); u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply); diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index 14a1a2793dd5..62ddf53ab3ae 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ -101,9 +101,6 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, Mpi2ConfigRequest_t *mpi_request; char *desc = NULL; - if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) - return; - mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); switch 
(mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) { case MPI2_CONFIG_PAGETYPE_IO_UNIT: @@ -269,7 +266,8 @@ mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, mpi_reply->MsgLength*4); } ioc->config_cmds.status &= ~MPT3_CMD_PENDING; - _config_display_some_debug(ioc, smid, "config_done", mpi_reply); + if (ioc->logging_level & MPT_DEBUG_CONFIG) + _config_display_some_debug(ioc, smid, "config_done", mpi_reply); ioc->config_cmds.smid = USHRT_MAX; complete(&ioc->config_cmds.done); return 1; @@ -305,6 +303,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t u8 retry_count, issue_host_reset = 0; struct config_request mem; u32 ioc_status = UINT_MAX; + u8 issue_reset = 0; mutex_lock(&ioc->config_cmds.mutex); if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) { @@ -378,14 +377,18 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t config_request = mpt3sas_base_get_msg_frame(ioc, smid); ioc->config_cmds.smid = smid; memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t)); - _config_display_some_debug(ioc, smid, "config_request", NULL); + if (ioc->logging_level & MPT_DEBUG_CONFIG) + _config_display_some_debug(ioc, smid, "config_request", NULL); init_completion(&ioc->config_cmds.done); ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ); if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) { - mpt3sas_base_check_cmd_timeout(ioc, - ioc->config_cmds.status, mpi_request, - sizeof(Mpi2ConfigRequest_t)/4); + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, + smid, "config_request", NULL); + mpt3sas_check_cmd_timeout(ioc, + ioc->config_cmds.status, mpi_request, + sizeof(Mpi2ConfigRequest_t)/4, issue_reset); retry_count++; if (ioc->config_cmds.smid == smid) mpt3sas_base_free_smid(ioc, smid); @@ -404,8 +407,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t /* Reply Frame Sanity Checks to workaround FW issues */ if ((mpi_request->Header.PageType & 0xF) != (mpi_reply->Header.PageType & 0xF)) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, + smid, "config_request", NULL); _debug_dump_mf(mpi_request, ioc->request_sz/4); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n", ioc->name, __func__, mpi_request->Header.PageType & 0xF, @@ -415,8 +421,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t if (((mpi_request->Header.PageType & 0xF) == MPI2_CONFIG_PAGETYPE_EXTENDED) && mpi_request->ExtPageType != mpi_reply->ExtPageType) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, + smid, "config_request", NULL); _debug_dump_mf(mpi_request, ioc->request_sz/4); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n", ioc->name, __func__, mpi_request->ExtPageType, @@ -439,8 +448,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t if (p) { if ((mpi_request->Header.PageType & 0xF) != (p[3] & 0xF)) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, + smid, "config_request", NULL); _debug_dump_mf(mpi_request, ioc->request_sz/4); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); 
_debug_dump_config(p, min_t(u16, mem.sz, config_page_sz)/4); panic("%s: %s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n", @@ -452,8 +464,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t if (((mpi_request->Header.PageType & 0xF) == MPI2_CONFIG_PAGETYPE_EXTENDED) && (mpi_request->ExtPageType != p[6])) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, + smid, "config_request", NULL); _debug_dump_mf(mpi_request, ioc->request_sz/4); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); _debug_dump_config(p, min_t(u16, mem.sz, config_page_sz)/4); panic("%s: %s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n", diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 6874cf017739..62e552838565 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -180,6 +180,12 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, case MPI2_FUNCTION_SMP_PASSTHROUGH: desc = "smp_passthrough"; break; + case MPI2_FUNCTION_TOOLBOX: + desc = "toolbox"; + break; + case MPI2_FUNCTION_NVME_ENCAPSULATED: + desc = "nvme_encapsulated"; + break; } if (!desc) @@ -478,14 +484,15 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) } /** - * mpt3sas_ctl_reset_handler - reset callback handler (for ctl) + * mpt3sas_ctl_reset_handler - clears outstanding ioctl cmd. * @ioc: per adapter object * * The handler for doing any required cleanup or initialization. */ -void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) +void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc) { - dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__)); + dtmprintk(ioc, + ioc_info(ioc, "%s: clear outstanding ioctl cmd\n", __func__)); if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) { ioc->ctl_cmds.status |= MPT3_CMD_RESET; mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid); @@ -1021,10 +1028,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, ioc->ignore_loginfos = 0; } if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, - karg.data_sge_offset); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + karg.data_sge_offset, issue_reset); goto issue_host_reset; } @@ -1325,7 +1331,8 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg) __func__)); retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); - ioc_info(ioc, "host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED")); + ioc_info(ioc, + "Ioctl: host reset: %s\n", ((!retval) ? 
"SUCCESS" : "FAILED")); return 0; } @@ -1733,10 +1740,9 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, - sizeof(Mpi2DiagBufferPostRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset); goto issue_host_reset; } @@ -2108,6 +2114,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, u16 ioc_status; u32 ioc_state; int rc; + u8 reset_needed = 0; dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__)); @@ -2115,6 +2122,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, rc = 0; *issue_reset = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { if (ioc->diag_buffer_status[buffer_type] & @@ -2157,9 +2165,10 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { - *issue_reset = mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, - sizeof(Mpi2DiagReleaseRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed); + *issue_reset = reset_needed; rc = -EFAULT; goto out; } @@ -2417,10 +2426,9 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, - sizeof(Mpi2DiagBufferPostRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset); goto issue_host_reset; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index a038be8a0e90..c597d544eb39 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -1050,6 +1050,34 @@ mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) } /** + * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency. + * @ioc: per adapter object + * Context: This function will acquire ioc->pcie_device_lock + * + * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency + * which has reported maximum among all available NVMe drives. + * Minimum max_shutdown_latency will be six seconds. + */ +static void +_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc) +{ + struct _pcie_device *pcie_device; + unsigned long flags; + u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { + if (pcie_device->shutdown_latency) { + if (shutdown_latency < pcie_device->shutdown_latency) + shutdown_latency = + pcie_device->shutdown_latency; + } + } + ioc->max_shutdown_latency = shutdown_latency; + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); +} + +/** * _scsih_pcie_device_remove - remove pcie_device from list. 
* @ioc: per adapter object * @pcie_device: the pcie_device object @@ -1063,6 +1091,7 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc, { unsigned long flags; int was_on_pcie_device_list = 0; + u8 update_latency = 0; if (!pcie_device) return; @@ -1082,11 +1111,21 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc, list_del_init(&pcie_device->list); was_on_pcie_device_list = 1; } + if (pcie_device->shutdown_latency == ioc->max_shutdown_latency) + update_latency = 1; spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); if (was_on_pcie_device_list) { kfree(pcie_device->serial_number); pcie_device_put(pcie_device); } + + /* + * This device's RTD3 Entry Latency matches IOC's + * max_shutdown_latency. Recalculate IOC's max_shutdown_latency + * from the available drives as current drive is getting removed. + */ + if (update_latency) + _scsih_set_nvme_max_shutdown_latency(ioc); } @@ -1101,6 +1140,7 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) struct _pcie_device *pcie_device; unsigned long flags; int was_on_pcie_device_list = 0; + u8 update_latency = 0; if (ioc->shost_recovery) return; @@ -1113,12 +1153,22 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) was_on_pcie_device_list = 1; pcie_device_put(pcie_device); } + if (pcie_device->shutdown_latency == ioc->max_shutdown_latency) + update_latency = 1; } spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); if (was_on_pcie_device_list) { _scsih_pcie_device_remove_from_sml(ioc, pcie_device); pcie_device_put(pcie_device); } + + /* + * This device's RTD3 Entry Latency matches IOC's + * max_shutdown_latency. Recalculate IOC's max_shutdown_latency + * from the available drives as current drive is getting removed. + */ + if (update_latency) + _scsih_set_nvme_max_shutdown_latency(ioc); } /** @@ -1554,7 +1604,12 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) max_depth = 1; if (qdepth > max_depth) qdepth = max_depth; - return scsi_change_queue_depth(sdev, qdepth); + scsi_change_queue_depth(sdev, qdepth); + sdev_printk(KERN_INFO, sdev, + "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n", + sdev->queue_depth, sdev->tagged_supported, + sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1)); + return sdev->queue_depth; } /** @@ -2673,6 +2728,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, u16 smid = 0; u32 ioc_state; int rc; + u8 issue_reset = 0; lockdep_assert_held(&ioc->tm_cmds.mutex); @@ -2695,7 +2751,13 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, } if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { - mpt3sas_base_fault_info(ioc, ioc_state & + mpt3sas_print_fault_code(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } else if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, ioc_state & MPI2_DOORBELL_DATA_MASK); rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); return (!rc) ? 
SUCCESS : FAILED; @@ -2726,9 +2788,10 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, ioc->put_smid_hi_priority(ioc, smid, msix_task); wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { - if (mpt3sas_base_check_cmd_timeout(ioc, - ioc->tm_cmds.status, mpi_request, - sizeof(Mpi2SCSITaskManagementRequest_t)/4)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->tm_cmds.status, mpi_request, + sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset); + if (issue_reset) { rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); rc = (!rc) ? SUCCESS : FAILED; @@ -2875,15 +2938,17 @@ scsih_abort(struct scsi_cmnd *scmd) u8 timeout = 30; struct _pcie_device *pcie_device = NULL; - sdev_printk(KERN_INFO, scmd->device, - "attempting task abort! scmd(%p)\n", scmd); + sdev_printk(KERN_INFO, scmd->device, "attempting task abort!" + "scmd(0x%p), outstanding for %u ms & timeout %u ms\n", + scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc), + (scmd->request->timeout / HZ) * 1000); _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target || ioc->remove_host) { sdev_printk(KERN_INFO, scmd->device, - "device been deleted! scmd(%p)\n", scmd); + "device been deleted! scmd(0x%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; @@ -2892,6 +2957,8 @@ scsih_abort(struct scsi_cmnd *scmd) /* check for completed command */ if (st == NULL || st->cb_idx == 0xFF) { + sdev_printk(KERN_INFO, scmd->device, "No reference found at " + "driver, assuming scmd(0x%p) might have completed\n", scmd); scmd->result = DID_RESET << 16; r = SUCCESS; goto out; @@ -2920,7 +2987,7 @@ scsih_abort(struct scsi_cmnd *scmd) if (r == SUCCESS && st->cb_idx != 0xFF) r = FAILED; out: - sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", + sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); if (pcie_device) pcie_device_put(pcie_device); @@ -2949,14 +3016,14 @@ scsih_dev_reset(struct scsi_cmnd *scmd) struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; sdev_printk(KERN_INFO, scmd->device, - "attempting device reset! scmd(%p)\n", scmd); + "attempting device reset! scmd(0x%p)\n", scmd); _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target || ioc->remove_host) { sdev_printk(KERN_INFO, scmd->device, - "device been deleted! scmd(%p)\n", scmd); + "device been deleted! scmd(0x%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; @@ -2996,7 +3063,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd) if (r == SUCCESS && atomic_read(&scmd->device->device_busy)) r = FAILED; out: - sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", + sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); if (sas_device) @@ -3027,15 +3094,15 @@ scsih_target_reset(struct scsi_cmnd *scmd) struct scsi_target *starget = scmd->device->sdev_target; struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; - starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n", - scmd); + starget_printk(KERN_INFO, starget, + "attempting target reset! 
scmd(0x%p)\n", scmd); _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target || ioc->remove_host) { - starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n", - scmd); + starget_printk(KERN_INFO, starget, + "target been deleted! scmd(0x%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; @@ -3074,7 +3141,7 @@ scsih_target_reset(struct scsi_cmnd *scmd) if (r == SUCCESS && atomic_read(&starget->target_busy)) r = FAILED; out: - starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", + starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); if (sas_device) @@ -3097,7 +3164,7 @@ scsih_host_reset(struct scsi_cmnd *scmd) struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); int r, retval; - ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd); + ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd); scsi_print_command(scmd); if (ioc->is_driver_loading || ioc->remove_host) { @@ -3109,7 +3176,7 @@ scsih_host_reset(struct scsi_cmnd *scmd) retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); r = (retval < 0) ? FAILED : SUCCESS; out: - ioc_info(ioc, "host reset: %s scmd(%p)\n", + ioc_info(ioc, "host reset: %s scmd(0x%p)\n", r == SUCCESS ? "SUCCESS" : "FAILED", scmd); return r; @@ -4475,6 +4542,7 @@ static void _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, Mpi2EventDataTemperature_t *event_data) { + u32 doorbell; if (ioc->temp_sensors_count >= event_data->SensorNum) { ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n", le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ", @@ -4484,6 +4552,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, event_data->SensorNum); ioc_err(ioc, "Current Temp In Celsius: %d\n", event_data->CurrentTemperature); + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, + doorbell & MPI2_DOORBELL_DATA_MASK); + } else if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, + doorbell & MPI2_DOORBELL_DATA_MASK); + } + } } } @@ -6933,6 +7013,16 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { pcie_device->nvme_mdts = le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize); + pcie_device->shutdown_latency = + le16_to_cpu(pcie_device_pg2.ShutdownLatency); + /* + * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency + * if drive's RTD3 Entry Latency is greater then IOC's + * max_shutdown_latency. 
+ */ + if (pcie_device->shutdown_latency > ioc->max_shutdown_latency) + ioc->max_shutdown_latency = + pcie_device->shutdown_latency; if (pcie_device_pg2.ControllerResetTO) pcie_device->reset_timeout = pcie_device_pg2.ControllerResetTO; @@ -7669,10 +7759,9 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->scsih_cmds.status, mpi_request, - sizeof(Mpi2RaidActionRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, mpi_request, + sizeof(Mpi2RaidActionRequest_t)/4, issue_reset); rc = -EFAULT; goto out; } @@ -9272,15 +9361,17 @@ void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) } /** - * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih) + * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding + * scsi & tm cmds. * @ioc: per adapter object * * The handler for doing any required cleanup or initialization. */ void -mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) +mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc) { - dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__)); + dtmprintk(ioc, + ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__)); if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) { ioc->scsih_cmds.status |= MPT3_CMD_RESET; mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid); @@ -9357,6 +9448,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) } _scsih_remove_unresponding_devices(ioc); _scsih_scan_for_devices_after_reset(ioc); + _scsih_set_nvme_max_shutdown_latency(ioc); break; case MPT3SAS_PORT_ENABLE_COMPLETE: ioc->start_scan = 0; @@ -9660,6 +9752,75 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, } /** + * _scsih_nvme_shutdown - NVMe shutdown notification + * @ioc: per adapter object + * + * Sending IoUnitControl request with shutdown operation code to alert IOC that + * the host system is shutting down so that IOC can issue NVMe shutdown to + * NVMe drives attached to it. + */ +static void +_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26IoUnitControlRequest_t *mpi_request; + Mpi26IoUnitControlReply_t *mpi_reply; + u16 smid; + + /* are there any NVMe devices ? 
*/ + if (list_empty(&ioc->pcie_device_list)) + return; + + mutex_lock(&ioc->scsih_cmds.mutex); + + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: scsih_cmd in use\n", __func__); + goto out; + } + + ioc->scsih_cmds.status = MPT3_CMD_PENDING; + + smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + ioc_err(ioc, + "%s: failed obtaining a smid\n", __func__); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t)); + mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL; + mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN; + + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + /* Wait for max_shutdown_latency seconds */ + ioc_info(ioc, + "Io Unit Control shutdown (sending), Shutdown latency %d sec\n", + ioc->max_shutdown_latency); + wait_for_completion_timeout(&ioc->scsih_cmds.done, + ioc->max_shutdown_latency*HZ); + + if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + goto out; + } + + if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + ioc_info(ioc, "Io Unit Control shutdown (complete):" + "ioc_status(0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + } + out: + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); +} + + +/** * _scsih_ir_shutdown - IR shutdown notification * @ioc: per adapter object * @@ -9851,6 +10012,7 @@ scsih_shutdown(struct pci_dev *pdev) &ioc->ioc_pg1_copy); _scsih_ir_shutdown(ioc); + _scsih_nvme_shutdown(ioc); mpt3sas_base_detach(ioc); } @@ -10533,6 +10695,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; ioc->logging_level = logging_level; ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; + /* Host waits for minimum of six seconds */ + ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; /* * Enable MEMORY MOVE support flag. 
*/ @@ -10681,6 +10845,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state) mpt3sas_base_stop_watchdog(ioc); flush_scheduled_work(); scsi_block_requests(shost); + _scsih_nvme_shutdown(ioc); device_state = pci_choose_state(pdev, state); ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n", pdev, pci_name(pdev), device_state); @@ -10715,7 +10880,7 @@ scsih_resume(struct pci_dev *pdev) r = mpt3sas_base_map_resources(ioc); if (r) return r; - + ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n"); mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET); scsi_unblock_requests(shost); mpt3sas_base_start_watchdog(ioc); @@ -10784,6 +10949,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev) if (rc) return PCI_ERS_RESULT_DISCONNECT; + ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n"); rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); ioc_warn(ioc, "hard reset: %s\n", diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 5324662751bf..6ec5b7f33dfd 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -719,11 +719,10 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, sas_device_put(sas_device); } - if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) - dev_printk(KERN_INFO, &rphy->dev, - "add: handle(0x%04x), sas_addr(0x%016llx)\n", - handle, (unsigned long long) - mpt3sas_port->remote_identify.sas_address); + dev_info(&rphy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n", handle, + (unsigned long long)mpt3sas_port->remote_identify.sas_address); + mpt3sas_port->rphy = rphy; spin_lock_irqsave(&ioc->sas_node_lock, flags); list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list); @@ -813,6 +812,8 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, } if (!ioc->remove_host) sas_port_delete(mpt3sas_port->port); + ioc_info(ioc, "%s: removed: sas_addr(0x%016llx)\n", + __func__, (unsigned long long)sas_address); kfree(mpt3sas_port); } diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index da719b0694dc..7af9173c4925 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -47,6 +47,9 @@ static struct scsi_host_template mvs_sht = { .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif .shost_attrs = mvst_host_attrs, .track_queue_depth = 1, }; diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h index 9289c19fcb2f..fb8eacfceee8 100644 --- a/drivers/scsi/myrb.h +++ b/drivers/scsi/myrb.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * +/* SPDX-License-Identifier: GPL-2.0 */ +/* * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers * * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com> diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h index e6702ee85e9f..9f6696d0ddd5 100644 --- a/drivers/scsi/myrs.h +++ b/drivers/scsi/myrs.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * +/* SPDX-License-Identifier: GPL-2.0 */ +/* * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers * * This driver supports the newer, SCSI-based firmware interface only. 
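The mvsas host template above (and the pm8001 one that follows) now wires .compat_ioctl to the same sas_ioctl handler used for the native path, guarded by CONFIG_COMPAT. A minimal sketch of that pattern for a hypothetical libsas-based HBA driver — illustration only, not code from this series — looks like:

/*
 * Sketch: point both the native and the 32-bit compat entry points at the
 * same libsas handler; the handler itself is responsible for any
 * compat-specific argument handling.
 */
#include <linux/module.h>
#include <scsi/scsi_host.h>
#include <scsi/libsas.h>

static struct scsi_host_template example_sas_sht = {
	.module		= THIS_MODULE,
	.name		= "example_sas",	/* hypothetical driver name */
	.ioctl		= sas_ioctl,		/* native ioctl entry point */
#ifdef CONFIG_COMPAT
	.compat_ioctl	= sas_ioctl,	/* reuse the same handler for compat */
#endif
};

Keeping the assignment under CONFIG_COMPAT leaves the template unchanged on kernels built without compat support, matching how the mvsas and pm8001 hunks do it.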
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index ff618ad80ebd..3c6076e4c6d2 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -101,6 +101,9 @@ static struct scsi_host_template pm8001_sht = { .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif .shost_attrs = pm8001_host_attrs, .track_queue_depth = 1, }; diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 832af4213046..3337cd341d21 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -1699,6 +1699,16 @@ qla1280_load_firmware_pio(struct scsi_qla_host *ha) return err; } +#ifdef QLA_64BIT_PTR +#define LOAD_CMD MBC_LOAD_RAM_A64_ROM +#define DUMP_CMD MBC_DUMP_RAM_A64_ROM +#define CMD_ARGS (BIT_7 | BIT_6 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0) +#else +#define LOAD_CMD MBC_LOAD_RAM +#define DUMP_CMD MBC_DUMP_RAM +#define CMD_ARGS (BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0) +#endif + #define DUMP_IT_BACK 0 /* for debug of RISC loading */ static int qla1280_load_firmware_dma(struct scsi_qla_host *ha) @@ -1748,7 +1758,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha) for(i = 0; i < cnt; i++) ((__le16 *)ha->request_ring)[i] = fw_data[i]; - mb[0] = MBC_LOAD_RAM; + mb[0] = LOAD_CMD; mb[1] = risc_address; mb[4] = cnt; mb[3] = ha->request_dma & 0xffff; @@ -1759,8 +1769,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha) __func__, mb[0], (void *)(long)ha->request_dma, mb[6], mb[7], mb[2], mb[3]); - err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | - BIT_1 | BIT_0, mb); + err = qla1280_mailbox_command(ha, CMD_ARGS, mb); if (err) { printk(KERN_ERR "scsi(%li): Failed to load partial " "segment of f\n", ha->host_no); @@ -1768,7 +1777,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha) } #if DUMP_IT_BACK - mb[0] = MBC_DUMP_RAM; + mb[0] = DUMP_CMD; mb[1] = risc_address; mb[4] = cnt; mb[3] = p_tbuf & 0xffff; @@ -1776,8 +1785,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha) mb[7] = upper_32_bits(p_tbuf) & 0xffff; mb[6] = upper_32_bits(p_tbuf) >> 16; - err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | - BIT_1 | BIT_0, mb); + err = qla1280_mailbox_command(ha, CMD_ARGS, mb); if (err) { printk(KERN_ERR "Failed to dump partial segment of f/w\n"); diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h index a1a8aefc7cc3..e7820b5bca38 100644 --- a/drivers/scsi/qla1280.h +++ b/drivers/scsi/qla1280.h @@ -277,6 +277,8 @@ struct device_reg { #define MBC_MAILBOX_REGISTER_TEST 6 /* Wrap incoming mailboxes */ #define MBC_VERIFY_CHECKSUM 7 /* Verify checksum */ #define MBC_ABOUT_FIRMWARE 8 /* Get firmware revision */ +#define MBC_LOAD_RAM_A64_ROM 9 /* Load RAM 64bit ROM version */ +#define MBC_DUMP_RAM_A64_ROM 0x0a /* Dump RAM 64bit ROM version */ #define MBC_INIT_REQUEST_QUEUE 0x10 /* Initialize request queue */ #define MBC_INIT_RESPONSE_QUEUE 0x11 /* Initialize response queue */ #define MBC_EXECUTE_IOCB 0x12 /* Execute IOCB command */ diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index cbaf178fc979..d7169e43f5e1 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -54,7 +54,8 @@ void qla2x00_bsg_sp_free(srb_t *sp) if (sp->type == SRB_CT_CMD || sp->type == SRB_FXIOCB_BCMD || sp->type == SRB_ELS_CMD_HST) - kfree(sp->fcport); + qla2x00_free_fcport(sp->fcport); + qla2x00_rel_sp(sp); } @@ -405,7 +406,7 @@ done_unmap_sg: done_free_fcport: if 
(bsg_request->msgcode == FC_BSG_RPT_ELS) - kfree(fcport); + qla2x00_free_fcport(fcport); done: return rval; } @@ -545,7 +546,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job) return rval; done_free_fcport: - kfree(fcport); + qla2x00_free_fcport(fcport); done_unmap_sg: dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); @@ -796,7 +797,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job) if (atomic_read(&vha->loop_state) == LOOP_READY && (ha->current_topology == ISP_CFG_F || - (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE && + (get_unaligned_le32(req_data) == ELS_OPCODE_BYTE && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && elreq.options == EXTERNAL_LOOPBACK) { type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; @@ -2049,7 +2050,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job) return rval; done_free_fcport: - kfree(fcport); + qla2x00_free_fcport(fcport); done_unmap_rsp_sg: if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 30afc59c1870..e5500bba06ca 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -18,7 +18,7 @@ * | Device Discovery | 0x2134 | 0x210e-0x2116 | * | | | 0x211a | * | | | 0x211c-0x2128 | - * | | | 0x212a-0x2130 | + * | | | 0x212a-0x2134 | * | Queue Command and IO tracing | 0x3074 | 0x300b | * | | | 0x3027-0x3028 | * | | | 0x303d-0x3041 | diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 2edd9f7b3074..ed32e9715794 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2402,6 +2402,7 @@ typedef struct fc_port { unsigned int scan_needed:1; unsigned int n2n_flag:1; unsigned int explicit_logout:1; + unsigned int prli_pend_timer:1; struct completion nvme_del_done; uint32_t nvme_prli_service_param; @@ -2428,6 +2429,7 @@ typedef struct fc_port { struct work_struct free_work; struct work_struct reg_work; uint64_t jiffies_at_registration; + unsigned long prli_expired; struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; uint16_t tgt_id; @@ -2464,6 +2466,7 @@ typedef struct fc_port { struct qla_tgt_sess *tgt_session; struct ct_sns_desc ct_desc; enum discovery_state disc_state; + atomic_t shadow_disc_state; enum discovery_state next_disc_state; enum login_state fw_login_state; unsigned long dm_login_expire; @@ -2510,6 +2513,19 @@ struct event_arg { extern const char *const port_state_str[5]; +static const char * const port_dstate_str[] = { + "DELETED", + "GNN_ID", + "GNL", + "LOGIN_PEND", + "LOGIN_FAILED", + "GPDB", + "UPD_FCPORT", + "LOGIN_COMPLETE", + "ADISC", + "DELETE_PEND" +}; + /* * FC port flags. 
*/ @@ -3263,7 +3279,6 @@ enum qla_work_type { QLA_EVT_IDC_ACK, QLA_EVT_ASYNC_LOGIN, QLA_EVT_ASYNC_LOGOUT, - QLA_EVT_ASYNC_LOGOUT_DONE, QLA_EVT_ASYNC_ADISC, QLA_EVT_UEVENT, QLA_EVT_AENFX, @@ -3953,7 +3968,7 @@ struct qla_hw_data { void *sfp_data; dma_addr_t sfp_data_dma; - void *flt; + struct qla_flt_header *flt; dma_addr_t flt_dma; #define XGMAC_DATA_SIZE 4096 @@ -4845,6 +4860,9 @@ struct sff_8247_a0 { (ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \ NVME_ONLY_TARGET(fcport)) \ +#define PRLI_PHASE(_cls) \ + ((_cls == DSC_LS_PRLI_PEND) || (_cls == DSC_LS_PRLI_COMP)) + #include "qla_target.h" #include "qla_gbl.h" #include "qla_dbg.h" diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 9dc09c117416..d641918cdd46 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1354,12 +1354,12 @@ struct vp_rpt_id_entry_24xx { uint8_t port_id[3]; uint8_t format; union { - struct { + struct _f0 { /* format 0 loop */ uint8_t vp_idx_map[16]; uint8_t reserved_4[32]; } f0; - struct { + struct _f1 { /* format 1 fabric */ uint8_t vpstat1_subcode; /* vp_status=1 subcode */ uint8_t flags; @@ -1381,21 +1381,22 @@ struct vp_rpt_id_entry_24xx { uint16_t bbcr; uint8_t reserved_5[6]; } f1; - struct { /* format 2: N2N direct connect */ - uint8_t vpstat1_subcode; - uint8_t flags; - uint16_t rsv6; - uint8_t rsv2[12]; - - uint8_t ls_rjt_vendor; - uint8_t ls_rjt_explanation; - uint8_t ls_rjt_reason; - uint8_t rsv3[5]; - - uint8_t port_name[8]; - uint8_t node_name[8]; - uint8_t remote_nport_id[4]; - uint32_t reserved_5; + struct _f2 { /* format 2: N2N direct connect */ + uint8_t vpstat1_subcode; + uint8_t flags; + uint16_t fip_flags; + uint8_t rsv2[12]; + + uint8_t ls_rjt_vendor; + uint8_t ls_rjt_explanation; + uint8_t ls_rjt_reason; + uint8_t rsv3[5]; + + uint8_t port_name[8]; + uint8_t node_name[8]; + uint16_t bbcr; + uint8_t reserved_5[2]; + uint8_t remote_nport_id[4]; } f2; } u; }; @@ -1470,13 +1471,6 @@ struct qla_flt_location { uint16_t checksum; }; -struct qla_flt_header { - uint16_t version; - uint16_t length; - uint16_t checksum; - uint16_t unused; -}; - #define FLT_REG_FW 0x01 #define FLT_REG_BOOT_CODE 0x07 #define FLT_REG_VPD_0 0x14 @@ -1537,6 +1531,14 @@ struct qla_flt_region { uint32_t end; }; +struct qla_flt_header { + uint16_t version; + uint16_t length; + uint16_t checksum; + uint16_t unused; + struct qla_flt_region region[0]; +}; + #define FLT_REGION_SIZE 16 #define FLT_MAX_REGIONS 0xFF #define FLT_REGIONS_SIZE (FLT_REGION_SIZE * FLT_MAX_REGIONS) diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 5b163ad85c34..2a64729a2bc5 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -72,14 +72,13 @@ extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *, extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t); extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *, uint16_t *); -extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *, - uint16_t *); struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *, enum qla_work_type); extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *); int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e); extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *); extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *); +extern int qla24xx_async_abort_cmd(srb_t *, bool); extern void qla2x00_set_fcport_state(fc_port_t *fcport, int state); 
extern fc_port_t * @@ -182,8 +181,6 @@ extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); -extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *, - fc_port_t *, uint16_t *); extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *, @@ -201,6 +198,7 @@ extern void qla2x00_free_host(struct scsi_qla_host *); extern void qla2x00_relogin(struct scsi_qla_host *); extern void qla2x00_do_work(struct scsi_qla_host *); extern void qla2x00_free_fcports(struct scsi_qla_host *); +extern void qla2x00_free_fcport(fc_port_t *); extern void qla83xx_schedule_work(scsi_qla_host_t *, int); extern void qla83xx_service_idc_aen(struct work_struct *); @@ -253,8 +251,9 @@ extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *); extern void qla2x00_sp_free_dma(srb_t *sp); extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); -extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); -extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int); +extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int); +extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *); +extern int qla24xx_async_abort_cmd(srb_t *, bool); extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *); diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 446a9d6ba255..aaa4a5bbf2ff 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -2963,7 +2963,6 @@ int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport) return QLA_FUNCTION_FAILED; e->u.fcport.fcport = fcport; - fcport->flags |= FCF_ASYNC_ACTIVE; return qla2x00_post_work(vha, e); } @@ -3097,9 +3096,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) done_free_sp: sp->free(sp); - fcport->flags &= ~FCF_ASYNC_SENT; done: - fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } @@ -4290,7 +4287,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) return rval; - fcport->disc_state = DSC_GNN_ID; + qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID); sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); if (!sp) goto done; @@ -4464,7 +4461,6 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) done_free_sp: sp->free(sp); - fcport->flags &= ~FCF_ASYNC_SENT; done: return rval; } diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index aa5204163bec..9e6b56527b25 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -49,16 +49,9 @@ qla2x00_sp_timeout(struct timer_list *t) { srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer); struct srb_iocb *iocb; - struct req_que *req; - unsigned long flags; - struct qla_hw_data *ha = sp->vha->hw; - WARN_ON_ONCE(irqs_disabled()); - spin_lock_irqsave(&ha->hardware_lock, flags); - req = sp->qpair->req; - req->outstanding_cmds[sp->handle] = NULL; + WARN_ON(irqs_disabled()); iocb = &sp->u.iocb_cmd; - spin_unlock_irqrestore(&ha->hardware_lock, flags); iocb->timeout(sp); } @@ -142,7 +135,7 @@ static void qla24xx_abort_sp_done(srb_t *sp, int res) sp->free(sp); } -static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) +int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) { scsi_qla_host_t *vha = cmd_sp->vha; struct 
srb_iocb *abt_iocb; @@ -242,6 +235,7 @@ qla2x00_async_iocb_timeout(void *data) case SRB_NACK_PRLI: case SRB_NACK_LOGO: case SRB_CTRL_VP: + default: rc = qla24xx_async_abort_cmd(sp, false); if (rc) { spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); @@ -258,10 +252,6 @@ qla2x00_async_iocb_timeout(void *data) sp->done(sp, QLA_FUNCTION_TIMEOUT); } break; - default: - WARN_ON_ONCE(true); - sp->done(sp, QLA_FUNCTION_TIMEOUT); - break; } } @@ -326,10 +316,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, if (!sp) goto done; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); fcport->flags |= FCF_ASYNC_SENT; fcport->logout_completed = 0; - fcport->disc_state = DSC_LOGIN_PEND; sp->type = SRB_LOGIN_CMD; sp->name = "login"; sp->gen1 = fcport->rscn_gen; @@ -425,7 +415,7 @@ qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport, fcport->flags &= ~FCF_ASYNC_ACTIVE; /* Don't re-login in target mode */ if (!fcport->tgt_session) - qla2x00_mark_device_lost(vha, fcport, 1, 0); + qla2x00_mark_device_lost(vha, fcport, 1); qlt_logo_completion_handler(fcport, data[0]); } @@ -533,7 +523,7 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport) e->u.fcport.fcport = fcport; fcport->flags |= FCF_ASYNC_ACTIVE; - fcport->disc_state = DSC_LOGIN_PEND; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); return qla2x00_post_work(vha, e); } @@ -685,7 +675,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, port_id_t id; u64 wwn; u16 data[2]; - u8 current_login_state; + u8 current_login_state, nvme_cls; fcport = ea->fcport; ql_dbg(ql_dbg_disc, vha, 0xffff, @@ -744,10 +734,17 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, loop_id = le16_to_cpu(e->nport_handle); loop_id = (loop_id & 0x7fff); - if (NVME_TARGET(vha->hw, fcport)) - current_login_state = e->current_login_state >> 4; - else - current_login_state = e->current_login_state & 0xf; + nvme_cls = e->current_login_state >> 4; + current_login_state = e->current_login_state & 0xf; + + if (PRLI_PHASE(nvme_cls)) { + current_login_state = nvme_cls; + fcport->fc4_type &= ~FS_FC4TYPE_FCP; + fcport->fc4_type |= FS_FC4TYPE_NVME; + } else if (PRLI_PHASE(current_login_state)) { + fcport->fc4_type |= FS_FC4TYPE_FCP; + fcport->fc4_type &= ~FS_FC4TYPE_NVME; + } ql_dbg(ql_dbg_disc, vha, 0x20e2, "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n", @@ -836,7 +833,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, * with GNL. 
Push disc_state back to DELETED * so GNL can go out again */ - fcport->disc_state = DSC_DELETED; + qla2x00_set_fcport_disc_state(fcport, + DSC_DELETED); break; case DSC_LS_PRLI_COMP: if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) @@ -912,7 +910,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, qla24xx_fcport_handle_login(vha, fcport); break; case ISP_CFG_N: - fcport->disc_state = DSC_DELETED; + qla2x00_set_fcport_disc_state(fcport, DSC_DELETED); if (time_after_eq(jiffies, fcport->dm_login_expire)) { if (fcport->n2n_link_reset_cnt < 2) { fcport->n2n_link_reset_cnt++; @@ -992,7 +990,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) set_bit(loop_id, vha->hw->loop_id_map); wwn = wwn_to_u64(e->port_name); - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8, + ql_dbg(ql_dbg_disc, vha, 0x20e8, "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n", __func__, (void *)&wwn, e->port_id[2], e->port_id[1], e->port_id[0], e->current_login_state, e->last_login_state, @@ -1051,6 +1049,16 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); vha->gnl.sent = 0; + if (!list_empty(&vha->gnl.fcports)) { + /* retrigger gnl */ + list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports, + gnl_entry) { + list_del_init(&fcport->gnl_entry); + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS) + break; + } + } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); sp->free(sp); @@ -1072,7 +1080,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); fcport->flags |= FCF_ASYNC_SENT; - fcport->disc_state = DSC_GNL; + qla2x00_set_fcport_disc_state(fcport, DSC_GNL); fcport->last_rscn_gen = fcport->rscn_gen; fcport->last_login_gen = fcport->login_gen; @@ -1121,8 +1129,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) done_free_sp: sp->free(sp); - fcport->flags &= ~FCF_ASYNC_SENT; done: + fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT); return rval; } @@ -1216,12 +1224,19 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; - if (!vha->flags.online) + if (!vha->flags.online) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n", + __func__, __LINE__, fcport->port_name); return rval; + } - if (fcport->fw_login_state == DSC_LS_PLOGI_PEND || - fcport->fw_login_state == DSC_LS_PRLI_PEND) + if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND || + fcport->fw_login_state == DSC_LS_PRLI_PEND) && + qla_dual_mode_enabled(vha)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n", + __func__, __LINE__, fcport->port_name); return rval; + } sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) @@ -1295,12 +1310,12 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) return rval; } - fcport->disc_state = DSC_GPDB; - sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; + qla2x00_set_fcport_disc_state(fcport, DSC_GPDB); + fcport->flags |= FCF_ASYNC_SENT; sp->type = SRB_MB_IOCB; sp->name = "gpdb"; @@ -1349,6 +1364,7 @@ done_free_sp: sp->free(sp); fcport->flags &= ~FCF_ASYNC_SENT; done: + fcport->flags &= ~FCF_ASYNC_ACTIVE; qla24xx_post_gpdb_work(vha, fcport, opt); return rval; } @@ -1379,7 +1395,7 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) ql_dbg(ql_dbg_disc, vha, 0x20d6, "%s %d %8phC session revalidate success\n", __func__, __LINE__, 
ea->fcport->port_name); - ea->fcport->disc_state = DSC_LOGIN_COMPLETE; + qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE); } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } @@ -1433,7 +1449,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) /* Set discovery state back to GNL to Relogin attempt */ if (qla_dual_mode_enabled(vha) || qla_ini_mode_enabled(vha)) { - fcport->disc_state = DSC_GNL; + qla2x00_set_fcport_disc_state(fcport, DSC_GNL); set_bit(RELOGIN_NEEDED, &vha->dpc_flags); } return; @@ -1600,6 +1616,10 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) break; default: if (fcport->login_pause) { + ql_dbg(ql_dbg_disc, vha, 0x20d8, + "%s %d %8phC exit\n", + __func__, __LINE__, + fcport->port_name); fcport->last_rscn_gen = fcport->rscn_gen; fcport->last_login_gen = fcport->login_gen; set_bit(RELOGIN_NEEDED, &vha->dpc_flags); @@ -1758,9 +1778,23 @@ qla2x00_tmf_iocb_timeout(void *data) { srb_t *sp = data; struct srb_iocb *tmf = &sp->u.iocb_cmd; + int rc, h; + unsigned long flags; - tmf->u.tmf.comp_status = CS_TIMEOUT; - complete(&tmf->u.tmf.comp); + rc = qla24xx_async_abort_cmd(sp, false); + if (rc) { + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { + if (sp->qpair->req->outstanding_cmds[h] == sp) { + sp->qpair->req->outstanding_cmds[h] = NULL; + break; + } + } + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + tmf->u.tmf.comp_status = CS_TIMEOUT; + tmf->u.tmf.data = QLA_FUNCTION_FAILED; + complete(&tmf->u.tmf.comp); + } } static void qla2x00_tmf_sp_done(srb_t *sp, int res) @@ -1976,7 +2010,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) qla24xx_post_prli_work(vha, ea->fcport); } else { ql_dbg(ql_dbg_disc, vha, 0x20ea, - "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n", + "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n", __func__, __LINE__, ea->fcport->port_name, ea->fcport->loop_id, ea->fcport->d_id.b24); @@ -1996,11 +2030,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) __func__, __LINE__, ea->fcport->port_name, ea->data[1]); ea->fcport->flags &= ~FCF_ASYNC_SENT; - ea->fcport->disc_state = DSC_LOGIN_FAILED; + qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED); if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED) set_bit(RELOGIN_NEEDED, &vha->dpc_flags); else - qla2x00_mark_device_lost(vha, ea->fcport, 1, 0); + qla2x00_mark_device_lost(vha, ea->fcport, 1); break; case MBS_LOOP_ID_USED: /* data[1] = IO PARAM 1 = nport ID */ @@ -2047,6 +2081,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) set_bit(lid, vha->hw->loop_id_map); ea->fcport->loop_id = lid; ea->fcport->keep_nport_handle = 0; + ea->fcport->logout_on_delete = 1; qlt_schedule_sess_for_deletion(ea->fcport); } break; @@ -2054,16 +2089,6 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) return; } -void -qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, - uint16_t *data) -{ - qlt_logo_completion_handler(fcport, data[0]); - fcport->login_gen++; - fcport->flags &= ~FCF_ASYNC_ACTIVE; - return; -} - /****************************************************************************/ /* QLogic ISP2x00 Hardware Support Functions. 
*/ /****************************************************************************/ @@ -4925,12 +4950,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) qla2x00_get_data_rate(vha); /* Determine what we need to do */ - if (ha->current_topology == ISP_CFG_FL && - (test_bit(LOCAL_LOOP_UPDATE, &flags))) { - - set_bit(RSCN_UPDATE, &flags); - - } else if (ha->current_topology == ISP_CFG_F && + if ((ha->current_topology == ISP_CFG_FL || + ha->current_topology == ISP_CFG_F) && (test_bit(LOCAL_LOOP_UPDATE, &flags))) { set_bit(RSCN_UPDATE, &flags); @@ -5087,7 +5108,7 @@ skip_login: rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, &entries); if (rval != QLA_SUCCESS) - goto cleanup_allocation; + goto err; ql_dbg(ql_dbg_disc, vha, 0x2011, "Entries in ID list (%d).\n", entries); @@ -5117,7 +5138,7 @@ skip_login: ql_log(ql_log_warn, vha, 0x2012, "Memory allocation failed for fcport.\n"); rval = QLA_MEMORY_ALLOC_FAILED; - goto cleanup_allocation; + goto err; } new_fcport->flags &= ~FCF_FABRIC_DEVICE; @@ -5207,7 +5228,7 @@ skip_login: ql_log(ql_log_warn, vha, 0xd031, "Failed to allocate memory for fcport.\n"); rval = QLA_MEMORY_ALLOC_FAILED; - goto cleanup_allocation; + goto err; } spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); new_fcport->flags &= ~FCF_FABRIC_DEVICE; @@ -5230,7 +5251,7 @@ skip_login: qla_ini_mode_enabled(vha)) && atomic_read(&fcport->state) == FCS_ONLINE) { qla2x00_mark_device_lost(vha, fcport, - ql2xplogiabsentdevice, 0); + ql2xplogiabsentdevice); if (fcport->loop_id != FC_NO_LOOP_ID && (fcport->flags & FCF_FCP2_DEVICE) == 0 && fcport->port_type != FCT_INITIATOR && @@ -5250,15 +5271,14 @@ skip_login: qla24xx_fcport_handle_login(vha, fcport); } -cleanup_allocation: - kfree(new_fcport); + qla2x00_free_fcport(new_fcport); - if (rval != QLA_SUCCESS) { - ql_dbg(ql_dbg_disc, vha, 0x2098, - "Configure local loop error exit: rval=%x.\n", rval); - } + return rval; - return (rval); +err: + ql_dbg(ql_dbg_disc, vha, 0x2098, + "Configure local loop error exit: rval=%x.\n", rval); + return rval; } static void @@ -5385,11 +5405,14 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n", __func__, fcport->port_name); - fcport->disc_state = DSC_UPD_FCPORT; + qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT); fcport->login_retry = vha->hw->login_retry_count; fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); fcport->deleted = 0; - fcport->logout_on_delete = 1; + if (vha->hw->current_topology == ISP_CFG_NL) + fcport->logout_on_delete = 0; + else + fcport->logout_on_delete = 1; fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0; switch (vha->hw->current_topology) { @@ -5405,7 +5428,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) if (NVME_TARGET(vha->hw, fcport)) { qla_nvme_register_remote(vha, fcport); - fcport->disc_state = DSC_LOGIN_COMPLETE; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); qla2x00_set_fcport_state(fcport, FCS_ONLINE); return; } @@ -5450,7 +5473,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) } } - fcport->disc_state = DSC_LOGIN_COMPLETE; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); } void qla_register_fcport_fn(struct work_struct *work) @@ -5859,7 +5882,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) if (NVME_TARGET(vha->hw, fcport)) { if (fcport->disc_state == DSC_DELETE_PEND) { - fcport->disc_state = DSC_GNL; + qla2x00_set_fcport_disc_state(fcport, DSC_GNL); vha->fcport_count--; fcport->login_succ = 0; } @@ -5905,7 +5928,7 @@ 
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) qla_ini_mode_enabled(vha)) && atomic_read(&fcport->state) == FCS_ONLINE) { qla2x00_mark_device_lost(vha, fcport, - ql2xplogiabsentdevice, 0); + ql2xplogiabsentdevice); if (fcport->loop_id != FC_NO_LOOP_ID && (fcport->flags & FCF_FCP2_DEVICE) == 0 && fcport->port_type != FCT_INITIATOR && @@ -6071,7 +6094,7 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, ha->isp_ops->fabric_logout(vha, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); - qla2x00_mark_device_lost(vha, fcport, 1, 0); + qla2x00_mark_device_lost(vha, fcport, 1); rval = 1; break; @@ -6585,9 +6608,9 @@ qla2x00_quiesce_io(scsi_qla_host_t *vha) atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); - qla2x00_mark_all_devices_lost(vha, 0); + qla2x00_mark_all_devices_lost(vha); list_for_each_entry(vp, &ha->vp_list, list) - qla2x00_mark_all_devices_lost(vp, 0); + qla2x00_mark_all_devices_lost(vp); } else { if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, @@ -6663,14 +6686,14 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); - qla2x00_mark_all_devices_lost(vha, 0); + qla2x00_mark_all_devices_lost(vha); spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); - qla2x00_mark_all_devices_lost(vp, 0); + qla2x00_mark_all_devices_lost(vp); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 352aba4127f7..364b3db8b2dc 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -105,6 +105,30 @@ qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx) INIT_LIST_HEAD(&ctx->dsd_list); } +static inline void +qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state) +{ + int old_val; + uint8_t shiftbits, mask; + + /* This will have to change when the max no. 
of states > 16 */ + shiftbits = 4; + mask = (1 << shiftbits) - 1; + + fcport->disc_state = state; + while (1) { + old_val = atomic_read(&fcport->shadow_disc_state); + if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state, + old_val, (old_val << shiftbits) | state)) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x2134, + "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n", + fcport->port_name, port_dstate_str[old_val & mask], + port_dstate_str[state], fcport->d_id.b24); + return; + } + } +} + static inline int qla2x00_hba_err_chk_enabled(srb_t *sp) { diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 8b050f0b4333..47bf60a9490a 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2537,13 +2537,32 @@ qla2x00_els_dcmd_iocb_timeout(void *data) fc_port_t *fcport = sp->fcport; struct scsi_qla_host *vha = sp->vha; struct srb_iocb *lio = &sp->u.iocb_cmd; + unsigned long flags = 0; + int res, h; ql_dbg(ql_dbg_io, vha, 0x3069, "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); - complete(&lio->u.els_logo.comp); + /* Abort the exchange */ + res = qla24xx_async_abort_cmd(sp, false); + if (res) { + ql_dbg(ql_dbg_io, vha, 0x3070, + "mbx abort_command failed.\n"); + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { + if (sp->qpair->req->outstanding_cmds[h] == sp) { + sp->qpair->req->outstanding_cmds[h] = NULL; + break; + } + } + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + complete(&lio->u.els_logo.comp); + } else { + ql_dbg(ql_dbg_io, vha, 0x3071, + "mbx abort_command success.\n"); + } } static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res) @@ -2717,23 +2736,29 @@ qla2x00_els_dcmd2_iocb_timeout(void *data) srb_t *sp = data; fc_port_t *fcport = sp->fcport; struct scsi_qla_host *vha = sp->vha; - struct qla_hw_data *ha = vha->hw; unsigned long flags = 0; - int res; + int res, h; ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069, "%s hdl=%x ELS Timeout, %8phC portid=%06x\n", sp->name, sp->handle, fcport->port_name, fcport->d_id.b24); /* Abort the exchange */ - spin_lock_irqsave(&ha->hardware_lock, flags); - res = ha->isp_ops->abort_command(sp); + res = qla24xx_async_abort_cmd(sp, false); ql_dbg(ql_dbg_io, vha, 0x3070, "mbx abort_command %s\n", (res == QLA_SUCCESS) ? 
"successful" : "failed"); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - - sp->done(sp, QLA_FUNCTION_TIMEOUT); + if (res) { + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { + if (sp->qpair->req->outstanding_cmds[h] == sp) { + sp->qpair->req->outstanding_cmds[h] = NULL; + break; + } + } + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + sp->done(sp, QLA_FUNCTION_TIMEOUT); + } } void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi) @@ -2852,7 +2877,8 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) fw_status[0], fw_status[1], fw_status[2]); fcport->flags &= ~FCF_ASYNC_SENT; - fcport->disc_state = DSC_LOGIN_FAILED; + qla2x00_set_fcport_disc_state(fcport, + DSC_LOGIN_FAILED); set_bit(RELOGIN_NEEDED, &vha->dpc_flags); break; } @@ -2865,7 +2891,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) fw_status[0], fw_status[1], fw_status[2]); sp->fcport->flags &= ~FCF_ASYNC_SENT; - sp->fcport->disc_state = DSC_LOGIN_FAILED; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED); set_bit(RELOGIN_NEEDED, &vha->dpc_flags); break; } @@ -2898,11 +2924,12 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, if (!sp) { ql_log(ql_log_info, vha, 0x70e6, "SRB allocation failed\n"); + fcport->flags &= ~FCF_ASYNC_ACTIVE; return -ENOMEM; } fcport->flags |= FCF_ASYNC_SENT; - fcport->disc_state = DSC_LOGIN_PEND; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); elsio = &sp->u.iocb_cmd; ql_dbg(ql_dbg_io, vha, 0x3073, "Enter: PLOGI portid=%06x\n", fcport->d_id.b24); @@ -2975,7 +3002,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, } out: - fcport->flags &= ~(FCF_ASYNC_SENT); + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); sp->free(sp); done: diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 7b8a6bfcf08d..e7bad0bfffda 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -788,7 +788,7 @@ skip_rio: if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { @@ -861,7 +861,7 @@ skip_rio: } vha->device_flags |= DFLG_NO_CABLE; - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { @@ -881,7 +881,7 @@ skip_rio: if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { @@ -924,7 +924,7 @@ skip_rio: atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); if (!N2N_TOPO(ha)) - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { @@ -953,7 +953,7 @@ skip_rio: if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { @@ -1022,7 +1022,6 @@ skip_rio: "Marking port lost loopid=%04x portid=%06x.\n", fcport->loop_id, fcport->d_id.b24); if (qla_ini_mode_enabled(vha)) { - qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); fcport->logout_on_delete = 0; qlt_schedule_sess_for_deletion(fcport); } @@ -1034,14 +1033,14 @@ global_port_update: atomic_set(&vha->loop_down_timer, 
LOOP_DOWN_TIME); vha->device_flags |= DFLG_NO_CABLE; - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } vha->flags.management_server_logged_in = 0; @@ -1253,11 +1252,33 @@ global_port_update: case MBA_DPORT_DIAGNOSTICS: ql_dbg(ql_dbg_async, vha, 0x5052, - "D-Port Diagnostics: %04x result=%s\n", - mb[0], - mb[1] == 0 ? "start" : - mb[1] == 1 ? "done (pass)" : - mb[1] == 2 ? "done (error)" : "other"); + "D-Port Diagnostics: %04x %04x %04x %04x\n", + mb[0], mb[1], mb[2], mb[3]); + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + static char *results[] = { + "start", "done(pass)", "done(error)", "undefined" }; + static char *types[] = { + "none", "dynamic", "static", "other" }; + uint result = mb[1] >> 0 & 0x3; + uint type = mb[1] >> 6 & 0x3; + uint sw = mb[1] >> 15 & 0x1; + ql_dbg(ql_dbg_async, vha, 0x5052, + "D-Port Diagnostics: result=%s type=%s [sw=%u]\n", + results[result], types[type], sw); + if (result == 2) { + static char *reasons[] = { + "reserved", "unexpected reject", + "unexpected phase", "retry exceeded", + "timed out", "not supported", + "user stopped" }; + uint reason = mb[2] >> 0 & 0xf; + uint phase = mb[2] >> 12 & 0xf; + ql_dbg(ql_dbg_async, vha, 0x5052, + "D-Port Diagnostics: reason=%s phase=%u \n", + reason < 7 ? reasons[reason] : "other", + phase >> 1); + } + } break; case MBA_TEMPERATURE_ALERT: @@ -2152,12 +2173,12 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) * swab32 of the "data" field in the beginning of qla2x00_status_entry() * would make guard field appear at offset 2 */ - a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); - a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); - a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); - e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); - e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); - e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); + a_guard = get_unaligned_le16(ap + 2); + a_app_tag = get_unaligned_le16(ap + 0); + a_ref_tag = get_unaligned_le32(ap + 4); + e_guard = get_unaligned_le16(ep + 2); + e_app_tag = get_unaligned_le16(ep + 0); + e_ref_tag = get_unaligned_le32(ep + 4); ql_dbg(ql_dbg_io, vha, 0x3023, "iocb(s) %p Returned STATUS.\n", sts24); @@ -2745,7 +2766,6 @@ check_scsi_status: port_state_str[FCS_ONLINE], comp_status); - qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); qlt_schedule_sess_for_deletion(fcport); } diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index b7c1108c48e2..9e09964f5c0e 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -6152,9 +6152,8 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, mcp->mb[7] = LSW(MSD(req_dma)); mcp->mb[8] = MSW(addr); /* Setting RAM ID to valid */ - mcp->mb[10] |= BIT_7; /* For MCTP RAM ID is 0x40 */ - mcp->mb[10] |= 0x40; + mcp->mb[10] = BIT_7 | 0x40; mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| MBX_0; diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index eabc5127174e..8ae639d089d1 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -147,7 +147,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) "Marking port dead, loop_id=0x%04x : %x.\n", fcport->loop_id, fcport->vha->vp_idx); - qla2x00_mark_device_lost(vha, fcport, 0, 0); + 
qla2x00_mark_device_lost(vha, fcport, 0); qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); } } @@ -167,7 +167,7 @@ qla24xx_disable_vp(scsi_qla_host_t *vha) list_for_each_entry(fcport, &vha->vp_fcports, list) fcport->logout_on_delete = 0; - qla2x00_mark_all_devices_lost(vha, 0); + qla2x00_mark_all_devices_lost(vha); /* Remove port id from vp target map */ spin_lock_irqsave(&vha->hw->hardware_lock, flags); @@ -327,7 +327,7 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha) */ if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); - qla2x00_mark_all_devices_lost(vha, 0); + qla2x00_mark_all_devices_lost(vha); } else { if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index a3a44d4ace1e..cad1fc2a1b28 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -1210,9 +1210,9 @@ qlafx00_find_all_targets(scsi_qla_host_t *vha, " Existing TGT-ID %x did not get " " offline event from firmware.\n", fcport->old_tgt_id); - qla2x00_mark_device_lost(vha, fcport, 0, 0); + qla2x00_mark_device_lost(vha, fcport, 0); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - kfree(new_fcport); + qla2x00_free_fcport(new_fcport); return rval; } break; @@ -1230,7 +1230,7 @@ qlafx00_find_all_targets(scsi_qla_host_t *vha, return QLA_MEMORY_ALLOC_FAILED; } - kfree(new_fcport); + qla2x00_free_fcport(new_fcport); return rval; } @@ -1274,7 +1274,7 @@ qlafx00_configure_all_targets(scsi_qla_host_t *vha) if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { if (fcport->port_type != FCT_INITIATOR) - qla2x00_mark_device_lost(vha, fcport, 0, 0); + qla2x00_mark_device_lost(vha, fcport, 0); } } @@ -1298,7 +1298,7 @@ qlafx00_configure_all_targets(scsi_qla_host_t *vha) /* Free all new device structures not processed. 
*/ list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) { list_del(&fcport->list); - kfree(fcport); + qla2x00_free_fcport(fcport); } return rval; @@ -1706,7 +1706,7 @@ qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id) if (!fcport) return; - qla2x00_mark_device_lost(vha, fcport, 0, 0); + qla2x00_mark_device_lost(vha, fcport, 0); return; } @@ -1740,7 +1740,7 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt) set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } else if (evt->u.aenfx.mbx[2] == 2) { vha->device_flags |= DFLG_NO_CABLE; - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } } break; @@ -2513,7 +2513,7 @@ check_scsi_status: atomic_read(&fcport->state)); if (atomic_read(&fcport->state) == FCS_ONLINE) - qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); + qla2x00_mark_device_lost(fcport->vha, fcport, 1); break; case CS_ABORTED: diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 2b2028f2383e..185c5f34d4c1 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -1612,8 +1612,7 @@ qla82xx_get_bootld_offset(struct qla_hw_data *ha) return (u8 *)&ha->hablob->fw->data[offset]; } -static __le32 -qla82xx_get_fw_size(struct qla_hw_data *ha) +static u32 qla82xx_get_fw_size(struct qla_hw_data *ha) { struct qla82xx_uri_data_desc *uri_desc = NULL; @@ -1624,7 +1623,7 @@ qla82xx_get_fw_size(struct qla_hw_data *ha) return cpu_to_le32(uri_desc->size); } - return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]); + return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]); } static u8 * @@ -1816,7 +1815,7 @@ qla82xx_fw_load_from_blob(struct qla_hw_data *ha) } flashaddr = FLASH_ADDR_START; - size = (__force u32)qla82xx_get_fw_size(ha) / 8; + size = qla82xx_get_fw_size(ha) / 8; ptr64 = (u64 *)qla82xx_get_fw_offs(ha); for (i = 0; i < size; i++) { @@ -1883,7 +1882,7 @@ qla82xx_set_product_offset(struct qla_hw_data *ha) static int qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type) { - __le32 val; + uint32_t val; uint32_t min_size; struct qla_hw_data *ha = vha->hw; const struct firmware *fw = ha->hablob->fw; @@ -1896,8 +1895,8 @@ qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type) min_size = QLA82XX_URI_FW_MIN_SIZE; } else { - val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]); - if ((__force u32)val != QLA82XX_BDINFO_MAGIC) + val = get_unaligned_le32(&fw->data[QLA82XX_FW_MAGIC_OFFSET]); + if (val != QLA82XX_BDINFO_MAGIC) return -EINVAL; min_size = QLA82XX_FW_MIN_SIZE; @@ -3030,7 +3029,7 @@ qla8xxx_dev_failed_handler(scsi_qla_host_t *vha) /* Set DEV_FAILED flag to disable timer */ vha->device_flags |= DFLG_DEV_FAILED; qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); - qla2x00_mark_all_devices_lost(vha, 0); + qla2x00_mark_all_devices_lost(vha); vha->flags.online = 0; vha->flags.init_done = 0; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 8b84bc4a6ac8..b520a980d1dc 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1110,7 +1110,7 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) { u8 i; - qla2x00_mark_all_devices_lost(vha, 0); + qla2x00_mark_all_devices_lost(vha); for (i = 0; i < 10; i++) { if (wait_event_timeout(vha->fcport_waitQ, @@ -1667,7 +1667,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); - 
qla2x00_mark_all_devices_lost(vha, 0); + qla2x00_mark_all_devices_lost(vha); ret = qla2x00_full_login_lip(vha); if (ret != QLA_SUCCESS) { ql_dbg(ql_dbg_taskm, vha, 0x802d, @@ -3854,37 +3854,21 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha) } static inline void -qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, - int defer) +qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport) { - struct fc_rport *rport; - scsi_qla_host_t *base_vha; - unsigned long flags; + int now; if (!fcport->rport) return; - rport = fcport->rport; - if (defer) { - base_vha = pci_get_drvdata(vha->hw->pdev); - spin_lock_irqsave(vha->host->host_lock, flags); - fcport->drport = rport; - spin_unlock_irqrestore(vha->host->host_lock, flags); - qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen); - set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); - qla2xxx_wake_dpc(base_vha); - } else { - int now; - - if (rport) { - ql_dbg(ql_dbg_disc, fcport->vha, 0x2109, - "%s %8phN. rport %p roles %x\n", - __func__, fcport->port_name, rport, - rport->roles); - fc_remote_port_delete(rport); - } - qlt_do_generation_tick(vha, &now); + if (fcport->rport) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x2109, + "%s %8phN. rport %p roles %x\n", + __func__, fcport->port_name, fcport->rport, + fcport->rport->roles); + fc_remote_port_delete(fcport->rport); } + qlt_do_generation_tick(vha, &now); } /* @@ -3897,18 +3881,18 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, * Context: */ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, - int do_login, int defer) + int do_login) { if (IS_QLAFX00(vha->hw)) { qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); - qla2x00_schedule_rport_del(vha, fcport, defer); + qla2x00_schedule_rport_del(vha, fcport); return; } if (atomic_read(&fcport->state) == FCS_ONLINE && vha->vp_idx == fcport->vha->vp_idx) { qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); - qla2x00_schedule_rport_del(vha, fcport, defer); + qla2x00_schedule_rport_del(vha, fcport); } /* * We may need to retry the login, so don't change the state of the @@ -3937,7 +3921,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, * Context: */ void -qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) +qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha) { fc_port_t *fcport; @@ -3957,13 +3941,6 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) */ if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) continue; - if (atomic_read(&fcport->state) == FCS_ONLINE) { - qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); - if (defer) - qla2x00_schedule_rport_del(vha, fcport, defer); - else if (vha->vp_idx == fcport->vha->vp_idx) - qla2x00_schedule_rport_del(vha, fcport, defer); - } } } @@ -4965,7 +4942,6 @@ int qla2x00_post_async_##name##_work( \ qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); -qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO); qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE); @@ -5032,7 +5008,7 @@ void qla24xx_sched_upd_fcport(fc_port_t *fcport) fcport->jiffies_at_registration = jiffies; fcport->sec_since_registration = 0; fcport->next_disc_state = DSC_DELETED; - fcport->disc_state = DSC_UPD_FCPORT; + qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT); spin_unlock_irqrestore(&fcport->vha->work_lock, flags); 
queue_work(system_unbound_wq, &fcport->reg_work); @@ -5253,10 +5229,6 @@ qla2x00_do_work(struct scsi_qla_host *vha) case QLA_EVT_ASYNC_LOGOUT: rc = qla2x00_async_logout(vha, e->u.logio.fcport); break; - case QLA_EVT_ASYNC_LOGOUT_DONE: - qla2x00_async_logout_done(vha, e->u.logio.fcport, - e->u.logio.data); - break; case QLA_EVT_ASYNC_ADISC: qla2x00_async_adisc(vha, e->u.logio.fcport, e->u.logio.data); @@ -6899,13 +6871,13 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha) qpair->online = 0; mutex_unlock(&ha->mq_lock); - qla2x00_mark_all_devices_lost(vha, 0); + qla2x00_mark_all_devices_lost(vha); spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); - qla2x00_mark_all_devices_lost(vp, 0); + qla2x00_mark_all_devices_lost(vp); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); } @@ -7270,6 +7242,8 @@ qla2x00_module_init(void) BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064); BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64); BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56); + BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16); + BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8); /* Allocate cache for SRBs. */ srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index bbe90354f49b..76a38bf86cbc 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -669,8 +669,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) struct qla_hw_data *ha = vha->hw; uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0; - struct qla_flt_header *flt = (void *)ha->flt; - struct qla_flt_region *region = (void *)&flt[1]; + struct qla_flt_header *flt = ha->flt; + struct qla_flt_region *region = &flt->region[0]; uint16_t *wptr, cnt, chksum; uint32_t start; @@ -2652,18 +2652,15 @@ qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start, struct qla_flt_region *region) { struct qla_hw_data *ha = vha->hw; - struct qla_flt_header *flt; - struct qla_flt_region *flt_reg; + struct qla_flt_header *flt = ha->flt; + struct qla_flt_region *flt_reg = &flt->region[0]; uint16_t cnt; int rval = QLA_FUNCTION_FAILED; if (!ha->flt) return QLA_FUNCTION_FAILED; - flt = (struct qla_flt_header *)ha->flt; - flt_reg = (struct qla_flt_region *)&flt[1]; cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region); - for (; cnt; cnt--, flt_reg++) { if (flt_reg->start == start) { memcpy((uint8_t *)region, flt_reg, diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 68c14143e50e..70081b395fb2 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -596,7 +596,8 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); } else { sp->fcport->login_retry = 0; - sp->fcport->disc_state = DSC_LOGIN_COMPLETE; + qla2x00_set_fcport_disc_state(sp->fcport, + DSC_LOGIN_COMPLETE); sp->fcport->deleted = 0; sp->fcport->logout_on_delete = 1; } @@ -957,7 +958,7 @@ void qlt_free_session_done(struct work_struct *work) struct qlt_plogi_ack_t *own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, + ql_dbg(ql_dbg_disc, vha, 0xf084, "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n", __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, @@ -966,7 
+967,7 @@ void qlt_free_session_done(struct work_struct *work) sess->send_els_logo); if (!IS_SW_RESV_ADDR(sess->d_id)) { - qla2x00_mark_device_lost(vha, sess, 0, 0); + qla2x00_mark_device_lost(vha, sess, 0); if (sess->send_els_logo) { qlt_port_logo_t logo; @@ -1024,7 +1025,7 @@ void qlt_free_session_done(struct work_struct *work) while (!READ_ONCE(sess->logout_completed)) { if (!traced) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, + ql_dbg(ql_dbg_disc, vha, 0xf086, "%s: waiting for sess %p logout\n", __func__, sess); traced = true; @@ -1045,6 +1046,10 @@ void qlt_free_session_done(struct work_struct *work) (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO); } + spin_lock_irqsave(&vha->work_lock, flags); + sess->flags &= ~FCF_ASYNC_SENT; + spin_unlock_irqrestore(&vha->work_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (sess->se_sess) { sess->se_sess = NULL; @@ -1052,7 +1057,7 @@ void qlt_free_session_done(struct work_struct *work) tgt->sess_count--; } - sess->disc_state = DSC_DELETED; + qla2x00_set_fcport_disc_state(sess, DSC_DELETED); sess->fw_login_state = DSC_LS_PORT_UNAVAIL; sess->deleted = QLA_SESS_DELETED; @@ -1108,7 +1113,7 @@ void qlt_free_session_done(struct work_struct *work) spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); sess->free_pending = 0; - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, + ql_dbg(ql_dbg_disc, vha, 0xf001, "Unregistration of sess %p %8phC finished fcp_cnt %d\n", sess, sess->port_name, vha->fcport_count); @@ -1151,13 +1156,18 @@ void qlt_unreg_sess(struct fc_port *sess) return; } sess->free_pending = 1; + /* + * Use FCF_ASYNC_SENT flag to block other cmds used in sess + * management from being sent. + */ + sess->flags |= FCF_ASYNC_SENT; spin_unlock_irqrestore(&sess->vha->work_lock, flags); if (sess->se_sess) vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; - sess->disc_state = DSC_DELETE_PEND; + qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND); sess->last_rscn_gen = sess->rscn_gen; sess->last_login_gen = sess->login_gen; @@ -1257,7 +1267,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; spin_unlock_irqrestore(&sess->vha->work_lock, flags); - sess->disc_state = DSC_DELETE_PEND; + sess->prli_pend_timer = 0; + qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND); qla24xx_chk_fcp_state(sess); @@ -3446,13 +3457,13 @@ qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, cmd->trc_flags |= TRC_DIF_ERR; - cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); - cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); - cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); + cmd->a_guard = get_unaligned_be16(ap + 0); + cmd->a_app_tag = get_unaligned_be16(ap + 2); + cmd->a_ref_tag = get_unaligned_be32(ap + 4); - cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); - cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); - cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); + cmd->e_guard = get_unaligned_be16(ep + 0); + cmd->e_app_tag = get_unaligned_be16(ep + 2); + cmd->e_ref_tag = get_unaligned_be32(ep + 4); ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); @@ -4579,7 +4590,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, /* find other sess with nport_id collision */ if (port_id.b24 == other_sess->d_id.b24) { if (loop_id != other_sess->loop_id) { - ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c, + ql_dbg(ql_dbg_disc, vha, 0x1000c, "Invalidating sess %p loop_id %d wwn 
%llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -4595,7 +4606,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, * Another wwn used to have our s_id/loop_id * kill the session, but don't free the loop_id */ - ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b, + ql_dbg(ql_dbg_disc, vha, 0xf01b, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -4610,7 +4621,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, /* find other sess with nport handle collision */ if ((loop_id == other_sess->loop_id) && (loop_id != FC_NO_LOOP_ID)) { - ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d, + ql_dbg(ql_dbg_disc, vha, 0x1000d, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -6053,7 +6064,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, if (!IS_SW_RESV_ADDR(fcport->d_id)) vha->fcport_count++; fcport->login_gen++; - fcport->disc_state = DSC_LOGIN_COMPLETE; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); fcport->login_succ = 1; newfcport = 1; } diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index d006f0a97b8c..6539499e9e95 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -379,8 +379,7 @@ static inline int get_datalen_for_atio(struct atio_from_isp *atio) { int len = atio->u.isp24.fcp_cmnd.add_cdb_len; - return (be32_to_cpu(get_unaligned((uint32_t *) - &atio->u.isp24.fcp_cmnd.add_cdb[len * 4]))); + return get_unaligned_be32(&atio->u.isp24.fcp_cmnd.add_cdb[len * 4]); } #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 03bd3b712b77..bb03c022e023 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "10.01.00.21-k" +#define QLA2XXX_VERSION "10.01.00.22-k" #define QLA_DRIVER_MAJOR_VER 10 #define QLA_DRIVER_MINOR_VER 1 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 2323432a0edb..5504ab11decc 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -4145,7 +4145,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha) dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, ha->queues_dma); - if (ha->fw_dump) + if (ha->fw_dump) vfree(ha->fw_dump); ha->queues_len = 0; diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 57bcd05605bf..8f3af87b6bb0 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -189,17 +189,7 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg) } -/** - * scsi_ioctl - Dispatch ioctl to scsi device - * @sdev: scsi device receiving ioctl - * @cmd: which ioctl is it - * @arg: data associated with ioctl - * - * Description: The scsi_ioctl() function differs from most ioctls in that it - * does not take a major/minor number as the dev field. Rather, it takes - * a pointer to a &struct scsi_device. 
- */ -int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg) { char scsi_cmd[MAX_COMMAND_SIZE]; struct scsi_sense_hdr sense_hdr; @@ -266,14 +256,50 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) return scsi_ioctl_get_pci(sdev, arg); case SG_SCSI_RESET: return scsi_ioctl_reset(sdev, arg); - default: - if (sdev->host->hostt->ioctl) - return sdev->host->hostt->ioctl(sdev, cmd, arg); } + return -ENOIOCTLCMD; +} + +/** + * scsi_ioctl - Dispatch ioctl to scsi device + * @sdev: scsi device receiving ioctl + * @cmd: which ioctl is it + * @arg: data associated with ioctl + * + * Description: The scsi_ioctl() function differs from most ioctls in that it + * does not take a major/minor number as the dev field. Rather, it takes + * a pointer to a &struct scsi_device. + */ +int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +{ + int ret = scsi_ioctl_common(sdev, cmd, arg); + + if (ret != -ENOIOCTLCMD) + return ret; + + if (sdev->host->hostt->ioctl) + return sdev->host->hostt->ioctl(sdev, cmd, arg); + return -EINVAL; } EXPORT_SYMBOL(scsi_ioctl); +#ifdef CONFIG_COMPAT +int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +{ + int ret = scsi_ioctl_common(sdev, cmd, arg); + + if (ret != -ENOIOCTLCMD) + return ret; + + if (sdev->host->hostt->compat_ioctl) + return sdev->host->hostt->compat_ioctl(sdev, cmd, arg); + + return ret; +} +EXPORT_SYMBOL(scsi_compat_ioctl); +#endif + /* * We can process a reset even when a device isn't fully operable. */ diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 3e7a45d0daca..610ee41fa54c 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2108,6 +2108,8 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, memset(data, 0, sizeof(*data)); memset(&cmd[0], 0, 12); + + dbd = sdev->set_dbd_for_ms ? 
8 : dbd; cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ cmd[2] = modepage; diff --git a/drivers/scsi/scsi_logging.h b/drivers/scsi/scsi_logging.h index 836185de28c4..3df877886119 100644 --- a/drivers/scsi/scsi_logging.h +++ b/drivers/scsi/scsi_logging.h @@ -53,7 +53,7 @@ do { \ } while (0) #else #define SCSI_LOG_LEVEL(SHIFT, BITS) 0 -#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) +#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) do { } while (0) #endif /* CONFIG_SCSI_LOGGING */ /* diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index ed8d9709b9b9..dfc726fa34e3 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2093,7 +2093,12 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) "could not register session's dev\n"); goto release_ida; } - transport_register_device(&session->dev); + err = transport_register_device(&session->dev); + if (err) { + iscsi_cls_session_printk(KERN_ERR, session, + "could not register transport's dev\n"); + goto release_dev; + } spin_lock_irqsave(&sesslock, flags); list_add(&session->sess_list, &sesslist); @@ -2103,6 +2108,8 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n"); return 0; +release_dev: + device_del(&session->dev); release_ida: if (session->ida_used) ida_simple_remove(&iscsi_sess_ida, session->target_id); @@ -2263,7 +2270,12 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) "register connection's dev\n"); goto release_parent_ref; } - transport_register_device(&conn->dev); + err = transport_register_device(&conn->dev); + if (err) { + iscsi_cls_session_printk(KERN_ERR, session, "could not " + "register transport's dev\n"); + goto release_conn_ref; + } spin_lock_irqsave(&connlock, flags); list_add(&conn->conn_list, &connlist); @@ -2272,6 +2284,8 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n"); return conn; +release_conn_ref: + put_device(&conn->dev); release_parent_ref: put_device(&session->dev); free_conn: @@ -2947,6 +2961,24 @@ iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev) return err; } +static int iscsi_session_has_conns(int sid) +{ + struct iscsi_cls_conn *conn; + unsigned long flags; + int found = 0; + + spin_lock_irqsave(&connlock, flags); + list_for_each_entry(conn, &connlist, conn_list) { + if (iscsi_conn_get_sid(conn) == sid) { + found = 1; + break; + } + } + spin_unlock_irqrestore(&connlock, flags); + + return found; +} + static int iscsi_set_iface_params(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) @@ -3524,10 +3556,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) break; case ISCSI_UEVENT_DESTROY_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); - if (session) - transport->destroy_session(session); - else + if (!session) err = -EINVAL; + else if (iscsi_session_has_conns(ev->u.d_session.sid)) + err = -EBUSY; + else + transport->destroy_session(session); break; case ISCSI_UEVENT_UNBIND_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 902b649fc8ef..8ca9299ffd36 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1465,13 +1465,12 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo) * Note: most ioctls are 
forward onto the block subsystem or further * down in the scsi subsystem. **/ -static int sd_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg) +static int sd_ioctl_common(struct block_device *bdev, fmode_t mode, + unsigned int cmd, void __user *p) { struct gendisk *disk = bdev->bd_disk; struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; - void __user *p = (void __user *)arg; int error; SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, " @@ -1507,9 +1506,6 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode, break; default: error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p); - if (error != -ENOTTY) - break; - error = scsi_ioctl(sdp, cmd, p); break; } out: @@ -1691,39 +1687,31 @@ static void sd_rescan(struct device *dev) revalidate_disk(sdkp->disk); } +static int sd_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + void __user *p = (void __user *)arg; + int ret; + + ret = sd_ioctl_common(bdev, mode, cmd, p); + if (ret != -ENOTTY) + return ret; + + return scsi_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p); +} #ifdef CONFIG_COMPAT -/* - * This gets directly called from VFS. When the ioctl - * is not recognized we go back to the other translation paths. - */ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { - struct gendisk *disk = bdev->bd_disk; - struct scsi_disk *sdkp = scsi_disk(disk); - struct scsi_device *sdev = sdkp->device; void __user *p = compat_ptr(arg); - int error; - - error = scsi_verify_blk_ioctl(bdev, cmd); - if (error < 0) - return error; + int ret; - error = scsi_ioctl_block_when_processing_errors(sdev, cmd, - (mode & FMODE_NDELAY) != 0); - if (error) - return error; + ret = sd_ioctl_common(bdev, mode, cmd, p); + if (ret != -ENOTTY) + return ret; - if (is_sed_ioctl(cmd)) - return sed_ioctl(sdkp->opal_dev, cmd, p); - - /* - * Let the static ioctl translation table take care of it. - */ - if (!sdev->host->hostt->compat_ioctl) - return -ENOIOCTLCMD; - return sdev->host->hostt->compat_ioctl(sdev, cmd, p); + return scsi_compat_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p); } #endif diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index e0bd4cf17230..e4282bce5834 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -325,22 +325,21 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp, } /** - * sd_zbc_check_zones - Check the device capacity and zone sizes + * sd_zbc_check_capacity - Check the device capacity * @sdkp: Target disk + * @buf: command buffer + * @zblock: zone size in number of blocks * - * Check that the device capacity as reported by READ CAPACITY matches the - * max_lba value (plus one)of the report zones command reply. Also check that - * all zones of the device have an equal size, only allowing the last zone of - * the disk to have a smaller size (runt zone). The zone size must also be a - * power of two. + * Get the device zone size and check that the device capacity as reported + * by READ CAPACITY matches the max_lba value (plus one) of the report zones + * command reply for devices with RC_BASIS == 0. * - * Returns the zone size in number of blocks upon success or an error code - * upon failure. + * Returns 0 upon success or an error code upon failure. 
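Editor's note: the sd_ioctl()/sd_compat_ioctl() split above is the pattern this series applies throughout (sg and st receive the same treatment further down): one common helper decodes what it can and returns a sentinel errno, and the two entry points differ only in how the userspace argument becomes a kernel pointer and in which fallback they call. A minimal sketch of that shape for a hypothetical block driver; foo_*, FOO_GET_STATE and the plain -ENOTTY fallback are illustrative assumptions, not code from this patch.

#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>

#define FOO_GET_STATE _IOR('f', 0x01, int)	/* hypothetical command */

static int foo_ioctl_common(struct block_device *bdev, unsigned int cmd,
			    void __user *argp)
{
	switch (cmd) {
	case FOO_GET_STATE:
		return put_user(1, (int __user *)argp);
	default:
		return -ENOIOCTLCMD;	/* not ours, let the caller fall back */
	}
}

static int foo_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	int ret = foo_ioctl_common(bdev, cmd, (void __user *)arg);

	return ret != -ENOIOCTLCMD ? ret : -ENOTTY;
}

#ifdef CONFIG_COMPAT
static int foo_compat_ioctl(struct block_device *bdev, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	/* the only difference: a 32-bit pointer must go through compat_ptr() */
	int ret = foo_ioctl_common(bdev, cmd, compat_ptr(arg));

	return ret != -ENOIOCTLCMD ? ret : -ENOTTY;
}
#endif

In the drivers converted here the fallback is scsi_ioctl() on the native path and scsi_compat_ioctl() on the compat path, which is why the common helpers signal "not handled" instead of failing outright.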
*/ -static int sd_zbc_check_zones(struct scsi_disk *sdkp, unsigned char *buf, - u32 *zblocks) +static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf, + u32 *zblocks) { - u64 zone_blocks = 0; + u64 zone_blocks; sector_t max_lba; unsigned char *rec; int ret; @@ -363,17 +362,9 @@ static int sd_zbc_check_zones(struct scsi_disk *sdkp, unsigned char *buf, } } - /* Parse REPORT ZONES header */ + /* Get the size of the first reported zone */ rec = buf + 64; zone_blocks = get_unaligned_be64(&rec[8]); - if (!zone_blocks || !is_power_of_2(zone_blocks)) { - if (sdkp->first_scan) - sd_printk(KERN_NOTICE, sdkp, - "Devices with non power of 2 zone " - "size are not supported\n"); - return -ENODEV; - } - if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) { if (sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, @@ -405,11 +396,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) if (ret) goto err; - /* - * Check zone size: only devices with a constant zone size (except - * an eventual last runt zone) that is a power of 2 are supported. - */ - ret = sd_zbc_check_zones(sdkp, buf, &zone_blocks); + /* Check the device capacity reported by report zones */ + ret = sd_zbc_check_capacity(sdkp, buf, &zone_blocks); if (ret != 0) goto err; diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 160748ad9c0f..bafeaf7b9ad8 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -405,6 +405,38 @@ sg_release(struct inode *inode, struct file *filp) return 0; } +static int get_sg_io_pack_id(int *pack_id, void __user *buf, size_t count) +{ + struct sg_header __user *old_hdr = buf; + int reply_len; + + if (count >= SZ_SG_HEADER) { + /* negative reply_len means v3 format, otherwise v1/v2 */ + if (get_user(reply_len, &old_hdr->reply_len)) + return -EFAULT; + + if (reply_len >= 0) + return get_user(*pack_id, &old_hdr->pack_id); + + if (in_compat_syscall() && + count >= sizeof(struct compat_sg_io_hdr)) { + struct compat_sg_io_hdr __user *hp = buf; + + return get_user(*pack_id, &hp->pack_id); + } + + if (count >= sizeof(struct sg_io_hdr)) { + struct sg_io_hdr __user *hp = buf; + + return get_user(*pack_id, &hp->pack_id); + } + } + + /* no valid header was passed, so ignore the pack_id */ + *pack_id = -1; + return 0; +} + static ssize_t sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) { @@ -413,8 +445,8 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) Sg_request *srp; int req_pack_id = -1; sg_io_hdr_t *hp; - struct sg_header *old_hdr = NULL; - int retval = 0; + struct sg_header *old_hdr; + int retval; /* * This could cause a response to be stranded. Close the associated @@ -429,79 +461,34 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_read: count=%d\n", (int) count)); - if (sfp->force_packid && (count >= SZ_SG_HEADER)) { - old_hdr = memdup_user(buf, SZ_SG_HEADER); - if (IS_ERR(old_hdr)) - return PTR_ERR(old_hdr); - if (old_hdr->reply_len < 0) { - if (count >= SZ_SG_IO_HDR) { - /* - * This is stupid. - * - * We're copying the whole sg_io_hdr_t from user - * space just to get the 'pack_id' field. But the - * field is at different offsets for the compat - * case, so we'll use "get_sg_io_hdr()" to copy - * the whole thing and convert it. - * - * We could do something like just calculating the - * offset based of 'in_compat_syscall()', but the - * 'compat_sg_io_hdr' definition is in the wrong - * place for that. 
- */ - sg_io_hdr_t *new_hdr; - new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL); - if (!new_hdr) { - retval = -ENOMEM; - goto free_old_hdr; - } - retval = get_sg_io_hdr(new_hdr, buf); - req_pack_id = new_hdr->pack_id; - kfree(new_hdr); - if (retval) { - retval = -EFAULT; - goto free_old_hdr; - } - } - } else - req_pack_id = old_hdr->pack_id; - } + if (sfp->force_packid) + retval = get_sg_io_pack_id(&req_pack_id, buf, count); + if (retval) + return retval; + srp = sg_get_rq_mark(sfp, req_pack_id); if (!srp) { /* now wait on packet to arrive */ - if (atomic_read(&sdp->detaching)) { - retval = -ENODEV; - goto free_old_hdr; - } - if (filp->f_flags & O_NONBLOCK) { - retval = -EAGAIN; - goto free_old_hdr; - } + if (atomic_read(&sdp->detaching)) + return -ENODEV; + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; retval = wait_event_interruptible(sfp->read_wait, (atomic_read(&sdp->detaching) || (srp = sg_get_rq_mark(sfp, req_pack_id)))); - if (atomic_read(&sdp->detaching)) { - retval = -ENODEV; - goto free_old_hdr; - } - if (retval) { + if (atomic_read(&sdp->detaching)) + return -ENODEV; + if (retval) /* -ERESTARTSYS as signal hit process */ - goto free_old_hdr; - } - } - if (srp->header.interface_id != '\0') { - retval = sg_new_read(sfp, buf, count, srp); - goto free_old_hdr; + return retval; } + if (srp->header.interface_id != '\0') + return sg_new_read(sfp, buf, count, srp); hp = &srp->header; - if (old_hdr == NULL) { - old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL); - if (! old_hdr) { - retval = -ENOMEM; - goto free_old_hdr; - } - } - memset(old_hdr, 0, SZ_SG_HEADER); + old_hdr = kzalloc(SZ_SG_HEADER, GFP_KERNEL); + if (!old_hdr) + return -ENOMEM; + old_hdr->reply_len = (int) hp->timeout; old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */ old_hdr->pack_id = hp->pack_id; @@ -575,7 +562,12 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) int err = 0, err2; int len; - if (count < SZ_SG_IO_HDR) { + if (in_compat_syscall()) { + if (count < sizeof(struct compat_sg_io_hdr)) { + err = -EINVAL; + goto err_out; + } + } else if (count < SZ_SG_IO_HDR) { err = -EINVAL; goto err_out; } @@ -919,19 +911,14 @@ static int put_compat_request_table(struct compat_sg_req_info __user *o, #endif static long -sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) +sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp, + unsigned int cmd_in, void __user *p) { - void __user *p = (void __user *)arg; int __user *ip = p; int result, val, read_only; - Sg_device *sdp; - Sg_fd *sfp; Sg_request *srp; unsigned long iflags; - if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) - return -ENXIO; - SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_ioctl: cmd=0x%x\n", (int) cmd_in)); read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); @@ -1154,29 +1141,44 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) cmd_in, filp->f_flags & O_NDELAY); if (result) return result; + + return -ENOIOCTLCMD; +} + +static long +sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) +{ + void __user *p = (void __user *)arg; + Sg_device *sdp; + Sg_fd *sfp; + int ret; + + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) + return -ENXIO; + + ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p); + if (ret != -ENOIOCTLCMD) + return ret; + return scsi_ioctl(sdp->device, cmd_in, p); } #ifdef CONFIG_COMPAT static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { + void __user *p = 
compat_ptr(arg); Sg_device *sdp; Sg_fd *sfp; - struct scsi_device *sdev; + int ret; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; - sdev = sdp->device; - if (sdev->host->hostt->compat_ioctl) { - int ret; - - ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg); - + ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p); + if (ret != -ENOIOCTLCMD) return ret; - } - - return -ENOIOCTLCMD; + + return scsi_compat_ioctl(sdp->device, cmd_in, p); } #endif diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 4664fdf75c0f..0fbb8fe6e521 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -38,6 +38,7 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/bio.h> +#include <linux/compat.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/cdrom.h> @@ -598,6 +599,51 @@ out: return ret; } +#ifdef CONFIG_COMPAT +static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, + unsigned long arg) +{ + struct scsi_cd *cd = scsi_cd(bdev->bd_disk); + struct scsi_device *sdev = cd->device; + void __user *argp = compat_ptr(arg); + int ret; + + mutex_lock(&sr_mutex); + + ret = scsi_ioctl_block_when_processing_errors(sdev, cmd, + (mode & FMODE_NDELAY) != 0); + if (ret) + goto out; + + scsi_autopm_get_device(sdev); + + /* + * Send SCSI addressing ioctls directly to mid level, send other + * ioctls to cdrom/block level. + */ + switch (cmd) { + case SCSI_IOCTL_GET_IDLUN: + case SCSI_IOCTL_GET_BUS_NUMBER: + ret = scsi_compat_ioctl(sdev, cmd, argp); + goto put; + } + + ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, (unsigned long)argp); + if (ret != -ENOSYS) + goto put; + + ret = scsi_compat_ioctl(sdev, cmd, argp); + +put: + scsi_autopm_put_device(sdev); + +out: + mutex_unlock(&sr_mutex); + return ret; + +} +#endif + static unsigned int sr_block_check_events(struct gendisk *disk, unsigned int clearing) { @@ -641,12 +687,11 @@ static const struct block_device_operations sr_bdops = .open = sr_block_open, .release = sr_block_release, .ioctl = sr_block_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sr_block_compat_ioctl, +#endif .check_events = sr_block_check_events, .revalidate_disk = sr_block_revalidate_disk, - /* * No compat_ioctl for now because sr_block_ioctl never * seems to pass arbitrary ioctls down to host drivers.
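Editor's note on the sg_read()/get_sg_io_pack_id() change above: the reason a plain offset read is not enough is that a structure containing pointers has a different layout for 32-bit callers, so the field of interest sits at a different offset and in_compat_syscall() has to pick the right view. A stand-alone sketch of the technique; bar_hdr and compat_bar_hdr are made-up structures, not sg's real headers.

#include <linux/compat.h>
#include <linux/uaccess.h>

struct bar_hdr {		/* native (64-bit) layout */
	void __user *buf;	/* 8 bytes */
	int pack_id;		/* offset 8 */
};

struct compat_bar_hdr {		/* what a 32-bit caller actually passed */
	compat_uptr_t buf;	/* 4 bytes */
	int pack_id;		/* offset 4 */
};

static int bar_get_pack_id(void __user *p, int *pack_id)
{
	if (in_compat_syscall()) {
		struct compat_bar_hdr __user *h32 = p;

		return get_user(*pack_id, &h32->pack_id);
	} else {
		struct bar_hdr __user *h = p;

		return get_user(*pack_id, &h->pack_id);
	}
}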
- */ }; static int sr_open(struct cdrom_device_info *cdi, int purpose) diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 9e3fff2de83e..393f3019ccac 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -3501,7 +3501,7 @@ out: /* The ioctl command */ -static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) +static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user *p) { int i, cmd_nr, cmd_type, bt; int retval = 0; @@ -3509,7 +3509,6 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) struct scsi_tape *STp = file->private_data; struct st_modedef *STm; struct st_partstat *STps; - void __user *p = (void __user *)arg; if (mutex_lock_interruptible(&STp->lock)) return -ERESTARTSYS; @@ -3824,9 +3823,19 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) } mutex_unlock(&STp->lock); switch (cmd_in) { + case SCSI_IOCTL_STOP_UNIT: + /* unload */ + retval = scsi_ioctl(STp->device, cmd_in, p); + if (!retval) { + STp->rew_at_close = 0; + STp->ready = ST_NO_TAPE; + } + return retval; + case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: break; + default: if ((cmd_in == SG_IO || cmd_in == SCSI_IOCTL_SEND_COMMAND || @@ -3840,42 +3849,46 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) return i; break; } - retval = scsi_ioctl(STp->device, cmd_in, p); - if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) { /* unload */ - STp->rew_at_close = 0; - STp->ready = ST_NO_TAPE; - } - return retval; + return -ENOTTY; out: mutex_unlock(&STp->lock); return retval; } +static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) +{ + void __user *p = (void __user *)arg; + struct scsi_tape *STp = file->private_data; + int ret; + + ret = st_ioctl_common(file, cmd_in, p); + if (ret != -ENOTTY) + return ret; + + return scsi_ioctl(STp->device, cmd_in, p); +} + #ifdef CONFIG_COMPAT static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) { void __user *p = compat_ptr(arg); struct scsi_tape *STp = file->private_data; - struct scsi_device *sdev = STp->device; - int ret = -ENOIOCTLCMD; + int ret; /* argument conversion is handled using put_user_mtpos/put_user_mtget */ switch (cmd_in) { - case MTIOCTOP: - return st_ioctl(file, MTIOCTOP, (unsigned long)p); case MTIOCPOS32: - return st_ioctl(file, MTIOCPOS, (unsigned long)p); + return st_ioctl_common(file, MTIOCPOS, p); case MTIOCGET32: - return st_ioctl(file, MTIOCGET, (unsigned long)p); + return st_ioctl_common(file, MTIOCGET, p); } - if (sdev->host->hostt->compat_ioctl) { + ret = st_ioctl_common(file, cmd_in, p); + if (ret != -ENOTTY) + return ret; - ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg); - - } - return ret; + return scsi_compat_ioctl(STp->device, cmd_in, p); } #endif diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c index 9dc17f1288f9..d37e2a69136a 100644 --- a/drivers/scsi/sym53c8xx_2/sym_nvram.c +++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c @@ -227,7 +227,7 @@ static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram) /* * 24C16 EEPROM reading. * - * GPOI0 - data in/data out + * GPIO0 - data in/data out * GPIO1 - clock * Symbios NVRAM wiring now also used by Tekram. */ @@ -524,7 +524,7 @@ static int sym_read_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram) /* * 93C46 EEPROM reading. 
* - * GPOI0 - data in + * GPIO0 - data in * GPIO1 - data out * GPIO2 - clock * GPIO4 - chip select diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c index 6feeb0faf123..56a6a1ed5ec2 100644 --- a/drivers/scsi/ufs/cdns-pltfrm.c +++ b/drivers/scsi/ufs/cdns-pltfrm.c @@ -19,6 +19,85 @@ #define CDNS_UFS_REG_HCLKDIV 0xFC #define CDNS_UFS_REG_PHY_XCFGD1 0x113C +#define CDNS_UFS_MAX_L4_ATTRS 12 + +struct cdns_ufs_host { + /** + * cdns_ufs_dme_attr_val - for storing L4 attributes + */ + u32 cdns_ufs_dme_attr_val[CDNS_UFS_MAX_L4_ATTRS]; +}; + +/** + * cdns_ufs_get_l4_attr - get L4 attributes on local side + * @hba: per adapter instance + * + */ +static void cdns_ufs_get_l4_attr(struct ufs_hba *hba) +{ + struct cdns_ufs_host *host = ufshcd_get_variant(hba); + + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERDEVICEID), + &host->cdns_ufs_dme_attr_val[0]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERCPORTID), + &host->cdns_ufs_dme_attr_val[1]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_TRAFFICCLASS), + &host->cdns_ufs_dme_attr_val[2]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PROTOCOLID), + &host->cdns_ufs_dme_attr_val[3]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTFLAGS), + &host->cdns_ufs_dme_attr_val[4]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_TXTOKENVALUE), + &host->cdns_ufs_dme_attr_val[5]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_RXTOKENVALUE), + &host->cdns_ufs_dme_attr_val[6]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE), + &host->cdns_ufs_dme_attr_val[7]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE), + &host->cdns_ufs_dme_attr_val[8]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CREDITSTOSEND), + &host->cdns_ufs_dme_attr_val[9]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTMODE), + &host->cdns_ufs_dme_attr_val[10]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), + &host->cdns_ufs_dme_attr_val[11]); +} + +/** + * cdns_ufs_set_l4_attr - set L4 attributes on local side + * @hba: per adapter instance + * + */ +static void cdns_ufs_set_l4_attr(struct ufs_hba *hba) +{ + struct cdns_ufs_host *host = ufshcd_get_variant(hba); + + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), 0); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), + host->cdns_ufs_dme_attr_val[0]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), + host->cdns_ufs_dme_attr_val[1]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS), + host->cdns_ufs_dme_attr_val[2]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PROTOCOLID), + host->cdns_ufs_dme_attr_val[3]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), + host->cdns_ufs_dme_attr_val[4]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_TXTOKENVALUE), + host->cdns_ufs_dme_attr_val[5]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_RXTOKENVALUE), + host->cdns_ufs_dme_attr_val[6]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE), + host->cdns_ufs_dme_attr_val[7]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE), + host->cdns_ufs_dme_attr_val[8]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CREDITSTOSEND), + host->cdns_ufs_dme_attr_val[9]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTMODE), + host->cdns_ufs_dme_attr_val[10]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), + host->cdns_ufs_dme_attr_val[11]); +} /** * Sets HCLKDIV register value based on the core_clk @@ -78,6 +157,22 @@ static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba, } /** + * Called around hibern8 enter/exit. 
+ * @hba: host controller instance + * @cmd: UIC Command + * @status: notify stage (pre, post change) + * + */ +static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd, + enum ufs_notify_change_status status) +{ + if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER) + cdns_ufs_get_l4_attr(hba); + if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT) + cdns_ufs_set_l4_attr(hba); +} + +/** * Called before and after Link startup is carried out. * @hba: host controller instance * @status: notify stage (pre, post change) @@ -117,6 +212,14 @@ static int cdns_ufs_link_startup_notify(struct ufs_hba *hba, static int cdns_ufs_init(struct ufs_hba *hba) { int status = 0; + struct cdns_ufs_host *host; + struct device *dev = hba->dev; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + + if (!host) + return -ENOMEM; + ufshcd_set_variant(hba, host); if (hba->vops && hba->vops->phy_initialization) status = hba->vops->phy_initialization(hba); @@ -144,8 +247,10 @@ static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba) static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = { .name = "cdns-ufs-pltfm", + .init = cdns_ufs_init, .hce_enable_notify = cdns_ufs_hce_enable_notify, .link_startup_notify = cdns_ufs_link_startup_notify, + .hibern8_notify = cdns_ufs_hibern8_notify, }; static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = { @@ -154,6 +259,7 @@ static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = { .hce_enable_notify = cdns_ufs_hce_enable_notify, .link_startup_notify = cdns_ufs_link_startup_notify, .phy_initialization = cdns_ufs_m31_16nm_phy_initialization, + .hibern8_notify = cdns_ufs_hibern8_notify, }; static const struct of_device_id cdns_ufs_of_match[] = { @@ -219,6 +325,7 @@ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = { static struct platform_driver cdns_ufs_pltfrm_driver = { .probe = cdns_ufs_pltfrm_probe, .remove = cdns_ufs_pltfrm_remove, + .shutdown = ufshcd_pltfrm_shutdown, .driver = { .name = "cdns-ufshcd", .pm = &cdns_ufs_dev_pm_ops, diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c index 83e28edc3ac5..53eae5fe2ade 100644 --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c @@ -6,16 +6,30 @@ * Peter Wang <peter.wang@mediatek.com> */ +#include <linux/arm-smccc.h> +#include <linux/bitfield.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/soc/mediatek/mtk_sip_svc.h> #include "ufshcd.h" #include "ufshcd-pltfrm.h" +#include "ufs_quirks.h" #include "unipro.h" #include "ufs-mediatek.h" +#define ufs_mtk_smc(cmd, val, res) \ + arm_smccc_smc(MTK_SIP_UFS_CONTROL, \ + cmd, val, 0, 0, 0, 0, 0, &(res)) + +#define ufs_mtk_ref_clk_notify(on, res) \ + ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res) + +#define ufs_mtk_device_reset_ctrl(high, res) \ + ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res) + static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) { u32 tmp; @@ -81,6 +95,49 @@ static int ufs_mtk_bind_mphy(struct ufs_hba *hba) return err; } +static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct arm_smccc_res res; + unsigned long timeout; + u32 value; + + if (host->ref_clk_enabled == on) + return 0; + + if (on) { + ufs_mtk_ref_clk_notify(on, res); + ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL); + } else { + ufshcd_writel(hba, REFCLK_RELEASE, 
REG_UFS_REFCLK_CTRL); + } + + /* Wait for ack */ + timeout = jiffies + msecs_to_jiffies(REFCLK_REQ_TIMEOUT_MS); + do { + value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL); + + /* Wait until ack bit equals to req bit */ + if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST)) + goto out; + + usleep_range(100, 200); + } while (time_before(jiffies, timeout)); + + dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value); + + ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res); + + return -ETIMEDOUT; + +out: + host->ref_clk_enabled = on; + if (!on) + ufs_mtk_ref_clk_notify(on, res); + + return 0; +} + /** * ufs_mtk_setup_clocks - enables/disable clocks * @hba: host controller instance @@ -105,12 +162,16 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, switch (status) { case PRE_CHANGE: - if (!on) + if (!on) { + ufs_mtk_setup_ref_clk(hba, on); ret = phy_power_off(host->mphy); + } break; case POST_CHANGE: - if (on) + if (on) { ret = phy_power_on(host->mphy); + ufs_mtk_setup_ref_clk(hba, on); + } break; } @@ -150,6 +211,9 @@ static int ufs_mtk_init(struct ufs_hba *hba) /* Enable runtime autosuspend */ hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; + /* Enable clock-gating */ + hba->caps |= UFSHCD_CAP_CLK_GATING; + /* * ufshcd_vops_init() is invoked after * ufshcd_setup_clock(true) in ufshcd_hba_init() thus @@ -238,6 +302,23 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba) return ret; } +static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba) +{ + unsigned long flags; + u32 ah_ms; + + if (ufshcd_is_clkgating_allowed(hba)) { + if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) + ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, + hba->ahit); + else + ah_ms = 10; + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_gating.delay_ms = ah_ms + 5; + spin_unlock_irqrestore(hba->host->host_lock, flags); + } +} + static int ufs_mtk_post_link(struct ufs_hba *hba) { /* disable device LCC */ @@ -246,6 +327,15 @@ static int ufs_mtk_post_link(struct ufs_hba *hba) /* enable unipro clock gating feature */ ufs_mtk_cfg_unipro_cg(hba, true); + /* configure auto-hibern8 timer to 10ms */ + if (ufshcd_is_auto_hibern8_supported(hba)) { + ufshcd_auto_hibern8_update(hba, + FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) | + FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3)); + } + + ufs_mtk_setup_clk_gating(hba); + return 0; } @@ -269,12 +359,86 @@ static int ufs_mtk_link_startup_notify(struct ufs_hba *hba, return ret; } +static void ufs_mtk_device_reset(struct ufs_hba *hba) +{ + struct arm_smccc_res res; + + ufs_mtk_device_reset_ctrl(0, res); + + /* + * The reset signal is active low. UFS devices shall detect + * more than or equal to 1us of positive or negative RST_n + * pulse width. + * + * To be on safe side, keep the reset low for at least 10us. 
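Editor's note on ufs_mtk_setup_ref_clk() above: REG_UFS_REFCLK_CTRL packs the request in bit 0 and the controller's acknowledgement in bit 1, so the loop shifts the ack bit down before comparing it with the request bit and gives up after REFCLK_REQ_TIMEOUT_MS. A self-contained sketch of that wait-for-ack idiom; the register layout and the readl()-based accessor are assumptions for illustration only.

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>

#define REQ_BIT	BIT(0)	/* written by the host to request the clock */
#define ACK_BIT	BIT(1)	/* mirrored back by the controller */

/* Poll @reg until the ack bit mirrors the request bit, or time out. */
static int wait_for_refclk_ack(void __iomem *reg, unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	u32 val;

	do {
		val = readl(reg);
		/* ack sits one bit above req, hence the shift before comparing */
		if (((val & ACK_BIT) >> 1) == (val & REQ_BIT))
			return 0;
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}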
+ */ + usleep_range(10, 15); + + ufs_mtk_device_reset_ctrl(1, res); + + /* Some devices may need time to respond to rst_n */ + usleep_range(10000, 15000); + + dev_info(hba->dev, "device reset done\n"); +} + +static int ufs_mtk_link_set_hpm(struct ufs_hba *hba) +{ + int err; + + err = ufshcd_hba_enable(hba); + if (err) + return err; + + err = ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0), + 0); + if (err) + return err; + + err = ufshcd_uic_hibern8_exit(hba); + if (!err) + ufshcd_set_link_active(hba); + else + return err; + + err = ufshcd_make_hba_operational(hba); + if (err) + return err; + + return 0; +} + +static int ufs_mtk_link_set_lpm(struct ufs_hba *hba) +{ + int err; + + err = ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0), + 1); + if (err) { + /* Resume UniPro state for following error recovery */ + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0), + 0); + return err; + } + + return 0; +} + static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) { + int err; struct ufs_mtk_host *host = ufshcd_get_variant(hba); - if (ufshcd_is_link_hibern8(hba)) + if (ufshcd_is_link_hibern8(hba)) { + err = ufs_mtk_link_set_lpm(hba); + if (err) + return -EAGAIN; phy_power_off(host->mphy); + ufs_mtk_setup_ref_clk(hba, false); + } return 0; } @@ -282,9 +446,40 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); + int err; - if (ufshcd_is_link_hibern8(hba)) + if (ufshcd_is_link_hibern8(hba)) { + ufs_mtk_setup_ref_clk(hba, true); phy_power_on(host->mphy); + err = ufs_mtk_link_set_hpm(hba); + if (err) + return err; + } + + return 0; +} + +static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba) +{ + ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl "); + + ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg "); + + ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL, + REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4, + "MPHY Ctrl "); + + /* Direct debugging information to REG_MTK_PROBE */ + ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL); + ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe "); +} + +static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba) +{ + struct ufs_dev_info *dev_info = &hba->dev_info; + + if (dev_info->wmanufacturerid == UFS_VENDOR_SAMSUNG) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6); return 0; } @@ -301,8 +496,11 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = { .setup_clocks = ufs_mtk_setup_clocks, .link_startup_notify = ufs_mtk_link_startup_notify, .pwr_change_notify = ufs_mtk_pwr_change_notify, + .apply_dev_quirks = ufs_mtk_apply_dev_quirks, .suspend = ufs_mtk_suspend, .resume = ufs_mtk_resume, + .dbg_register_dump = ufs_mtk_dbg_register_dump, + .device_reset = ufs_mtk_device_reset, }; /** diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h index 19f8c42fe06f..fccdd979d6fb 100644 --- a/drivers/scsi/ufs/ufs-mediatek.h +++ b/drivers/scsi/ufs/ufs-mediatek.h @@ -6,6 +6,30 @@ #ifndef _UFS_MEDIATEK_H #define _UFS_MEDIATEK_H +#include <linux/bitops.h> +#include <linux/soc/mediatek/mtk_sip_svc.h> + +/* + * Vendor specific UFSHCI Registers + */ +#define REG_UFS_REFCLK_CTRL 0x144 +#define REG_UFS_EXTREG 0x2100 +#define REG_UFS_MPHYCTRL 0x2200 +#define REG_UFS_REJECT_MON 0x22AC +#define REG_UFS_DEBUG_SEL 0x22C0 +#define REG_UFS_PROBE 0x22C8 + +/* + * Ref-clk control + * + * Values for register REG_UFS_REFCLK_CTRL + */ +#define 
REFCLK_RELEASE 0x0 +#define REFCLK_REQUEST BIT(0) +#define REFCLK_ACK BIT(1) + +#define REFCLK_REQ_TIMEOUT_MS 3 + /* * Vendor specific pre-defined parameters */ @@ -30,6 +54,13 @@ #define VS_UNIPROPOWERDOWNCONTROL 0xD0A8 /* + * SiP commands + */ +#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276) +#define UFS_MTK_SIP_DEVICE_RESET BIT(1) +#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3) + +/* * VS_DEBUGCLOCKENABLE */ enum { @@ -48,6 +79,7 @@ enum { struct ufs_mtk_host { struct ufs_hba *hba; struct phy *mphy; + bool ref_clk_enabled; }; #endif /* !_UFS_MEDIATEK_H */ diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c index ad2abc96c0f1..dbdf8b01abed 100644 --- a/drivers/scsi/ufs/ufs-sysfs.c +++ b/drivers/scsi/ufs/ufs-sysfs.c @@ -118,26 +118,6 @@ static ssize_t spm_target_link_state_show(struct device *dev, ufs_pm_lvl_states[hba->spm_lvl].link_state)); } -static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) -{ - unsigned long flags; - - if (!ufshcd_is_auto_hibern8_supported(hba)) - return; - - spin_lock_irqsave(hba->host->host_lock, flags); - if (hba->ahit != ahit) - hba->ahit = ahit; - spin_unlock_irqrestore(hba->host->host_lock, flags); - if (!pm_runtime_suspended(hba->dev)) { - pm_runtime_get_sync(hba->dev); - ufshcd_hold(hba, false); - ufshcd_auto_hibern8_enable(hba); - ufshcd_release(hba); - pm_runtime_put(hba->dev); - } -} - /* Convert Auto-Hibernate Idle Timer register value to microseconds */ static int ufshcd_ahit_to_us(u32 ahit) { @@ -733,7 +713,7 @@ static ssize_t _pname##_show(struct device *dev, \ struct scsi_device *sdev = to_scsi_device(dev); \ struct ufs_hba *hba = shost_priv(sdev->host); \ u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \ - if (!ufs_is_valid_unit_desc_lun(lun)) \ + if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) \ return -EINVAL; \ return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \ lun, _duname##_DESC_PARAM##_puname, buf, _size); \ diff --git a/drivers/scsi/ufs/ufs-sysfs.h b/drivers/scsi/ufs/ufs-sysfs.h index e5621e59a432..0f4e750a6748 100644 --- a/drivers/scsi/ufs/ufs-sysfs.h +++ b/drivers/scsi/ufs/ufs-sysfs.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Copyright (C) 2018 Western Digital Corporation +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Western Digital Corporation */ #ifndef __UFS_SYSFS_H__ diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 3327981ef894..dde2eb02f76f 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -63,7 +63,6 @@ #define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F #define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID) #define UFS_UPIU_WLUN_ID (1 << 7) -#define UFS_UPIU_MAX_GENERAL_LUN 8 /* Well known logical unit id in LUN field of UPIU */ enum { @@ -499,9 +498,9 @@ struct ufs_query_res { #define UFS_VREG_VCC_MAX_UV 3600000 /* uV */ #define UFS_VREG_VCC_1P8_MIN_UV 1700000 /* uV */ #define UFS_VREG_VCC_1P8_MAX_UV 1950000 /* uV */ -#define UFS_VREG_VCCQ_MIN_UV 1100000 /* uV */ -#define UFS_VREG_VCCQ_MAX_UV 1300000 /* uV */ -#define UFS_VREG_VCCQ2_MIN_UV 1650000 /* uV */ +#define UFS_VREG_VCCQ_MIN_UV 1140000 /* uV */ +#define UFS_VREG_VCCQ_MAX_UV 1260000 /* uV */ +#define UFS_VREG_VCCQ2_MIN_UV 1700000 /* uV */ #define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */ /* @@ -530,28 +529,28 @@ struct ufs_dev_info { bool f_power_on_wp_en; /* Keeps information if any of the LU is power on write protected */ bool is_lu_power_on_wp; -}; - -#define MAX_MODEL_LEN 16 -/** - * ufs_dev_desc - ufs device details from the device descriptor - * - 
* @wmanufacturerid: card details - * @model: card model - */ -struct ufs_dev_desc { + /* Maximum number of general LU supported by the UFS device */ + u8 max_lu_supported; u16 wmanufacturerid; + /*UFS device Product Name */ u8 *model; }; /** * ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor + * @dev_info: pointer of instance of struct ufs_dev_info * @lun: LU number to check * @return: true if the lun has a matching unit descriptor, false otherwise */ -static inline bool ufs_is_valid_unit_desc_lun(u8 lun) +static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, + u8 lun) { - return lun == UFS_UPIU_RPMB_WLUN || (lun < UFS_UPIU_MAX_GENERAL_LUN); + if (!dev_info || !dev_info->max_lu_supported) { + pr_err("Max General LU supported by UFS isn't initilized\n"); + return false; + } + + return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported); } #endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index fe6cad9b2a0d..d0ab147f98d3 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -22,16 +22,17 @@ * @quirk: device quirk */ struct ufs_dev_fix { - struct ufs_dev_desc card; + u16 wmanufacturerid; + u8 *model; unsigned int quirk; }; -#define END_FIX { { 0 }, 0 } +#define END_FIX { } /* add specific device quirk */ #define UFS_FIX(_vendor, _model, _quirk) { \ - .card.wmanufacturerid = (_vendor),\ - .card.model = (_model), \ + .wmanufacturerid = (_vendor),\ + .model = (_model), \ .quirk = (_quirk), \ } diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index b5966faf3e98..abd0e6b05f79 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -246,11 +246,10 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba); static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); static void ufshcd_hba_exit(struct ufs_hba *hba); -static int ufshcd_probe_hba(struct ufs_hba *hba); +static int ufshcd_probe_hba(struct ufs_hba *hba, bool async); static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, bool skip_ref_clk); static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); -static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); @@ -266,26 +265,18 @@ static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag) return tag >= 0 && tag < hba->nutrs; } -static inline int ufshcd_enable_irq(struct ufs_hba *hba) +static inline void ufshcd_enable_irq(struct ufs_hba *hba) { - int ret = 0; - if (!hba->is_irq_enabled) { - ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, - hba); - if (ret) - dev_err(hba->dev, "%s: request_irq failed, ret=%d\n", - __func__, ret); + enable_irq(hba->irq); hba->is_irq_enabled = true; } - - return ret; } static inline void ufshcd_disable_irq(struct ufs_hba *hba) { if (hba->is_irq_enabled) { - free_irq(hba->irq, hba); + disable_irq(hba->irq); hba->is_irq_enabled = false; } } @@ -335,27 +326,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, u8 opcode = 0; u32 intr, doorbell; struct ufshcd_lrb *lrbp = &hba->lrb[tag]; + struct scsi_cmnd *cmd = lrbp->cmd; int transfer_len = -1; if (!trace_ufshcd_command_enabled()) { /* trace UPIU W/O tracing command */ - if (lrbp->cmd) + if (cmd) ufshcd_add_cmd_upiu_trace(hba, tag, str); return; 
} - if (lrbp->cmd) { /* data phase exists */ + if (cmd) { /* data phase exists */ /* trace UPIU also */ ufshcd_add_cmd_upiu_trace(hba, tag, str); - opcode = (u8)(*lrbp->cmd->cmnd); + opcode = cmd->cmnd[0]; if ((opcode == READ_10) || (opcode == WRITE_10)) { /* * Currently we only fully trace read(10) and write(10) * commands */ - if (lrbp->cmd->request && lrbp->cmd->request->bio) - lba = - lrbp->cmd->request->bio->bi_iter.bi_sector; + if (cmd->request && cmd->request->bio) + lba = cmd->request->bio->bi_iter.bi_sector; transfer_len = be32_to_cpu( lrbp->ucd_req_ptr->sc.exp_data_transfer_len); } @@ -393,7 +384,7 @@ static void ufshcd_print_err_hist(struct ufs_hba *hba, for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) { int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH; - if (err_hist->reg[p] == 0) + if (err_hist->tstamp[p] == 0) continue; dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p, err_hist->reg[p], ktime_to_us(err_hist->tstamp[p])); @@ -401,7 +392,7 @@ static void ufshcd_print_err_hist(struct ufs_hba *hba, } if (!found) - dev_err(hba->dev, "No record of %s errors\n", err_name); + dev_err(hba->dev, "No record of %s\n", err_name); } static void ufshcd_print_host_regs(struct ufs_hba *hba) @@ -436,8 +427,7 @@ static void ufshcd_print_host_regs(struct ufs_hba *hba) ufshcd_print_clk_freqs(hba); - if (hba->vops && hba->vops->dbg_register_dump) - hba->vops->dbg_register_dump(hba); + ufshcd_vops_dbg_register_dump(hba); } static @@ -497,8 +487,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) static void ufshcd_print_host_state(struct ufs_hba *hba) { dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); - dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n", - hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks); + dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n", + hba->outstanding_reqs, hba->outstanding_tasks); dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", hba->saved_err, hba->saved_uic_err); dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", @@ -646,40 +636,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) } /** - * ufshcd_get_tm_free_slot - get a free slot for task management request - * @hba: per adapter instance - * @free_slot: pointer to variable with available slot value - * - * Get a free tag and lock it until ufshcd_put_tm_slot() is called. - * Returns 0 if free slot is not available, else return 1 with tag value - * in @free_slot. - */ -static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot) -{ - int tag; - bool ret = false; - - if (!free_slot) - goto out; - - do { - tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs); - if (tag >= hba->nutmrs) - goto out; - } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use)); - - *free_slot = tag; - ret = true; -out: - return ret; -} - -static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot) -{ - clear_bit_unlock(slot, &hba->tm_slots_in_use); -} - -/** * ufshcd_utrl_clear - Clear a bit in UTRLCLR register * @hba: per adapter instance * @pos: position of the bit to be cleared @@ -1273,6 +1229,24 @@ out: return ret; } +static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved) +{ + int *busy = priv; + + WARN_ON_ONCE(reserved); + (*busy)++; + return false; +} + +/* Whether or not any tag is in use by a request that is in progress. 
*/ +static bool ufshcd_any_tag_in_use(struct ufs_hba *hba) +{ + struct request_queue *q = hba->cmd_queue; + int busy = 0; + + blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy); + return busy; +} static int ufshcd_devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *stat) @@ -1490,6 +1464,8 @@ static void ufshcd_ungate_work(struct work_struct *work) spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_setup_clocks(hba, true); + ufshcd_enable_irq(hba); + /* Exit from hibern8 */ if (ufshcd_can_hibern8_during_gating(hba)) { /* Prevent gating in this path */ @@ -1619,7 +1595,7 @@ static void ufshcd_gate_work(struct work_struct *work) if (hba->clk_gating.active_reqs || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL - || hba->lrb_in_use || hba->outstanding_tasks + || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks || hba->active_uic_cmd || hba->uic_async_done) goto rel_lock; @@ -1636,6 +1612,8 @@ static void ufshcd_gate_work(struct work_struct *work) ufshcd_set_link_hibern8(hba); } + ufshcd_disable_irq(hba); + if (!ufshcd_is_link_active(hba)) ufshcd_setup_clocks(hba, false); else @@ -1673,7 +1651,7 @@ static void __ufshcd_release(struct ufs_hba *hba) if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL - || hba->lrb_in_use || hba->outstanding_tasks + || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks || hba->active_uic_cmd || hba->uic_async_done || ufshcd_eh_in_progress(hba)) return; @@ -1881,12 +1859,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) { hba->lrb[task_tag].issue_time_stamp = ktime_get(); hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0); + ufshcd_add_command_trace(hba, task_tag, "send"); ufshcd_clk_scaling_start_busy(hba); __set_bit(task_tag, &hba->outstanding_reqs); ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); /* Make sure that doorbell is committed immediately */ wmb(); - ufshcd_add_command_trace(hba, task_tag, "send"); } /** @@ -2239,6 +2217,7 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, static void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) { + struct scsi_cmnd *cmd = lrbp->cmd; struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; unsigned short cdb_len; @@ -2252,12 +2231,11 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) /* Total EHS length and Data segment length will be zero */ ucd_req_ptr->header.dword_2 = 0; - ucd_req_ptr->sc.exp_data_transfer_len = - cpu_to_be32(lrbp->cmd->sdb.length); + ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length); - cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE); + cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE); memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); - memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len); + memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len); memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); } @@ -2443,22 +2421,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) hba->req_abort_count = 0; - /* acquire the tag to make sure device cmds don't use it */ - if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { - /* - * Dev manage command in progress, requeue the command. - * Requeuing the command helps in cases where the request *may* - * find different tag instead of waiting for dev manage command - * completion. 
- */ - err = SCSI_MLQUEUE_HOST_BUSY; - goto out; - } - err = ufshcd_hold(hba, true); if (err) { err = SCSI_MLQUEUE_HOST_BUSY; - clear_bit_unlock(tag, &hba->lrb_in_use); goto out; } WARN_ON(hba->clk_gating.state != CLKS_ON); @@ -2479,7 +2444,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) err = ufshcd_map_sg(hba, lrbp); if (err) { lrbp->cmd = NULL; - clear_bit_unlock(tag, &hba->lrb_in_use); + ufshcd_release(hba); goto out; } /* Make sure descriptors are ready before ringing the doorbell */ @@ -2627,44 +2592,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, } /** - * ufshcd_get_dev_cmd_tag - Get device management command tag - * @hba: per-adapter instance - * @tag_out: pointer to variable with available slot value - * - * Get a free slot and lock it until device management command - * completes. - * - * Returns false if free slot is unavailable for locking, else - * return true with tag value in @tag. - */ -static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out) -{ - int tag; - bool ret = false; - unsigned long tmp; - - if (!tag_out) - goto out; - - do { - tmp = ~hba->lrb_in_use; - tag = find_last_bit(&tmp, hba->nutrs); - if (tag >= hba->nutrs) - goto out; - } while (test_and_set_bit_lock(tag, &hba->lrb_in_use)); - - *tag_out = tag; - ret = true; -out: - return ret; -} - -static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) -{ - clear_bit_unlock(tag, &hba->lrb_in_use); -} - -/** * ufshcd_exec_dev_cmd - API for sending device management requests * @hba: UFS hba * @cmd_type: specifies the type (NOP, Query...) @@ -2676,6 +2603,8 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, int timeout) { + struct request_queue *q = hba->cmd_queue; + struct request *req; struct ufshcd_lrb *lrbp; int err; int tag; @@ -2689,7 +2618,13 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, * Even though we use wait_event() which sleeps indefinitely, * the maximum wait time is bounded by SCSI request timeout. */ - wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); + req = blk_get_request(q, REQ_OP_DRV_OUT, 0); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out_unlock; + } + tag = req->tag; + WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag)); init_completion(&wait); lrbp = &hba->lrb[tag]; @@ -2714,8 +2649,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, err ? 
"query_complete_err" : "query_complete"); out_put_tag: - ufshcd_put_dev_cmd_tag(hba, tag); - wake_up(&hba->dev_cmd.tag_wq); + blk_put_request(req); +out_unlock: up_read(&hba->clk_scaling_lock); return err; } @@ -2918,7 +2853,7 @@ static int ufshcd_query_attr_retry(struct ufs_hba *hba, int ret = 0; u32 retries; - for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { ret = ufshcd_query_attr(hba, opcode, idn, index, selector, attr_val); if (ret) @@ -3210,17 +3145,6 @@ static inline int ufshcd_read_desc(struct ufs_hba *hba, return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); } -static inline int ufshcd_read_power_desc(struct ufs_hba *hba, - u8 *buf, - u32 size) -{ - return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); -} - -static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) -{ - return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); -} /** * struct uc_string_id - unicode string @@ -3345,7 +3269,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, * Unit descriptors are only available for general purpose LUs (LUN id * from 0 to 7) and RPMB Well known LU. */ - if (!ufs_is_valid_unit_desc_lun(lun)) + if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) return -EOPNOTSUPP; return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, @@ -3929,7 +3853,7 @@ out: return ret; } -static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) +int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) { struct uic_command uic_cmd = {0}; int ret; @@ -3955,6 +3879,25 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) return ret; } +EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit); + +void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) +{ + unsigned long flags; + + if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT)) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->ahit == ahit) + goto out_unlock; + hba->ahit = ahit; + if (!pm_runtime_suspended(hba->dev)) + ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); +out_unlock: + spin_unlock_irqrestore(hba->host->host_lock, flags); +} +EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update); void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) { @@ -4095,6 +4038,26 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba, ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), pwr_mode->hs_rate); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), + DL_FC0ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), + DL_TC0ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), + DL_AFC0ReqTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3), + DL_FC1ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4), + DL_TC1ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5), + DL_AFC1ReqTimeOutVal_Default); + + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal), + DL_FC0ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal), + DL_TC0ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal), + DL_AFC0ReqTimeOutVal_Default); + ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 | pwr_mode->pwr_tx); @@ -4188,7 +4151,7 @@ out: * * Returns 0 on success, non-zero value on failure */ -static int ufshcd_make_hba_operational(struct ufs_hba *hba) +int ufshcd_make_hba_operational(struct 
ufs_hba *hba) { int err = 0; u32 reg; @@ -4234,6 +4197,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) out: return err; } +EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational); /** * ufshcd_hba_stop - Send controller to reset state @@ -4311,7 +4275,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba) return 0; } -static int ufshcd_hba_enable(struct ufs_hba *hba) +int ufshcd_hba_enable(struct ufs_hba *hba) { int ret; @@ -4336,6 +4300,8 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) return ret; } +EXPORT_SYMBOL_GPL(ufshcd_hba_enable); + static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) { int tx_lanes, i, err = 0; @@ -4372,13 +4338,14 @@ static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) return ufshcd_disable_tx_lcc(hba, true); } -static void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist, - u32 reg) +void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist, + u32 reg) { reg_hist->reg[reg_hist->pos] = reg; reg_hist->tstamp[reg_hist->pos] = ktime_get(); reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH; } +EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist); /** * ufshcd_link_startup - Initialize unipro link startup @@ -4561,7 +4528,7 @@ static int ufshcd_get_lu_wp(struct ufs_hba *hba, * protected so skip reading bLUWriteProtect parameter for * it. For other W-LUs, UNIT DESCRIPTOR is not available. */ - else if (lun >= UFS_UPIU_MAX_GENERAL_LUN) + else if (lun >= hba->dev_info.max_lu_supported) ret = -ENOTSUPP; else ret = ufshcd_read_unit_desc_param(hba, @@ -4608,6 +4575,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ sdev->use_10_for_ms = 1; + /* DBD field should be set to 1 in mode sense(10) */ + sdev->set_dbd_for_ms = 1; + /* allow SCSI layer to restart the device in case of errors */ sdev->allow_restart = 1; @@ -4799,7 +4769,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) break; } /* end of switch */ - if (host_byte(result) != DID_OK) + if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) ufshcd_print_trs(hba, 1 << lrbp->task_tag, true); return result; } @@ -4856,12 +4826,13 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, cmd->result = result; /* Mark completed command as NULL in LRB */ lrbp->cmd = NULL; - clear_bit_unlock(index, &hba->lrb_in_use); + lrbp->compl_time_stamp = ktime_get(); /* Do not touch lrbp after scsi done */ cmd->scsi_done(cmd); __ufshcd_release(hba); } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { + lrbp->compl_time_stamp = ktime_get(); if (hba->dev_cmd.complete) { ufshcd_add_command_trace(hba, index, "dev_complete"); @@ -4870,17 +4841,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, } if (ufshcd_is_clkscaling_supported(hba)) hba->clk_scaling.active_reqs--; - - lrbp->compl_time_stamp = ktime_get(); } /* clear corresponding bits of completed commands */ hba->outstanding_reqs ^= completed_reqs; ufshcd_clk_scaling_update_busy(hba); - - /* we might have free'd some tags above */ - wake_up(&hba->dev_cmd.tag_wq); } /** @@ -5053,6 +5019,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) hba->auto_bkops_enabled = false; trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled"); + hba->is_urgent_bkops_lvl_checked = false; out: return err; } @@ -5077,6 +5044,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; 
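Editor's note on ufshcd_update_reg_hist() above (and the matching dump loop in ufshcd_print_err_hist() earlier): together they form a small fixed-size history ring where writes wrap at UFS_ERR_REG_HIST_LENGTH and the dump starts at the current position so entries come out oldest-first. A generic sketch of that idiom; the names, the eight-entry size and the microsecond timestamps are illustrative, not the driver's definitions.

#include <linux/printk.h>
#include <linux/types.h>

#define HIST_LEN 8

struct err_hist {
	u32 val[HIST_LEN];
	u64 tstamp_us[HIST_LEN];
	int pos;
};

static void hist_record(struct err_hist *h, u32 val, u64 now_us)
{
	h->val[h->pos] = val;
	h->tstamp_us[h->pos] = now_us;
	h->pos = (h->pos + 1) % HIST_LEN;	/* overwrite the oldest slot next */
}

static void hist_dump(const struct err_hist *h)
{
	int i;

	for (i = 0; i < HIST_LEN; i++) {
		/* start at pos so the oldest recorded entry is printed first */
		int p = (i + h->pos) % HIST_LEN;

		if (h->tstamp_us[p])
			pr_info("hist[%d] = 0x%x at %llu us\n",
				p, h->val[p], h->tstamp_us[p]);
	}
}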
ufshcd_disable_auto_bkops(hba); } + hba->is_urgent_bkops_lvl_checked = false; } static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) @@ -5123,6 +5091,7 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba, err = ufshcd_enable_auto_bkops(hba); else err = ufshcd_disable_auto_bkops(hba); + hba->urgent_bkops_lvl = curr_status; out: return err; } @@ -5200,7 +5169,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work) hba = container_of(work, struct ufs_hba, eeh_work); pm_runtime_get_sync(hba->dev); - scsi_block_requests(hba->host); + ufshcd_scsi_block_requests(hba); err = ufshcd_get_ee_status(hba, &status); if (err) { dev_err(hba->dev, "%s: failed to get exception status %d\n", @@ -5214,7 +5183,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work) ufshcd_bkops_exception_event_handler(hba); out: - scsi_unblock_requests(hba->host); + ufshcd_scsi_unblock_requests(hba); pm_runtime_put_sync(hba->dev); return; } @@ -5348,8 +5317,8 @@ static void ufshcd_err_handler(struct work_struct *work) /* * if host reset is required then skip clearing the pending - * transfers forcefully because they will automatically get - * cleared after link startup. + * transfers forcefully because they will get cleared during + * host reset and restore */ if (needs_reset) goto skip_pending_xfer_clear; @@ -5603,6 +5572,27 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba) return retval; } +struct ctm_info { + struct ufs_hba *hba; + unsigned long pending; + unsigned int ncpl; +}; + +static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved) +{ + struct ctm_info *const ci = priv; + struct completion *c; + + WARN_ON_ONCE(reserved); + if (test_bit(req->tag, &ci->pending)) + return true; + ci->ncpl++; + c = req->end_io_data; + if (c) + complete(c); + return true; +} + /** * ufshcd_tmc_handler - handle task management function completion * @hba: per adapter instance @@ -5613,16 +5603,14 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba) */ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) { - u32 tm_doorbell; + struct request_queue *q = hba->tmf_queue; + struct ctm_info ci = { + .hba = hba, + .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL), + }; - tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); - hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; - if (hba->tm_condition) { - wake_up(&hba->tm_wq); - return IRQ_HANDLED; - } else { - return IRQ_NONE; - } + blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci); + return ci.ncpl ? IRQ_HANDLED : IRQ_NONE; } /** @@ -5728,7 +5716,10 @@ out: static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, struct utp_task_req_desc *treq, u8 tm_function) { + struct request_queue *q = hba->tmf_queue; struct Scsi_Host *host = hba->host; + DECLARE_COMPLETION_ONSTACK(wait); + struct request *req; unsigned long flags; int free_slot, task_tag, err; @@ -5737,7 +5728,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, * Even though we use wait_event() which sleeps indefinitely, * the maximum wait time is bounded by %TM_CMD_TIMEOUT. 
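Editor's note on the task-management path above: instead of a hand-rolled tm_slots_in_use bitmap, a free slot is now borrowed from the blk-mq tag space by allocating a throw-away driver-private request, and req->end_io_data carries the completion that the interrupt handler signals. A compressed sketch of that allocate/issue/release shape; run_internal_cmd(), the 100 ms timeout and the queue argument are assumptions, not the driver's code.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/jiffies.h>

static int run_internal_cmd(struct request_queue *q, struct completion *done)
{
	struct request *req;
	int ret = 0;

	/* borrow a (reserved) tag; this sleeps until one is free */
	req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->end_io_data = done;	/* lets the IRQ path find the waiter */

	/* ... issue the command, using req->tag as the hardware slot ... */

	if (!wait_for_completion_io_timeout(done, msecs_to_jiffies(100))) {
		req->end_io_data = NULL;	/* avoid a use-after-free on late completion */
		ret = -ETIMEDOUT;
	}

	blk_put_request(req);		/* return the tag to blk-mq */
	return ret;
}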
*/ - wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); + req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED); + req->end_io_data = &wait; + free_slot = req->tag; + WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs); ufshcd_hold(hba, false); spin_lock_irqsave(host->host_lock, flags); @@ -5763,10 +5757,14 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send"); /* wait until the task management command is completed */ - err = wait_event_timeout(hba->tm_wq, - test_bit(free_slot, &hba->tm_condition), + err = wait_for_completion_io_timeout(&wait, msecs_to_jiffies(TM_CMD_TIMEOUT)); if (!err) { + /* + * Make sure that ufshcd_compl_tm() does not trigger a + * use-after-free. + */ + req->end_io_data = NULL; ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err"); dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", __func__, tm_function); @@ -5785,9 +5783,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, __clear_bit(free_slot, &hba->outstanding_tasks); spin_unlock_irqrestore(hba->host->host_lock, flags); - clear_bit(free_slot, &hba->tm_condition); - ufshcd_put_tm_slot(hba, free_slot); - wake_up(&hba->tm_tag_wq); + blk_put_request(req); ufshcd_release(hba); return err; @@ -5863,6 +5859,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, enum query_opcode desc_op) { + struct request_queue *q = hba->cmd_queue; + struct request *req; struct ufshcd_lrb *lrbp; int err = 0; int tag; @@ -5872,7 +5870,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, down_read(&hba->clk_scaling_lock); - wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); + req = blk_get_request(q, REQ_OP_DRV_OUT, 0); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out_unlock; + } + tag = req->tag; + WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag)); init_completion(&wait); lrbp = &hba->lrb[tag]; @@ -5948,8 +5952,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, } } - ufshcd_put_dev_cmd_tag(hba, tag); - wake_up(&hba->dev_cmd.tag_wq); + blk_put_request(req); +out_unlock: up_read(&hba->clk_scaling_lock); return err; } @@ -6244,9 +6248,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) hba->lrb[tag].cmd = NULL; spin_unlock_irqrestore(host->host_lock, flags); - clear_bit_unlock(tag, &hba->lrb_in_use); - wake_up(&hba->dev_cmd.tag_wq); - out: if (!err) { err = SUCCESS; @@ -6279,9 +6280,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) int err; unsigned long flags; - /* Reset the host controller */ + /* + * Stop the host controller and complete the requests + * cleared by h/w + */ spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_hba_stop(hba, false); + hba->silence_err_logs = true; + ufshcd_complete_requests(hba); + hba->silence_err_logs = false; spin_unlock_irqrestore(hba->host->host_lock, flags); /* scale up clocks to max frequency before full reinitialization */ @@ -6292,7 +6299,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) goto out; /* Establish the link again and restore the device */ - err = ufshcd_probe_hba(hba); + err = ufshcd_probe_hba(hba, false); if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) err = -EIO; @@ -6315,7 +6322,6 @@ out: static int ufshcd_reset_and_restore(struct ufs_hba *hba) { int err = 0; - unsigned long flags; int retries = MAX_HOST_RESET_RETRIES; do { @@ -6325,15 +6331,6 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba) err = ufshcd_host_reset_and_restore(hba); } 
while (err && --retries); - /* - * After reset the door-bell might be cleared, complete - * outstanding requests in s/w here. - */ - spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_transfer_req_compl(hba); - ufshcd_tmc_handler(hba); - spin_unlock_irqrestore(hba->host->host_lock, flags); - return err; } @@ -6488,7 +6485,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba) if (!desc_buf) return; - ret = ufshcd_read_power_desc(hba, desc_buf, buff_len); + ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, + desc_buf, buff_len); if (ret) { dev_err(hba->dev, "%s: Failed reading power descriptor.len = %d ret = %d", @@ -6578,16 +6576,13 @@ out: return ret; } -static int ufs_get_device_desc(struct ufs_hba *hba, - struct ufs_dev_desc *dev_desc) +static int ufs_get_device_desc(struct ufs_hba *hba) { int err; size_t buff_len; u8 model_index; u8 *desc_buf; - - if (!dev_desc) - return -EINVAL; + struct ufs_dev_info *dev_info = &hba->dev_info; buff_len = max_t(size_t, hba->desc_size.dev_desc, QUERY_DESC_MAX_SIZE + 1); @@ -6597,7 +6592,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba, goto out; } - err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc); + err = ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, desc_buf, + hba->desc_size.dev_desc); if (err) { dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", __func__, err); @@ -6608,12 +6604,12 @@ static int ufs_get_device_desc(struct ufs_hba *hba, * getting vendor (manufacturerID) and Bank Index in big endian * format */ - dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | + dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; err = ufshcd_read_string_desc(hba, model_index, - &dev_desc->model, SD_ASCII_STD); + &dev_info->model, SD_ASCII_STD); if (err < 0) { dev_err(hba->dev, "%s: Failed reading Product Name. 
err = %d\n", __func__, err); @@ -6631,23 +6627,25 @@ out: return err; } -static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc) +static void ufs_put_device_desc(struct ufs_hba *hba) { - kfree(dev_desc->model); - dev_desc->model = NULL; + struct ufs_dev_info *dev_info = &hba->dev_info; + + kfree(dev_info->model); + dev_info->model = NULL; } -static void ufs_fixup_device_setup(struct ufs_hba *hba, - struct ufs_dev_desc *dev_desc) +static void ufs_fixup_device_setup(struct ufs_hba *hba) { struct ufs_dev_fix *f; + struct ufs_dev_info *dev_info = &hba->dev_info; for (f = ufs_fixups; f->quirk; f++) { - if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid || - f->card.wmanufacturerid == UFS_ANY_VENDOR) && - ((dev_desc->model && - STR_PRFX_EQUAL(f->card.model, dev_desc->model)) || - !strcmp(f->card.model, UFS_ANY_MODEL))) + if ((f->wmanufacturerid == dev_info->wmanufacturerid || + f->wmanufacturerid == UFS_ANY_VENDOR) && + ((dev_info->model && + STR_PRFX_EQUAL(f->model, dev_info->model)) || + !strcmp(f->model, UFS_ANY_MODEL))) hba->dev_quirks |= f->quirk; } } @@ -6863,6 +6861,37 @@ static void ufshcd_init_desc_sizes(struct ufs_hba *hba) hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; } +static int ufshcd_device_geo_params_init(struct ufs_hba *hba) +{ + int err; + size_t buff_len; + u8 *desc_buf; + + buff_len = hba->desc_size.geom_desc; + desc_buf = kmalloc(buff_len, GFP_KERNEL); + if (!desc_buf) { + err = -ENOMEM; + goto out; + } + + err = ufshcd_read_desc(hba, QUERY_DESC_IDN_GEOMETRY, 0, + desc_buf, buff_len); + if (err) { + dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n", + __func__, err); + goto out; + } + + if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1) + hba->dev_info.max_lu_supported = 32; + else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0) + hba->dev_info.max_lu_supported = 8; + +out: + kfree(desc_buf); + return err; +} + static struct ufs_ref_clk ufs_ref_clk_freqs[] = { {19200000, REF_CLK_FREQ_19_2_MHZ}, {26000000, REF_CLK_FREQ_26_MHZ}, @@ -6931,15 +6960,92 @@ out: return err; } +static int ufshcd_device_params_init(struct ufs_hba *hba) +{ + bool flag; + int ret; + + /* Clear any previous UFS device information */ + memset(&hba->dev_info, 0, sizeof(hba->dev_info)); + + /* Init check for device descriptor sizes */ + ufshcd_init_desc_sizes(hba); + + /* Init UFS geometry descriptor related parameters */ + ret = ufshcd_device_geo_params_init(hba); + if (ret) + goto out; + + /* Check and apply UFS device quirks */ + ret = ufs_get_device_desc(hba); + if (ret) { + dev_err(hba->dev, "%s: Failed getting device info. 
err = %d\n", + __func__, ret); + goto out; + } + + ufs_fixup_device_setup(hba); + + if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) + hba->dev_info.f_power_on_wp_en = flag; + + /* Probe maximum power mode co-supported by both UFS host and device */ + if (ufshcd_get_max_pwr_mode(hba)) + dev_err(hba->dev, + "%s: Failed getting max supported power mode\n", + __func__); +out: + return ret; +} + +/** + * ufshcd_add_lus - probe and add UFS logical units + * @hba: per-adapter instance + */ +static int ufshcd_add_lus(struct ufs_hba *hba) +{ + int ret; + + ufshcd_init_icc_levels(hba); + + /* Add required well known logical units to scsi mid layer */ + ret = ufshcd_scsi_add_wlus(hba); + if (ret) + goto out; + + /* Initialize devfreq after UFS device is detected */ + if (ufshcd_is_clkscaling_supported(hba)) { + memcpy(&hba->clk_scaling.saved_pwr_info.info, + &hba->pwr_info, + sizeof(struct ufs_pa_layer_attr)); + hba->clk_scaling.saved_pwr_info.is_valid = true; + if (!hba->devfreq) { + ret = ufshcd_devfreq_init(hba); + if (ret) + goto out; + } + + hba->clk_scaling.is_allowed = true; + } + + ufs_bsg_probe(hba); + scsi_scan_host(hba->host); + pm_runtime_put_sync(hba->dev); + +out: + return ret; +} + /** * ufshcd_probe_hba - probe hba to detect device and initialize * @hba: per-adapter instance + * @async: asynchronous execution or not * * Execute link-startup and verify device initialization */ -static int ufshcd_probe_hba(struct ufs_hba *hba) +static int ufshcd_probe_hba(struct ufs_hba *hba, bool async) { - struct ufs_dev_desc card = {0}; int ret; ktime_t start = ktime_get(); @@ -6957,27 +7063,26 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) /* UniPro link is active now */ ufshcd_set_link_active(hba); + /* Verify device initialization by sending NOP OUT UPIU */ ret = ufshcd_verify_dev_init(hba); if (ret) goto out; + /* Initiate UFS initialization, and waiting until completion */ ret = ufshcd_complete_dev_init(hba); if (ret) goto out; - /* Init check for device descriptor sizes */ - ufshcd_init_desc_sizes(hba); - - ret = ufs_get_device_desc(hba, &card); - if (ret) { - dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", - __func__, ret); - goto out; + /* + * Initialize UFS device parameters used by driver, these + * parameters are associated with UFS descriptors. + */ + if (async) { + ret = ufshcd_device_params_init(hba); + if (ret) + goto out; } - ufs_fixup_device_setup(hba, &card); - ufs_put_device_desc(&card); - ufshcd_tune_unipro_params(hba); /* UFS device is also active now */ @@ -6985,11 +7090,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) ufshcd_force_reset_auto_bkops(hba); hba->wlun_dev_clr_ua = true; - if (ufshcd_get_max_pwr_mode(hba)) { - dev_err(hba->dev, - "%s: Failed getting max supported power mode\n", - __func__); - } else { + /* Gear up to HS gear if supported */ + if (hba->max_pwr_info.is_valid) { /* * Set the right value to bRefClkFreq before attempting to * switch to HS gears. 
@@ -7010,59 +7112,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) /* Enable Auto-Hibernate if configured */ ufshcd_auto_hibern8_enable(hba); - /* - * If we are in error handling context or in power management callbacks - * context, no need to scan the host - */ - if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { - bool flag; - - /* clear any previous UFS device information */ - memset(&hba->dev_info, 0, sizeof(hba->dev_info)); - if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, - QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) - hba->dev_info.f_power_on_wp_en = flag; - - if (!hba->is_init_prefetch) - ufshcd_init_icc_levels(hba); - - /* Add required well known logical units to scsi mid layer */ - if (ufshcd_scsi_add_wlus(hba)) - goto out; - - /* Initialize devfreq after UFS device is detected */ - if (ufshcd_is_clkscaling_supported(hba)) { - memcpy(&hba->clk_scaling.saved_pwr_info.info, - &hba->pwr_info, - sizeof(struct ufs_pa_layer_attr)); - hba->clk_scaling.saved_pwr_info.is_valid = true; - if (!hba->devfreq) { - ret = ufshcd_devfreq_init(hba); - if (ret) - goto out; - } - hba->clk_scaling.is_allowed = true; - } - - ufs_bsg_probe(hba); - - scsi_scan_host(hba->host); - pm_runtime_put_sync(hba->dev); - } - - if (!hba->is_init_prefetch) - hba->is_init_prefetch = true; - out: - /* - * If we failed to initialize the device or the device is not - * present, turn off the power/clocks etc. - */ - if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { - pm_runtime_put_sync(hba->dev); - ufshcd_exit_clk_scaling(hba); - ufshcd_hba_exit(hba); - } trace_ufshcd_init(dev_name(hba->dev), ret, ktime_to_us(ktime_sub(ktime_get(), start)), @@ -7078,43 +7128,25 @@ out: static void ufshcd_async_scan(void *data, async_cookie_t cookie) { struct ufs_hba *hba = (struct ufs_hba *)data; + int ret; - ufshcd_probe_hba(hba); -} - -static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd) -{ - unsigned long flags; - struct Scsi_Host *host; - struct ufs_hba *hba; - int index; - bool found = false; - - if (!scmd || !scmd->device || !scmd->device->host) - return BLK_EH_DONE; - - host = scmd->device->host; - hba = shost_priv(host); - if (!hba) - return BLK_EH_DONE; - - spin_lock_irqsave(host->host_lock, flags); - - for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) { - if (hba->lrb[index].cmd == scmd) { - found = true; - break; - } - } - - spin_unlock_irqrestore(host->host_lock, flags); + /* Initialize hba, detect and initialize UFS device */ + ret = ufshcd_probe_hba(hba, true); + if (ret) + goto out; + /* Probe and add UFS logical units */ + ret = ufshcd_add_lus(hba); +out: /* - * Bypass SCSI error handling and reset the block layer timer if this - * SCSI command was not actually dispatched to UFS driver, otherwise - * let SCSI layer handle the error as usual. + * If we failed to initialize the device or the device is not + * present, turn off the power/clocks etc. */ - return found ? 
BLK_EH_DONE : BLK_EH_RESET_TIMER; + if (ret) { + pm_runtime_put_sync(hba->dev); + ufshcd_exit_clk_scaling(hba); + ufshcd_hba_exit(hba); + } } static const struct attribute_group *ufshcd_driver_groups[] = { @@ -7135,7 +7167,6 @@ static struct scsi_host_template ufshcd_driver_template = { .eh_abort_handler = ufshcd_abort, .eh_device_reset_handler = ufshcd_eh_device_reset_handler, .eh_host_reset_handler = ufshcd_eh_host_reset_handler, - .eh_timed_out = ufshcd_eh_timed_out, .this_id = -1, .sg_tablesize = SG_ALL, .cmd_per_lun = UFSHCD_CMD_PER_LUN, @@ -7574,6 +7605,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba) ufshcd_setup_clocks(hba, false); ufshcd_setup_hba_vreg(hba, false); hba->is_powered = false; + ufs_put_device_desc(hba); } } @@ -7701,8 +7733,7 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba, * turning off the link would also turn off the device. */ else if ((req_link_state == UIC_LINK_OFF_STATE) && - (!check_for_bkops || (check_for_bkops && - !hba->auto_bkops_enabled))) { + (!check_for_bkops || !hba->auto_bkops_enabled)) { /* * Let's make sure that link is in low power mode, we are doing * this currently by putting the link in Hibern8. Otherway to @@ -7908,6 +7939,11 @@ disable_clks: ret = ufshcd_vops_suspend(hba, pm_op); if (ret) goto set_link_active; + /* + * Disable the host irq as host controller as there won't be any + * host controller transaction expected till resume. + */ + ufshcd_disable_irq(hba); if (!ufshcd_is_link_active(hba)) ufshcd_setup_clocks(hba, false); @@ -7917,11 +7953,7 @@ disable_clks: hba->clk_gating.state = CLKS_OFF; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); - /* - * Disable the host irq as host controller as there won't be any - * host controller transaction expected till resume. - */ - ufshcd_disable_irq(hba); + /* Put the host controller in low power mode if possible */ ufshcd_hba_vreg_set_lpm(hba); goto out; @@ -7974,9 +8006,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) goto out; /* enable the host irq as host controller would be active soon */ - ret = ufshcd_enable_irq(hba); - if (ret) - goto disable_irq_and_vops_clks; + ufshcd_enable_irq(hba); ret = ufshcd_vreg_set_hpm(hba); if (ret) @@ -8250,6 +8280,9 @@ void ufshcd_remove(struct ufs_hba *hba) { ufs_bsg_remove(hba); ufs_sysfs_remove_nodes(hba->dev); + blk_cleanup_queue(hba->tmf_queue); + blk_mq_free_tag_set(&hba->tmf_tag_set); + blk_cleanup_queue(hba->cmd_queue); scsi_remove_host(hba->host); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); @@ -8328,6 +8361,18 @@ out_error: } EXPORT_SYMBOL(ufshcd_alloc_host); +/* This function exists because blk_mq_alloc_tag_set() requires this. 
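Put differently, the queue built on tmf_tag_set is never used to dispatch I/O: callers take a reserved request, drive the task-management doorbell themselves, and hand the request back, so the block layer never starts anything and ->queue_rq() only has to warn if it is ever reached. A small hypothetical helper, not part of the patch, spelling out the tag-to-slot invariant that replaces the old tm_slots_in_use bitmap:

/*
 * Hypothetical helper for illustration only: with queue_depth set to
 * hba->nutmrs, the tags handed out are exactly the UTMRL slot numbers
 * 0..nutmrs-1, so req->tag can be used as the slot index directly.
 */
static inline int ufshcd_tmf_req_to_slot(struct ufs_hba *hba,
					 struct request *req)
{
	WARN_ON_ONCE(req->tag < 0 || req->tag >= hba->nutmrs);
	return req->tag;
}

BLK_MQ_F_NO_SCHED in the tag-set flags below keeps the block layer from attaching an I/O scheduler to a queue that never carries real requests.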
*/ +static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *qd) +{ + WARN_ON_ONCE(true); + return BLK_STS_NOTSUPP; +} + +static const struct blk_mq_ops ufshcd_tmf_ops = { + .queue_rq = ufshcd_queue_tmf, +}; + /** * ufshcd_init - Driver initialization routine * @hba: per-adapter instance @@ -8397,10 +8442,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->max_pwr_info.is_valid = false; - /* Initailize wait queue for task management */ - init_waitqueue_head(&hba->tm_wq); - init_waitqueue_head(&hba->tm_tag_wq); - /* Initialize work queues */ INIT_WORK(&hba->eh_work, ufshcd_err_handler); INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); @@ -8413,9 +8454,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) init_rwsem(&hba->clk_scaling_lock); - /* Initialize device management tag acquire wait queue */ - init_waitqueue_head(&hba->dev_cmd.tag_wq); - ufshcd_init_clk_gating(hba); ufshcd_init_clk_scaling(hba); @@ -8449,6 +8487,27 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) goto exit_gating; } + hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set); + if (IS_ERR(hba->cmd_queue)) { + err = PTR_ERR(hba->cmd_queue); + goto out_remove_scsi_host; + } + + hba->tmf_tag_set = (struct blk_mq_tag_set) { + .nr_hw_queues = 1, + .queue_depth = hba->nutmrs, + .ops = &ufshcd_tmf_ops, + .flags = BLK_MQ_F_NO_SCHED, + }; + err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); + if (err < 0) + goto free_cmd_queue; + hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set); + if (IS_ERR(hba->tmf_queue)) { + err = PTR_ERR(hba->tmf_queue); + goto free_tmf_tag_set; + } + /* Reset the attached device */ ufshcd_vops_device_reset(hba); @@ -8458,7 +8517,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) dev_err(hba->dev, "Host controller enable failed\n"); ufshcd_print_host_regs(hba); ufshcd_print_host_state(hba); - goto out_remove_scsi_host; + goto free_tmf_queue; } /* @@ -8495,6 +8554,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) return 0; +free_tmf_queue: + blk_cleanup_queue(hba->tmf_queue); +free_tmf_tag_set: + blk_mq_free_tag_set(&hba->tmf_tag_set); +free_cmd_queue: + blk_cleanup_queue(hba->cmd_queue); out_remove_scsi_host: scsi_remove_host(hba->host); exit_gating: diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 2740f6941ec6..2ae6c7c8528c 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -212,13 +212,11 @@ struct ufs_query { * @type: device management command type - Query, NOP OUT * @lock: lock to allow one command at a time * @complete: internal commands completion - * @tag_wq: wait queue until free command slot is available */ struct ufs_dev_cmd { enum dev_cmd_type type; struct mutex lock; struct completion *complete; - wait_queue_head_t tag_wq; struct ufs_query query; }; @@ -322,7 +320,7 @@ struct ufs_hba_variant_ops { void (*setup_task_mgmt)(struct ufs_hba *, int, u8); void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme, enum ufs_notify_change_status); - int (*apply_dev_quirks)(struct ufs_hba *); + int (*apply_dev_quirks)(struct ufs_hba *hba); int (*suspend)(struct ufs_hba *, enum ufs_pm_op); int (*resume)(struct ufs_hba *, enum ufs_pm_op); void (*dbg_register_dump)(struct ufs_hba *hba); @@ -483,7 +481,7 @@ struct ufs_stats { * @host: Scsi_Host instance of the driver * @dev: device handle * @lrb: local reference block - * @lrb_in_use: lrb in 
use + * @cmd_queue: Used to allocate command tags from hba->host->tag_set. * @outstanding_tasks: Bits representing outstanding task requests * @outstanding_reqs: Bits representing outstanding transfer requests * @capabilities: UFS Controller Capabilities @@ -495,17 +493,14 @@ struct ufs_stats { * @irq: Irq number of the controller * @active_uic_cmd: handle of active UIC command * @uic_cmd_mutex: mutex for uic command - * @tm_wq: wait queue for task management - * @tm_tag_wq: wait queue for free task management slots - * @tm_slots_in_use: bit map of task management request slots in use + * @tmf_tag_set: TMF tag set. + * @tmf_queue: Used to allocate TMF tags. * @pwr_done: completion for power mode change - * @tm_condition: condition variable for task management * @ufshcd_state: UFSHCD states * @eh_flags: Error handling flags * @intr_mask: Interrupt Mask Bits * @ee_ctrl_mask: Exception event control mask * @is_powered: flag to check if HBA is powered - * @is_init_prefetch: flag to check if data was pre-fetched in initialization * @init_prefetch_data: data pre-fetched during initialization * @eh_work: Worker to handle UFS errors that require s/w attention * @eeh_work: Worker to handle exception events @@ -513,6 +508,7 @@ struct ufs_stats { * @uic_error: UFS interconnect layer error status * @saved_err: sticky error mask * @saved_uic_err: sticky UIC error mask + * @silence_err_logs: flag to silence error logs * @dev_cmd: ufs device management command information * @last_dme_cmd_tstamp: time stamp of the last completed DME command * @auto_bkops_enabled: to track whether bkops is enabled in device @@ -541,6 +537,7 @@ struct ufs_hba { struct Scsi_Host *host; struct device *dev; + struct request_queue *cmd_queue; /* * This field is to keep a reference to "scsi_device" corresponding to * "UFS device" W-LU. @@ -561,7 +558,6 @@ struct ufs_hba { u32 ahit; struct ufshcd_lrb *lrb; - unsigned long lrb_in_use; unsigned long outstanding_tasks; unsigned long outstanding_reqs; @@ -643,10 +639,8 @@ struct ufs_hba { /* Device deviations from standard UFS device spec. 
*/ unsigned int dev_quirks; - wait_queue_head_t tm_wq; - wait_queue_head_t tm_tag_wq; - unsigned long tm_condition; - unsigned long tm_slots_in_use; + struct blk_mq_tag_set tmf_tag_set; + struct request_queue *tmf_queue; struct uic_command *active_uic_cmd; struct mutex uic_cmd_mutex; @@ -657,7 +651,6 @@ struct ufs_hba { u32 intr_mask; u16 ee_ctrl_mask; bool is_powered; - bool is_init_prefetch; struct ufs_init_prefetch init_prefetch_data; /* Work Queues */ @@ -670,6 +663,7 @@ struct ufs_hba { u32 saved_err; u32 saved_uic_err; struct ufs_stats ufs_stats; + bool silence_err_logs; /* Device management request data */ struct ufs_dev_cmd dev_cmd; @@ -803,12 +797,17 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg) int ufshcd_alloc_host(struct device *, struct ufs_hba **); void ufshcd_dealloc_host(struct ufs_hba *); +int ufshcd_hba_enable(struct ufs_hba *hba); int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int); +int ufshcd_make_hba_operational(struct ufs_hba *hba); void ufshcd_remove(struct ufs_hba *); +int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, u32 val, unsigned long interval_us, unsigned long timeout_ms, bool can_sleep); void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk); +void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist, + u32 reg); static inline void check_upiu_size(void) { @@ -927,6 +926,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, bool *flag_res); void ufshcd_auto_hibern8_enable(struct ufs_hba *hba); +void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit); #define SD_ASCII_STD true #define SD_RAW false @@ -1086,8 +1086,10 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba) static inline void ufshcd_vops_device_reset(struct ufs_hba *hba) { - if (hba->vops && hba->vops->device_reset) + if (hba->vops && hba->vops->device_reset) { hba->vops->device_reset(hba); + ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0); + } } extern struct ufs_pm_lvl_states ufs_pm_lvl_states[]; diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h index f539f873f94d..3dc4d8b76509 100644 --- a/drivers/scsi/ufs/unipro.h +++ b/drivers/scsi/ufs/unipro.h @@ -161,6 +161,17 @@ /* PHY Adapter Protocol Constants */ #define PA_MAXDATALANES 4 +#define DL_FC0ProtectionTimeOutVal_Default 8191 +#define DL_TC0ReplayTimeOutVal_Default 65535 +#define DL_AFC0ReqTimeOutVal_Default 32767 +#define DL_FC1ProtectionTimeOutVal_Default 8191 +#define DL_TC1ReplayTimeOutVal_Default 65535 +#define DL_AFC1ReqTimeOutVal_Default 32767 + +#define DME_LocalFC0ProtectionTimeOutVal 0xD041 +#define DME_LocalTC0ReplayTimeOutVal 0xD042 +#define DME_LocalAFC0ReqTimeOutVal 0xD043 + /* PA power modes */ enum { FAST_MODE = 1, diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 70008816c91f..c3f010df641e 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -365,7 +365,7 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter, int segs = scsi_dma_map(cmd); if (segs == -ENOMEM) { - scmd_printk(KERN_ERR, cmd, + scmd_printk(KERN_DEBUG, cmd, "vmw_pvscsi: Failed to map cmd sglist for DMA.\n"); return -ENOMEM; } else if (segs > 1) { @@ -392,7 +392,7 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter, ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen, cmd->sc_data_direction); if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) { - 
scmd_printk(KERN_ERR, cmd, + scmd_printk(KERN_DEBUG, cmd, "vmw_pvscsi: Failed to map direct data buffer for DMA.\n"); return -ENOMEM; } @@ -402,6 +402,17 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter, return 0; } +/* + * The device incorrectly doesn't clear the first byte of the sense + * buffer in some cases. We have to do it ourselves. + * Otherwise we run into trouble when SWIOTLB is forced. + */ +static void pvscsi_patch_sense(struct scsi_cmnd *cmd) +{ + if (cmd->sense_buffer) + cmd->sense_buffer[0] = 0; +} + static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter, struct pvscsi_ctx *ctx) { @@ -544,6 +555,8 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter, cmd = ctx->cmd; abort_cmp = ctx->abort_cmp; pvscsi_unmap_buffers(adapter, ctx); + if (sdstat != SAM_STAT_CHECK_CONDITION) + pvscsi_patch_sense(cmd); pvscsi_release_context(adapter, ctx); if (abort_cmp) { /* @@ -712,7 +725,7 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) { - scmd_printk(KERN_ERR, cmd, + scmd_printk(KERN_DEBUG, cmd, "vmw_pvscsi: Failed to map sense buffer for DMA.\n"); ctx->sensePA = 0; return -ENOMEM; @@ -873,6 +886,7 @@ static void pvscsi_reset_all(struct pvscsi_adapter *adapter) scmd_printk(KERN_ERR, cmd, "Forced reset on cmd %p\n", cmd); pvscsi_unmap_buffers(adapter, ctx); + pvscsi_patch_sense(cmd); pvscsi_release_context(adapter, ctx); cmd->result = (DID_RESET << 16); cmd->scsi_done(cmd); diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 1354a157e9af..6a38ff936389 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -221,7 +221,6 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) ep = fc_seq_exch(seq); lport = ep->lp; if (cmd->was_ddp_setup) { - BUG_ON(!ep); BUG_ON(!lport); /* * Since DDP (Large Rx offload) was setup for this request, |
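The pvscsi sense patch-up works because the SCSI midlayer only trusts the sense buffer when its first byte carries a recognisable response code; zeroing that byte is enough to make stale bounce-buffer contents (the forced-SWIOTLB case mentioned in the comment) invisible to upper layers. A small sketch of roughly the test those layers apply, assuming the long-standing scsi_normalize_sense() helper; the function name is made up for illustration:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_common.h>

/*
 * Sketch: approximately the check applied before cmd->sense_buffer is
 * believed.  With byte 0 cleared by pvscsi_patch_sense(), the response
 * code is invalid and this returns false.
 */
static bool pvscsi_sense_usable_sketch(struct scsi_cmnd *cmd)
{
	struct scsi_sense_hdr sshdr;

	return scsi_normalize_sense(cmd->sense_buffer,
				    SCSI_SENSE_BUFFERSIZE, &sshdr);
}

Note that pvscsi_patch_sense() is deliberately skipped for SAM_STAT_CHECK_CONDITION completions, where the device really did return sense data that must be preserved.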