Diffstat (limited to 'drivers/block'): 28 files changed, 972 insertions, 594 deletions
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index b594768b0241..51b0af1cebee 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -6,7 +6,7 @@ menu "Block devices"
 
 config BLK_DEV_FD
 	tristate "Normal floppy disk support"
-	depends on (!ARCH_S390 && !M68K && !IA64 && !UML && !ARM) || Q40 || (SUN3X && BROKEN) || ARCH_RPC || ARCH_EBSA285
+	depends on ARCH_MAY_HAVE_PC_FDC
 	---help---
 	  If you want to use the floppy disk drive(s) of your PC under Linux,
 	  say Y. Information about this driver, especially important for IBM
@@ -408,54 +408,12 @@ config BLK_DEV_INITRD
 	  "real" root file system, etc. See <file:Documentation/initrd.txt>
 	  for details.
 
-config INITRAMFS_SOURCE
-	string "Initramfs source file(s)"
-	default ""
-	help
-	  This can be either a single cpio archive with a .cpio suffix or a
-	  space-separated list of directories and files for building the
-	  initramfs image. A cpio archive should contain a filesystem archive
-	  to be used as an initramfs image. Directories should contain a
-	  filesystem layout to be included in the initramfs image. Files
-	  should contain entries according to the format described by the
-	  "usr/gen_init_cpio" program in the kernel tree.
-
-	  When multiple directories and files are specified then the
-	  initramfs image will be the aggregate of all of them.
-
-	  See <file:Documentation/early-userspace/README for more details.
-
-	  If you are not sure, leave it blank.
-
-config INITRAMFS_ROOT_UID
-	int "User ID to map to 0 (user root)"
-	depends on INITRAMFS_SOURCE!=""
-	default "0"
-	help
-	  This setting is only meaningful if the INITRAMFS_SOURCE is
-	  contains a directory. Setting this user ID (UID) to something
-	  other than "0" will cause all files owned by that UID to be
-	  owned by user root in the initial ramdisk image.
-
-	  If you are not sure, leave it set to "0".
-
-config INITRAMFS_ROOT_GID
-	int "Group ID to map to 0 (group root)"
-	depends on INITRAMFS_SOURCE!=""
-	default "0"
-	help
-	  This setting is only meaningful if the INITRAMFS_SOURCE is
-	  contains a directory. Setting this group ID (GID) to something
-	  other than "0" will cause all files owned by that GID to be
-	  owned by group root in the initial ramdisk image.
-
-	  If you are not sure, leave it set to "0".
 
 #XXX - it makes sense to enable this only for 32-bit subarch's, not for x86_64
 #for instance.
config LBD bool "Support for Large Block Devices" - depends on X86 || MIPS32 || PPC32 || ARCH_S390_31 || SUPERH || UML + depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML help Say Y here if you want to attach large (bigger than 2TB) discs to your machine, or if you want to have a raid or loopback device diff --git a/drivers/block/acsi.c b/drivers/block/acsi.c index ce933de48084..0e1f34fef0c8 100644 --- a/drivers/block/acsi.c +++ b/drivers/block/acsi.c @@ -371,7 +371,7 @@ static int acsi_revalidate (struct gendisk *disk); /************************* End of Prototypes **************************/ -struct timer_list acsi_timer = TIMER_INITIALIZER(acsi_times_out, 0, 0); +DEFINE_TIMER(acsi_timer, acsi_times_out, 0, 0); #ifdef CONFIG_ATARI_SLM diff --git a/drivers/block/acsi_slm.c b/drivers/block/acsi_slm.c index e3be8c31a74c..a5c1c8e871ec 100644 --- a/drivers/block/acsi_slm.c +++ b/drivers/block/acsi_slm.c @@ -268,7 +268,7 @@ static int slm_get_pagesize( int device, int *w, int *h ); /************************* End of Prototypes **************************/ -static struct timer_list slm_timer = TIMER_INITIALIZER(slm_test_ready, 0, 0); +static DEFINE_TIMER(slm_timer, slm_test_ready, 0, 0); static struct file_operations slm_fops = { .owner = THIS_MODULE, diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index 721ba8086043..0e9e586e9ba3 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h @@ -1,5 +1,5 @@ /* Copyright (c) 2004 Coraid, Inc. See COPYING for GPL terms. */ -#define VERSION "10" +#define VERSION "12" #define AOE_MAJOR 152 #define DEVICE_NAME "aoe" @@ -7,12 +7,12 @@ * default is 16, which is 15 partitions plus the whole disk */ #ifndef AOE_PARTITIONS -#define AOE_PARTITIONS 16 +#define AOE_PARTITIONS (16) #endif -#define SYSMINOR(aoemajor, aoeminor) ((aoemajor) * 10 + (aoeminor)) -#define AOEMAJOR(sysminor) ((sysminor) / 10) -#define AOEMINOR(sysminor) ((sysminor) % 10) +#define SYSMINOR(aoemajor, aoeminor) ((aoemajor) * NPERSHELF + (aoeminor)) +#define AOEMAJOR(sysminor) ((sysminor) / NPERSHELF) +#define AOEMINOR(sysminor) ((sysminor) % NPERSHELF) #define WHITESPACE " \t\v\f\n" enum { @@ -83,7 +83,7 @@ enum { enum { MAXATADATA = 1024, - NPERSHELF = 10, + NPERSHELF = 16, /* number of slots per shelf address */ FREETAG = -1, MIN_BUFS = 8, }; diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index 6e231c5a1199..ded33ba31acc 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c @@ -35,7 +35,7 @@ aoedev_newdev(ulong nframes) struct aoedev *d; struct frame *f, *e; - d = kcalloc(1, sizeof *d, GFP_ATOMIC); + d = kzalloc(sizeof *d, GFP_ATOMIC); if (d == NULL) return NULL; f = kcalloc(nframes, sizeof *f, GFP_ATOMIC); diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index db05a5a99f35..22bda05fc693 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -371,16 +371,10 @@ static int floppy_release( struct inode * inode, struct file * filp ); /************************* End of Prototypes **************************/ -static struct timer_list motor_off_timer = - TIMER_INITIALIZER(fd_motor_off_timer, 0, 0); -static struct timer_list readtrack_timer = - TIMER_INITIALIZER(fd_readtrack_check, 0, 0); - -static struct timer_list timeout_timer = - TIMER_INITIALIZER(fd_times_out, 0, 0); - -static struct timer_list fd_timer = - TIMER_INITIALIZER(check_change, 0, 0); +static DEFINE_TIMER(motor_off_timer, fd_motor_off_timer, 0, 0); +static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0); 
+static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0); +static DEFINE_TIMER(fd_timer, check_change, 0, 0); static inline void start_motor_off_timer(void) { diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 418b1469d75d..486b6e1c7dfb 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -47,14 +47,14 @@ #include <linux/completion.h> #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin)) -#define DRIVER_NAME "HP CISS Driver (v 2.6.6)" -#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,6) +#define DRIVER_NAME "HP CISS Driver (v 2.6.8)" +#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,8) /* Embedded module documentation macros - see modules.h */ MODULE_AUTHOR("Hewlett-Packard Company"); -MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.6"); +MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.8"); MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400" - " SA6i P600 P800 E400 E300"); + " SA6i P600 P800 P400 P400i E200 E200i"); MODULE_LICENSE("GPL"); #include "cciss_cmd.h" @@ -83,12 +83,22 @@ static const struct pci_device_id cciss_pci_device_id[] = { 0x0E11, 0x4091, 0, 0, 0}, { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225, 0, 0, 0}, - { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSB, + { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103c, 0x3223, 0, 0, 0}, { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, - 0x103c, 0x3231, 0, 0, 0}, + 0x103c, 0x3234, 0, 0, 0}, { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, - 0x103c, 0x3233, 0, 0, 0}, + 0x103c, 0x3235, 0, 0, 0}, + { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, + 0x103c, 0x3211, 0, 0, 0}, + { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, + 0x103c, 0x3212, 0, 0, 0}, + { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, + 0x103c, 0x3213, 0, 0, 0}, + { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, + 0x103c, 0x3214, 0, 0, 0}, + { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, + 0x103c, 0x3215, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, cciss_pci_device_id); @@ -111,8 +121,13 @@ static struct board_type products[] = { { 0x40910E11, "Smart Array 6i", &SA5_access}, { 0x3225103C, "Smart Array P600", &SA5_access}, { 0x3223103C, "Smart Array P800", &SA5_access}, - { 0x3231103C, "Smart Array E400", &SA5_access}, - { 0x3233103C, "Smart Array E300", &SA5_access}, + { 0x3234103C, "Smart Array P400", &SA5_access}, + { 0x3235103C, "Smart Array P400i", &SA5_access}, + { 0x3211103C, "Smart Array E200i", &SA5_access}, + { 0x3212103C, "Smart Array E200", &SA5_access}, + { 0x3213103C, "Smart Array E200i", &SA5_access}, + { 0x3214103C, "Smart Array E200i", &SA5_access}, + { 0x3215103C, "Smart Array E200i", &SA5_access}, }; /* How long to wait (in millesconds) for board to go into simple mode */ @@ -140,15 +155,26 @@ static int cciss_ioctl(struct inode *inode, struct file *filep, static int revalidate_allvol(ctlr_info_t *host); static int cciss_revalidate(struct gendisk *disk); -static int deregister_disk(struct gendisk *disk); -static int register_new_disk(ctlr_info_t *h); +static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk); +static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, int clear_all); +static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf, + int withirq, unsigned int *total_size, unsigned int *block_size); +static void cciss_geometry_inquiry(int ctlr, int logvol, + int withirq, unsigned int total_size, + unsigned int block_size, InquiryData_struct *inq_buff, + drive_info_struct *drv); static void 
cciss_getgeometry(int cntl_num); static void start_io( ctlr_info_t *h); static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, unsigned int log_unit, __u8 page_code, unsigned char *scsi3addr, int cmd_type); +static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, + unsigned int use_unit_num, unsigned int log_unit, __u8 page_code, + int cmd_type); + +static void fail_all_cmds(unsigned long ctlr); #ifdef CONFIG_PROC_FS static int cciss_proc_get_info(char *buffer, char **start, off_t offset, @@ -265,7 +291,7 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset, for(i=0; i<=h->highest_lun; i++) { drv = &h->drv[i]; - if (drv->block_size == 0) + if (drv->heads == 0) continue; vol_sz = drv->nr_blocks; @@ -363,6 +389,8 @@ static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool) return NULL; memset(c, 0, sizeof(CommandList_struct)); + c->cmdindex = -1; + c->err_info = (ErrorInfo_struct *)pci_alloc_consistent( h->pdev, sizeof(ErrorInfo_struct), &err_dma_handle); @@ -393,6 +421,8 @@ static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool) err_dma_handle = h->errinfo_pool_dhandle + i*sizeof(ErrorInfo_struct); h->nr_allocs++; + + c->cmdindex = i; } c->busaddr = (__u32) cmd_dma_handle; @@ -453,6 +483,8 @@ static int cciss_open(struct inode *inode, struct file *filep) printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name); #endif /* CCISS_DEBUG */ + if (host->busy_initializing || drv->busy_configuring) + return -EBUSY; /* * Root is allowed to open raw volume zero even if it's not configured * so array config can still work. Root is also allowed to open any @@ -796,10 +828,10 @@ static int cciss_ioctl(struct inode *inode, struct file *filep, return(0); } case CCISS_DEREGDISK: - return deregister_disk(disk); + return rebuild_lun_table(host, disk); case CCISS_REGNEWD: - return register_new_disk(host); + return rebuild_lun_table(host, NULL); case CCISS_PASSTHRU: { @@ -1143,48 +1175,323 @@ static int revalidate_allvol(ctlr_info_t *host) return 0; } -static int deregister_disk(struct gendisk *disk) +/* This function will check the usage_count of the drive to be updated/added. + * If the usage_count is zero then the drive information will be updated and + * the disk will be re-registered with the kernel. If not then it will be + * left alone for the next reboot. The exception to this is disk 0 which + * will always be left registered with the kernel since it is also the + * controller node. Any changes to disk 0 will show up on the next + * reboot. 
+*/ +static void cciss_update_drive_info(int ctlr, int drv_index) + { + ctlr_info_t *h = hba[ctlr]; + struct gendisk *disk; + ReadCapdata_struct *size_buff = NULL; + InquiryData_struct *inq_buff = NULL; + unsigned int block_size; + unsigned int total_size; + unsigned long flags = 0; + int ret = 0; + + /* if the disk already exists then deregister it before proceeding*/ + if (h->drv[drv_index].raid_level != -1){ + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); + h->drv[drv_index].busy_configuring = 1; + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + ret = deregister_disk(h->gendisk[drv_index], + &h->drv[drv_index], 0); + h->drv[drv_index].busy_configuring = 0; + } + + /* If the disk is in use return */ + if (ret) + return; + + + /* Get information about the disk and modify the driver sturcture */ + size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL); + if (size_buff == NULL) + goto mem_msg; + inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL); + if (inq_buff == NULL) + goto mem_msg; + + cciss_read_capacity(ctlr, drv_index, size_buff, 1, + &total_size, &block_size); + cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size, + inq_buff, &h->drv[drv_index]); + + ++h->num_luns; + disk = h->gendisk[drv_index]; + set_capacity(disk, h->drv[drv_index].nr_blocks); + + + /* if it's the controller it's already added */ + if (drv_index){ + disk->queue = blk_init_queue(do_cciss_request, &h->lock); + + /* Set up queue information */ + disk->queue->backing_dev_info.ra_pages = READ_AHEAD; + blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask); + + /* This is a hardware imposed limit. */ + blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES); + + /* This is a limit in the driver and could be eliminated. */ + blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES); + + blk_queue_max_sectors(disk->queue, 512); + + disk->queue->queuedata = hba[ctlr]; + + blk_queue_hardsect_size(disk->queue, + hba[ctlr]->drv[drv_index].block_size); + + h->drv[drv_index].queue = disk->queue; + add_disk(disk); + } + +freeret: + kfree(size_buff); + kfree(inq_buff); + return; +mem_msg: + printk(KERN_ERR "cciss: out of memory\n"); + goto freeret; +} + +/* This function will find the first index of the controllers drive array + * that has a -1 for the raid_level and will return that index. This is + * where new drives will be added. If the index to be returned is greater + * than the highest_lun index for the controller then highest_lun is set + * to this new index. If there are no available indexes then -1 is returned. +*/ +static int cciss_find_free_drive_index(int ctlr) { + int i; + + for (i=0; i < CISS_MAX_LUN; i++){ + if (hba[ctlr]->drv[i].raid_level == -1){ + if (i > hba[ctlr]->highest_lun) + hba[ctlr]->highest_lun = i; + return i; + } + } + return -1; +} + +/* This function will add and remove logical drives from the Logical + * drive array of the controller and maintain persistancy of ordering + * so that mount points are preserved until the next reboot. This allows + * for the removal of logical drives in the middle of the drive array + * without a re-ordering of those drives. + * INPUT + * h = The controller to perform the operations on + * del_disk = The disk to remove if specified. If the value given + * is NULL then no disk is removed. 
+*/ +static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk) +{ + int ctlr = h->ctlr; + int num_luns; + ReportLunData_struct *ld_buff = NULL; + drive_info_struct *drv = NULL; + int return_code; + int listlength = 0; + int i; + int drv_found; + int drv_index = 0; + __u32 lunid = 0; unsigned long flags; + + /* Set busy_configuring flag for this operation */ + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); + if (h->num_luns >= CISS_MAX_LUN){ + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + return -EINVAL; + } + + if (h->busy_configuring){ + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + return -EBUSY; + } + h->busy_configuring = 1; + + /* if del_disk is NULL then we are being called to add a new disk + * and update the logical drive table. If it is not NULL then + * we will check if the disk is in use or not. + */ + if (del_disk != NULL){ + drv = get_drv(del_disk); + drv->busy_configuring = 1; + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + return_code = deregister_disk(del_disk, drv, 1); + drv->busy_configuring = 0; + h->busy_configuring = 0; + return return_code; + } else { + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL); + if (ld_buff == NULL) + goto mem_msg; + + return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff, + sizeof(ReportLunData_struct), 0, 0, 0, + TYPE_CMD); + + if (return_code == IO_OK){ + listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24; + listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16; + listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8; + listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]); + } else{ /* reading number of logical volumes failed */ + printk(KERN_WARNING "cciss: report logical volume" + " command failed\n"); + listlength = 0; + goto freeret; + } + + num_luns = listlength / 8; /* 8 bytes per entry */ + if (num_luns > CISS_MAX_LUN){ + num_luns = CISS_MAX_LUN; + printk(KERN_WARNING "cciss: more luns configured" + " on controller than can be handled by" + " this driver.\n"); + } + + /* Compare controller drive array to drivers drive array. + * Check for updates in the drive information and any new drives + * on the controller. + */ + for (i=0; i < num_luns; i++){ + int j; + + drv_found = 0; + + lunid = (0xff & + (unsigned int)(ld_buff->LUN[i][3])) << 24; + lunid |= (0xff & + (unsigned int)(ld_buff->LUN[i][2])) << 16; + lunid |= (0xff & + (unsigned int)(ld_buff->LUN[i][1])) << 8; + lunid |= 0xff & + (unsigned int)(ld_buff->LUN[i][0]); + + /* Find if the LUN is already in the drive array + * of the controller. If so then update its info + * if not is use. If it does not exist then find + * the first free index and add it. + */ + for (j=0; j <= h->highest_lun; j++){ + if (h->drv[j].LunID == lunid){ + drv_index = j; + drv_found = 1; + } + } + + /* check if the drive was found already in the array */ + if (!drv_found){ + drv_index = cciss_find_free_drive_index(ctlr); + if (drv_index == -1) + goto freeret; + + } + h->drv[drv_index].LunID = lunid; + cciss_update_drive_info(ctlr, drv_index); + } /* end for */ + } /* end else */ + +freeret: + kfree(ld_buff); + h->busy_configuring = 0; + /* We return -1 here to tell the ACU that we have registered/updated + * all of the drives that we can and to keep it from calling us + * additional times. 
+ */ + return -1; +mem_msg: + printk(KERN_ERR "cciss: out of memory\n"); + goto freeret; +} + +/* This function will deregister the disk and it's queue from the + * kernel. It must be called with the controller lock held and the + * drv structures busy_configuring flag set. It's parameters are: + * + * disk = This is the disk to be deregistered + * drv = This is the drive_info_struct associated with the disk to be + * deregistered. It contains information about the disk used + * by the driver. + * clear_all = This flag determines whether or not the disk information + * is going to be completely cleared out and the highest_lun + * reset. Sometimes we want to clear out information about + * the disk in preperation for re-adding it. In this case + * the highest_lun should be left unchanged and the LunID + * should not be cleared. +*/ +static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, + int clear_all) +{ ctlr_info_t *h = get_host(disk); - drive_info_struct *drv = get_drv(disk); - int ctlr = h->ctlr; if (!capable(CAP_SYS_RAWIO)) return -EPERM; - spin_lock_irqsave(CCISS_LOCK(ctlr), flags); /* make sure logical volume is NOT is use */ - if( drv->usage_count > 1) { - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + if(clear_all || (h->gendisk[0] == disk)) { + if (drv->usage_count > 1) return -EBUSY; } - drv->usage_count++; - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + else + if( drv->usage_count > 0 ) + return -EBUSY; - /* invalidate the devices and deregister the disk */ - if (disk->flags & GENHD_FL_UP) + /* invalidate the devices and deregister the disk. If it is disk + * zero do not deregister it but just zero out it's values. This + * allows us to delete disk zero but keep the controller registered. + */ + if (h->gendisk[0] != disk){ + if (disk->flags & GENHD_FL_UP){ + blk_cleanup_queue(disk->queue); del_gendisk(disk); + drv->queue = NULL; + } + } + + --h->num_luns; + /* zero out the disk size info */ + drv->nr_blocks = 0; + drv->block_size = 0; + drv->heads = 0; + drv->sectors = 0; + drv->cylinders = 0; + drv->raid_level = -1; /* This can be used as a flag variable to + * indicate that this element of the drive + * array is free. 
+ */ + + if (clear_all){ /* check to see if it was the last disk */ if (drv == h->drv + h->highest_lun) { /* if so, find the new hightest lun */ int i, newhighest =-1; for(i=0; i<h->highest_lun; i++) { /* if the disk has size > 0, it is available */ - if (h->drv[i].nr_blocks) + if (h->drv[i].heads) newhighest = i; } h->highest_lun = newhighest; - } - --h->num_luns; - /* zero out the disk size info */ - drv->nr_blocks = 0; - drv->block_size = 0; - drv->cylinders = 0; + drv->LunID = 0; + } return(0); } + static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller, @@ -1420,8 +1727,10 @@ case CMD_HARDWARE_ERR: } } /* unlock the buffers from DMA */ + buff_dma_handle.val32.lower = c->SG[0].Addr.lower; + buff_dma_handle.val32.upper = c->SG[0].Addr.upper; pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val, - size, PCI_DMA_BIDIRECTIONAL); + c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); cmd_free(h, c, 0); return(return_status); @@ -1495,164 +1804,6 @@ cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf, return; } -static int register_new_disk(ctlr_info_t *h) -{ - struct gendisk *disk; - int ctlr = h->ctlr; - int i; - int num_luns; - int logvol; - int new_lun_found = 0; - int new_lun_index = 0; - int free_index_found = 0; - int free_index = 0; - ReportLunData_struct *ld_buff = NULL; - ReadCapdata_struct *size_buff = NULL; - InquiryData_struct *inq_buff = NULL; - int return_code; - int listlength = 0; - __u32 lunid = 0; - unsigned int block_size; - unsigned int total_size; - - if (!capable(CAP_SYS_RAWIO)) - return -EPERM; - /* if we have no space in our disk array left to add anything */ - if( h->num_luns >= CISS_MAX_LUN) - return -EINVAL; - - ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL); - if (ld_buff == NULL) - goto mem_msg; - memset(ld_buff, 0, sizeof(ReportLunData_struct)); - size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL); - if (size_buff == NULL) - goto mem_msg; - inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL); - if (inq_buff == NULL) - goto mem_msg; - - return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff, - sizeof(ReportLunData_struct), 0, 0, 0, TYPE_CMD); - - if( return_code == IO_OK) - { - - // printk("LUN Data\n--------------------------\n"); - - listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24; - listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16; - listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8; - listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]); - } else /* reading number of logical volumes failed */ - { - printk(KERN_WARNING "cciss: report logical volume" - " command failed\n"); - listlength = 0; - goto free_err; - } - num_luns = listlength / 8; // 8 bytes pre entry - if (num_luns > CISS_MAX_LUN) - { - num_luns = CISS_MAX_LUN; - } -#ifdef CCISS_DEBUG - printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0], - ld_buff->LUNListLength[1], ld_buff->LUNListLength[2], - ld_buff->LUNListLength[3], num_luns); -#endif - for(i=0; i< num_luns; i++) - { - int j; - int lunID_found = 0; - - lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24; - lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16; - lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8; - lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]); - - /* check to see if this is a new lun */ - for(j=0; j <= h->highest_lun; j++) - { -#ifdef CCISS_DEBUG - printk("Checking %d %x against 
%x\n", j,h->drv[j].LunID, - lunid); -#endif /* CCISS_DEBUG */ - if (h->drv[j].LunID == lunid) - { - lunID_found = 1; - break; - } - - } - if( lunID_found == 1) - continue; - else - { /* It is the new lun we have been looking for */ -#ifdef CCISS_DEBUG - printk("new lun found at %d\n", i); -#endif /* CCISS_DEBUG */ - new_lun_index = i; - new_lun_found = 1; - break; - } - } - if (!new_lun_found) - { - printk(KERN_WARNING "cciss: New Logical Volume not found\n"); - goto free_err; - } - /* Now find the free index */ - for(i=0; i <CISS_MAX_LUN; i++) - { -#ifdef CCISS_DEBUG - printk("Checking Index %d\n", i); -#endif /* CCISS_DEBUG */ - if(h->drv[i].LunID == 0) - { -#ifdef CCISS_DEBUG - printk("free index found at %d\n", i); -#endif /* CCISS_DEBUG */ - free_index_found = 1; - free_index = i; - break; - } - } - if (!free_index_found) - { - printk(KERN_WARNING "cciss: unable to find free slot for disk\n"); - goto free_err; - } - - logvol = free_index; - h->drv[logvol].LunID = lunid; - /* there could be gaps in lun numbers, track hightest */ - if(h->highest_lun < lunid) - h->highest_lun = logvol; - cciss_read_capacity(ctlr, logvol, size_buff, 1, - &total_size, &block_size); - cciss_geometry_inquiry(ctlr, logvol, 1, total_size, block_size, - inq_buff, &h->drv[logvol]); - h->drv[logvol].usage_count = 0; - ++h->num_luns; - /* setup partitions per disk */ - disk = h->gendisk[logvol]; - set_capacity(disk, h->drv[logvol].nr_blocks); - /* if it's the controller it's already added */ - if(logvol) - add_disk(disk); -freeret: - kfree(ld_buff); - kfree(size_buff); - kfree(inq_buff); - return (logvol); -mem_msg: - printk(KERN_ERR "cciss: out of memory\n"); -free_err: - logvol = -1; - goto freeret; -} - static int cciss_revalidate(struct gendisk *disk) { ctlr_info_t *h = get_host(disk); @@ -1713,10 +1864,9 @@ static unsigned long pollcomplete(int ctlr) for (i = 20 * HZ; i > 0; i--) { done = hba[ctlr]->access.command_completed(hba[ctlr]); - if (done == FIFO_EMPTY) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(1); - } else + if (done == FIFO_EMPTY) + schedule_timeout_uninterruptible(1); + else return (done); } /* Invalid address to tell caller we ran out of time */ @@ -1860,8 +2010,10 @@ resend_cmd1: cleanup1: /* unlock the data buffer from DMA */ + buff_dma_handle.val32.lower = c->SG[0].Addr.lower; + buff_dma_handle.val32.upper = c->SG[0].Addr.upper; pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val, - size, PCI_DMA_BIDIRECTIONAL); + c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); cmd_free(info_p, c, 1); return (status); } @@ -2112,7 +2264,11 @@ queue: /* fill in the request */ drv = creq->rq_disk->private_data; c->Header.ReplyQueue = 0; // unused in simple mode - c->Header.Tag.lower = c->busaddr; // use the physical address the cmd block for tag + /* got command from pool, so use the command block index instead */ + /* for direct lookups. */ + /* The first 2 bits are reserved for controller error reporting. */ + c->Header.Tag.lower = (c->cmdindex << 3); + c->Header.Tag.lower |= 0x04; /* flag for direct lookup. 
*/ c->Header.LUN.LogDev.VolId= drv->LunID; c->Header.LUN.LogDev.Mode = 1; c->Request.CDBLen = 10; // 12 byte commands not in FW yet; @@ -2187,7 +2343,7 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs) ctlr_info_t *h = dev_id; CommandList_struct *c; unsigned long flags; - __u32 a, a1; + __u32 a, a1, a2; int j; int start_queue = h->next_to_run; @@ -2205,10 +2361,21 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs) while((a = h->access.command_completed(h)) != FIFO_EMPTY) { a1 = a; + if ((a & 0x04)) { + a2 = (a >> 3); + if (a2 >= NR_CMDS) { + printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr); + fail_all_cmds(h->ctlr); + return IRQ_HANDLED; + } + + c = h->cmd_pool + a2; + a = c->busaddr; + + } else { a &= ~3; - if ((c = h->cmpQ) == NULL) - { - printk(KERN_WARNING "cciss: Completion of %08lx ignored\n", (unsigned long)a1); + if ((c = h->cmpQ) == NULL) { + printk(KERN_WARNING "cciss: Completion of %08x ignored\n", a1); continue; } while(c->busaddr != a) { @@ -2216,6 +2383,7 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs) if (c == h->cmpQ) break; } + } /* * If we've found the command, take it off the * completion Q and free it @@ -2635,12 +2803,16 @@ static void cciss_getgeometry(int cntl_num) #endif /* CCISS_DEBUG */ hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1; - for(i=0; i< hba[cntl_num]->num_luns; i++) +// for(i=0; i< hba[cntl_num]->num_luns; i++) + for(i=0; i < CISS_MAX_LUN; i++) { - - lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24; - lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16; - lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8; + if (i < hba[cntl_num]->num_luns){ + lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) + << 24; + lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) + << 16; + lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) + << 8; lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]); hba[cntl_num]->drv[i].LunID = lunid; @@ -2648,13 +2820,18 @@ static void cciss_getgeometry(int cntl_num) #ifdef CCISS_DEBUG printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i, - ld_buff->LUN[i][0], ld_buff->LUN[i][1],ld_buff->LUN[i][2], - ld_buff->LUN[i][3], hba[cntl_num]->drv[i].LunID); + ld_buff->LUN[i][0], ld_buff->LUN[i][1], + ld_buff->LUN[i][2], ld_buff->LUN[i][3], + hba[cntl_num]->drv[i].LunID); #endif /* CCISS_DEBUG */ cciss_read_capacity(cntl_num, i, size_buff, 0, &total_size, &block_size); - cciss_geometry_inquiry(cntl_num, i, 0, total_size, block_size, - inq_buff, &hba[cntl_num]->drv[i]); + cciss_geometry_inquiry(cntl_num, i, 0, total_size, + block_size, inq_buff, &hba[cntl_num]->drv[i]); + } else { + /* initialize raid_level to indicate a free space */ + hba[cntl_num]->drv[i].raid_level = -1; + } } kfree(ld_buff); kfree(size_buff); @@ -2728,6 +2905,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, i = alloc_cciss_hba(); if(i < 0) return (-1); + + hba[i]->busy_initializing = 1; + if (cciss_pci_init(hba[i], pdev) != 0) goto clean1; @@ -2808,6 +2988,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON); cciss_procinit(i); + hba[i]->busy_initializing = 0; for(j=0; j < NWD; j++) { /* mfm */ drive_info_struct *drv = &(hba[i]->drv[j]); @@ -2870,6 +3051,7 @@ clean2: clean1: release_io_mem(hba[i]); free_hba(i); + hba[i]->busy_initializing = 0; return(-1); } @@ -2914,9 +3096,10 @@ static void __devexit cciss_remove_one (struct pci_dev *pdev) /* remove it 
from the disk list */ for (j = 0; j < NWD; j++) { struct gendisk *disk = hba[i]->gendisk[j]; - if (disk->flags & GENHD_FL_UP) - blk_cleanup_queue(disk->queue); + if (disk->flags & GENHD_FL_UP) { del_gendisk(disk); + blk_cleanup_queue(disk->queue); + } } pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct), @@ -2965,5 +3148,43 @@ static void __exit cciss_cleanup(void) remove_proc_entry("cciss", proc_root_driver); } +static void fail_all_cmds(unsigned long ctlr) +{ + /* If we get here, the board is apparently dead. */ + ctlr_info_t *h = hba[ctlr]; + CommandList_struct *c; + unsigned long flags; + + printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr); + h->alive = 0; /* the controller apparently died... */ + + spin_lock_irqsave(CCISS_LOCK(ctlr), flags); + + pci_disable_device(h->pdev); /* Make sure it is really dead. */ + + /* move everything off the request queue onto the completed queue */ + while( (c = h->reqQ) != NULL ) { + removeQ(&(h->reqQ), c); + h->Qdepth--; + addQ (&(h->cmpQ), c); + } + + /* Now, fail everything on the completed queue with a HW error */ + while( (c = h->cmpQ) != NULL ) { + removeQ(&h->cmpQ, c); + c->err_info->CommandStatus = CMD_HARDWARE_ERR; + if (c->cmd_type == CMD_RWREQ) { + complete_command(h, c, 0); + } else if (c->cmd_type == CMD_IOCTL_PEND) + complete(c->waiting); +#ifdef CONFIG_CISS_SCSI_TAPE + else if (c->cmd_type == CMD_SCSI) + complete_scsi_command(c, 0, 0); +#endif + } + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + return; +} + module_init(cciss_init); module_exit(cciss_cleanup); diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index 566587d0a500..ef277baee9fd 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h @@ -35,7 +35,13 @@ typedef struct _drive_info_struct int heads; int sectors; int cylinders; - int raid_level; + int raid_level; /* set to -1 to indicate that + * the drive is not in use/configured + */ + int busy_configuring; /*This is set when the drive is being removed + *to prevent it from being opened or it's queue + *from being started. + */ } drive_info_struct; struct ctlr_info @@ -83,6 +89,7 @@ struct ctlr_info int nr_allocs; int nr_frees; int busy_configuring; + int busy_initializing; /* This element holds the zero based queue number of the last * queue to be started. It is used for fairness. @@ -94,6 +101,7 @@ struct ctlr_info #ifdef CONFIG_CISS_SCSI_TAPE void *scsi_ctlr; /* ptr to structure containing scsi related stuff */ #endif + unsigned char alive; }; /* Defining the diffent access_menthods */ diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h index a88a88817623..53fea549ba8b 100644 --- a/drivers/block/cciss_cmd.h +++ b/drivers/block/cciss_cmd.h @@ -226,6 +226,10 @@ typedef struct _ErrorInfo_struct { #define CMD_MSG_DONE 0x04 #define CMD_MSG_TIMEOUT 0x05 +/* This structure needs to be divisible by 8 for new + * indexing method. 
+ */ +#define PADSIZE (sizeof(long) - 4) typedef struct _CommandList_struct { CommandListHeader_struct Header; RequestBlock_struct Request; @@ -236,14 +240,14 @@ typedef struct _CommandList_struct { ErrorInfo_struct * err_info; /* pointer to the allocated mem */ int ctlr; int cmd_type; + long cmdindex; struct _CommandList_struct *prev; struct _CommandList_struct *next; struct request * rq; struct completion *waiting; int retry_count; -#ifdef CONFIG_CISS_SCSI_TAPE void * scsi_cmd; -#endif + char pad[PADSIZE]; } CommandList_struct; //Configuration Table Structure diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index f16e3caed58a..e183a3ef7839 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -93,6 +93,7 @@ struct cciss_scsi_cmd_stack_elem_t { CommandList_struct cmd; ErrorInfo_struct Err; __u32 busaddr; + __u32 pad; }; #pragma pack() @@ -877,7 +878,7 @@ cciss_scsi_interpret_error(CommandList_struct *cp) static int cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr, - InquiryData_struct *buf) + unsigned char *buf, unsigned char bufsize) { int rc; CommandList_struct *cp; @@ -900,11 +901,10 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr, cdb[1] = 0; cdb[2] = 0; cdb[3] = 0; - cdb[4] = sizeof(*buf) & 0xff; + cdb[4] = bufsize; cdb[5] = 0; rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, cdb, - 6, (unsigned char *) buf, - sizeof(*buf), XFER_READ); + 6, buf, bufsize, XFER_READ); if (rc != 0) return rc; /* something went wrong */ @@ -1000,9 +1000,10 @@ cciss_update_non_disk_devices(int cntl_num, int hostno) that though. */ - +#define OBDR_TAPE_INQ_SIZE 49 +#define OBDR_TAPE_SIG "$DR-10" ReportLunData_struct *ld_buff; - InquiryData_struct *inq_buff; + unsigned char *inq_buff; unsigned char scsi3addr[8]; ctlr_info_t *c; __u32 num_luns=0; @@ -1020,7 +1021,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno) return; } memset(ld_buff, 0, reportlunsize); - inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL); + inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); if (inq_buff == NULL) { printk(KERN_ERR "cciss: out of memory\n"); kfree(ld_buff); @@ -1051,19 +1052,36 @@ cciss_update_non_disk_devices(int cntl_num, int hostno) /* for each physical lun, do an inquiry */ if (ld_buff->LUN[i][3] & 0xC0) continue; - memset(inq_buff, 0, sizeof(InquiryData_struct)); + memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE); memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8); - if (cciss_scsi_do_inquiry(hba[cntl_num], - scsi3addr, inq_buff) != 0) - { + if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, inq_buff, + (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { /* Inquiry failed (msg printed already) */ devtype = 0; /* so we will skip this device. */ } else /* what kind of device is this? */ - devtype = (inq_buff->data_byte[0] & 0x1f); + devtype = (inq_buff[0] & 0x1f); switch (devtype) { + case 0x05: /* CD-ROM */ { + + /* We don't *really* support actual CD-ROM devices, + * just this "One Button Disaster Recovery" tape drive + * which temporarily pretends to be a CD-ROM drive. + * So we check that the device is really an OBDR tape + * device by checking for "$DR-10" in bytes 43-48 of + * the inquiry data. + */ + char obdr_sig[7]; + + strncpy(obdr_sig, &inq_buff[43], 6); + obdr_sig[6] = '\0'; + if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) + /* Not OBDR device, ignore it. */ + break; + } + /* fall through . . . 
*/ case 0x01: /* sequential access, (tape) */ case 0x08: /* medium changer */ if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) { @@ -1126,6 +1144,7 @@ cciss_scsi_proc_info(struct Scsi_Host *sh, int buflen, datalen; ctlr_info_t *ci; + int i; int cntl_num; @@ -1136,8 +1155,28 @@ cciss_scsi_proc_info(struct Scsi_Host *sh, cntl_num = ci->ctlr; /* Get our index into the hba[] array */ if (func == 0) { /* User is reading from /proc/scsi/ciss*?/?* */ - buflen = sprintf(buffer, "hostnum=%d\n", sh->host_no); - + buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n", + cntl_num, sh->host_no); + + /* this information is needed by apps to know which cciss + device corresponds to which scsi host number without + having to open a scsi target device node. The device + information is not a duplicate of /proc/scsi/scsi because + the two may be out of sync due to scsi hotplug, rather + this info is for an app to be able to use to know how to + get them back in sync. */ + + for (i=0;i<ccissscsi[cntl_num].ndevices;i++) { + struct cciss_scsi_dev_t *sd = &ccissscsi[cntl_num].dev[i]; + buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d " + "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + sh->host_no, sd->bus, sd->target, sd->lun, + sd->devtype, + sd->scsi3addr[0], sd->scsi3addr[1], + sd->scsi3addr[2], sd->scsi3addr[3], + sd->scsi3addr[4], sd->scsi3addr[5], + sd->scsi3addr[6], sd->scsi3addr[7]); + } datalen = buflen - offset; if (datalen < 0) { /* they're reading past EOF. */ datalen = 0; @@ -1399,7 +1438,7 @@ cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len) CPQ_TAPE_LOCK(ctlr, flags); size = sprintf(buffer + *len, - " Sequential access devices: %d\n\n", + "Sequential access devices: %d\n\n", ccissscsi[ctlr].ndevices); CPQ_TAPE_UNLOCK(ctlr, flags); *pos += size; *len += size; diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c index 5be6f998d8c5..3d4261c39f16 100644 --- a/drivers/block/cryptoloop.c +++ b/drivers/block/cryptoloop.c @@ -57,9 +57,11 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info) mode = strsep(&cmsp, "-"); if (mode == NULL || strcmp(mode, "cbc") == 0) - tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC); + tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC | + CRYPTO_TFM_REQ_MAY_SLEEP); else if (strcmp(mode, "ecb") == 0) - tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB); + tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB | + CRYPTO_TFM_REQ_MAY_SLEEP); if (tfm == NULL) return -EINVAL; diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c index ff5201e02153..52a3ae5289a0 100644 --- a/drivers/block/deadline-iosched.c +++ b/drivers/block/deadline-iosched.c @@ -507,18 +507,15 @@ static int deadline_dispatch_requests(struct deadline_data *dd) const int reads = !list_empty(&dd->fifo_list[READ]); const int writes = !list_empty(&dd->fifo_list[WRITE]); struct deadline_rq *drq; - int data_dir, other_dir; + int data_dir; /* * batches are currently reads XOR writes */ - drq = NULL; - - if (dd->next_drq[READ]) - drq = dd->next_drq[READ]; - if (dd->next_drq[WRITE]) drq = dd->next_drq[WRITE]; + else + drq = dd->next_drq[READ]; if (drq) { /* we have a "next request" */ @@ -544,7 +541,6 @@ static int deadline_dispatch_requests(struct deadline_data *dd) goto dispatch_writes; data_dir = READ; - other_dir = WRITE; goto dispatch_find_request; } @@ -560,7 +556,6 @@ dispatch_writes: dd->starved = 0; data_dir = WRITE; - other_dir = READ; goto dispatch_find_request; } diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c 
index f0c1084b840f..00895477155e 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -493,6 +493,8 @@ static struct floppy_struct user_params[N_DRIVE]; static sector_t floppy_sizes[256]; +static char floppy_device_name[] = "floppy"; + /* * The driver is trying to determine the correct media format * while probing is set. rw_interrupt() clears it after a @@ -626,7 +628,7 @@ static inline void debugt(const char *message) { } #endif /* DEBUGT */ typedef void (*timeout_fn) (unsigned long); -static struct timer_list fd_timeout = TIMER_INITIALIZER(floppy_shutdown, 0, 0); +static DEFINE_TIMER(fd_timeout, floppy_shutdown, 0, 0); static const char *timeout_message; @@ -1010,7 +1012,7 @@ static void schedule_bh(void (*handler) (void)) schedule_work(&floppy_work); } -static struct timer_list fd_timer = TIMER_INITIALIZER(NULL, 0, 0); +static DEFINE_TIMER(fd_timer, NULL, 0, 0); static void cancel_activity(void) { @@ -4191,18 +4193,24 @@ static int __init floppy_setup(char *str) static int have_no_fdc = -ENODEV; +static ssize_t floppy_cmos_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *p; + int drive; + + p = container_of(dev, struct platform_device,dev); + drive = p->id; + return sprintf(buf, "%X\n", UDP->cmos); +} +DEVICE_ATTR(cmos,S_IRUGO,floppy_cmos_show,NULL); + static void floppy_device_release(struct device *dev) { complete(&device_release); } -static struct platform_device floppy_device = { - .name = "floppy", - .id = 0, - .dev = { - .release = floppy_device_release, - } -}; +static struct platform_device floppy_device[N_DRIVE]; static struct kobject *floppy_find(dev_t dev, int *part, void *data) { @@ -4370,20 +4378,26 @@ static int __init floppy_init(void) goto out_flush_work; } - err = platform_device_register(&floppy_device); - if (err) - goto out_flush_work; - for (drive = 0; drive < N_DRIVE; drive++) { if (!(allowed_drive_mask & (1 << drive))) continue; if (fdc_state[FDC(drive)].version == FDC_NONE) continue; + + floppy_device[drive].name = floppy_device_name; + floppy_device[drive].id = drive; + floppy_device[drive].dev.release = floppy_device_release; + + err = platform_device_register(&floppy_device[drive]); + if (err) + goto out_flush_work; + + device_create_file(&floppy_device[drive].dev,&dev_attr_cmos); /* to be cleaned up... 
*/ disks[drive]->private_data = (void *)(long)drive; disks[drive]->queue = floppy_queue; disks[drive]->flags |= GENHD_FL_REMOVABLE; - disks[drive]->driverfs_dev = &floppy_device.dev; + disks[drive]->driverfs_dev = &floppy_device[drive].dev; add_disk(disks[drive]); } @@ -4603,10 +4617,11 @@ void cleanup_module(void) fdc_state[FDC(drive)].version != FDC_NONE) { del_gendisk(disks[drive]); unregister_devfs_entries(drive); + device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos); + platform_device_unregister(&floppy_device[drive]); } put_disk(disks[drive]); } - platform_device_unregister(&floppy_device); devfs_remove("floppy"); del_timer_sync(&fd_timeout); diff --git a/drivers/block/genhd.c b/drivers/block/genhd.c index 47fd3659a061..d42840cc0d1d 100644 --- a/drivers/block/genhd.c +++ b/drivers/block/genhd.c @@ -45,7 +45,7 @@ int get_blkdev_list(char *p, int used) struct blk_major_name *n; int i, len; - len = sprintf(p, "\nBlock devices:\n"); + len = snprintf(p, (PAGE_SIZE-used), "\nBlock devices:\n"); down(&block_subsys_sem); for (i = 0; i < ARRAY_SIZE(major_names); i++) { diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c index 3c818544475e..baedac522945 100644 --- a/drivers/block/ll_rw_blk.c +++ b/drivers/block/ll_rw_blk.c @@ -235,8 +235,8 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) * set defaults */ q->nr_requests = BLKDEV_MAX_RQ; - q->max_phys_segments = MAX_PHYS_SEGMENTS; - q->max_hw_segments = MAX_HW_SEGMENTS; + blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS); + blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS); q->make_request_fn = mfn; q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; q->backing_dev_info.state = 0; @@ -284,6 +284,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq) rq->special = NULL; rq->data_len = 0; rq->data = NULL; + rq->nr_phys_segments = 0; rq->sense = NULL; rq->end_io = NULL; rq->end_io_data = NULL; @@ -2115,7 +2116,7 @@ EXPORT_SYMBOL(blk_insert_request); /** * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage * @q: request queue where request should be inserted - * @rw: READ or WRITE data + * @rq: request structure to fill * @ubuf: the user buffer * @len: length of user data * @@ -2132,21 +2133,19 @@ EXPORT_SYMBOL(blk_insert_request); * original bio must be passed back in to blk_rq_unmap_user() for proper * unmapping. 
*/ -struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf, - unsigned int len) +int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf, + unsigned int len) { unsigned long uaddr; - struct request *rq; struct bio *bio; + int reading; if (len > (q->max_sectors << 9)) - return ERR_PTR(-EINVAL); - if ((!len && ubuf) || (len && !ubuf)) - return ERR_PTR(-EINVAL); + return -EINVAL; + if (!len || !ubuf) + return -EINVAL; - rq = blk_get_request(q, rw, __GFP_WAIT); - if (!rq) - return ERR_PTR(-ENOMEM); + reading = rq_data_dir(rq) == READ; /* * if alignment requirement is satisfied, map in user pages for @@ -2154,9 +2153,9 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf, */ uaddr = (unsigned long) ubuf; if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) - bio = bio_map_user(q, NULL, uaddr, len, rw == READ); + bio = bio_map_user(q, NULL, uaddr, len, reading); else - bio = bio_copy_user(q, uaddr, len, rw == READ); + bio = bio_copy_user(q, uaddr, len, reading); if (!IS_ERR(bio)) { rq->bio = rq->biotail = bio; @@ -2164,28 +2163,70 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf, rq->buffer = rq->data = NULL; rq->data_len = len; - return rq; + return 0; } /* * bio is the err-ptr */ - blk_put_request(rq); - return (struct request *) bio; + return PTR_ERR(bio); } EXPORT_SYMBOL(blk_rq_map_user); /** + * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage + * @q: request queue where request should be inserted + * @rq: request to map data to + * @iov: pointer to the iovec + * @iov_count: number of elements in the iovec + * + * Description: + * Data will be mapped directly for zero copy io, if possible. Otherwise + * a kernel bounce buffer is used. + * + * A matching blk_rq_unmap_user() must be issued at the end of io, while + * still in process context. + * + * Note: The mapped bio may need to be bounced through blk_queue_bounce() + * before being submitted to the device, as pages mapped may be out of + * reach. It's the callers responsibility to make sure this happens. The + * original bio must be passed back in to blk_rq_unmap_user() for proper + * unmapping. + */ +int blk_rq_map_user_iov(request_queue_t *q, struct request *rq, + struct sg_iovec *iov, int iov_count) +{ + struct bio *bio; + + if (!iov || iov_count <= 0) + return -EINVAL; + + /* we don't allow misaligned data like bio_map_user() does. If the + * user is using sg, they're expected to know the alignment constraints + * and respect them accordingly */ + bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ); + if (IS_ERR(bio)) + return PTR_ERR(bio); + + rq->bio = rq->biotail = bio; + blk_rq_bio_prep(q, rq, bio); + rq->buffer = rq->data = NULL; + rq->data_len = bio->bi_size; + return 0; +} + +EXPORT_SYMBOL(blk_rq_map_user_iov); + +/** * blk_rq_unmap_user - unmap a request with user data - * @rq: request to be unmapped - * @bio: bio for the request + * @bio: bio to be unmapped * @ulen: length of user buffer * * Description: - * Unmap a request previously mapped by blk_rq_map_user(). + * Unmap a bio previously mapped by blk_rq_map_user(). 
*/ -int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen) +int blk_rq_unmap_user(struct bio *bio, unsigned int ulen) { int ret = 0; @@ -2196,31 +2237,89 @@ int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen) ret = bio_uncopy_user(bio); } - blk_put_request(rq); - return ret; + return 0; } EXPORT_SYMBOL(blk_rq_unmap_user); /** + * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage + * @q: request queue where request should be inserted + * @rq: request to fill + * @kbuf: the kernel buffer + * @len: length of user data + * @gfp_mask: memory allocation flags + */ +int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, + unsigned int len, unsigned int gfp_mask) +{ + struct bio *bio; + + if (len > (q->max_sectors << 9)) + return -EINVAL; + if (!len || !kbuf) + return -EINVAL; + + bio = bio_map_kern(q, kbuf, len, gfp_mask); + if (IS_ERR(bio)) + return PTR_ERR(bio); + + if (rq_data_dir(rq) == WRITE) + bio->bi_rw |= (1 << BIO_RW); + + rq->bio = rq->biotail = bio; + blk_rq_bio_prep(q, rq, bio); + + rq->buffer = rq->data = NULL; + rq->data_len = len; + return 0; +} + +EXPORT_SYMBOL(blk_rq_map_kern); + +/** + * blk_execute_rq_nowait - insert a request into queue for execution + * @q: queue to insert the request in + * @bd_disk: matching gendisk + * @rq: request to insert + * @at_head: insert request at head or tail of queue + * @done: I/O completion handler + * + * Description: + * Insert a fully prepared request at the back of the io scheduler queue + * for execution. Don't wait for completion. + */ +void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk, + struct request *rq, int at_head, + void (*done)(struct request *)) +{ + int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; + + rq->rq_disk = bd_disk; + rq->flags |= REQ_NOMERGE; + rq->end_io = done; + elv_add_request(q, rq, where, 1); + generic_unplug_device(q); +} + +/** * blk_execute_rq - insert a request into queue for execution * @q: queue to insert the request in * @bd_disk: matching gendisk * @rq: request to insert + * @at_head: insert request at head or tail of queue * * Description: * Insert a fully prepared request at the back of the io scheduler queue - * for execution. + * for execution and wait for completion. 
*/ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk, - struct request *rq) + struct request *rq, int at_head) { DECLARE_COMPLETION(wait); char sense[SCSI_SENSE_BUFFERSIZE]; int err = 0; - rq->rq_disk = bd_disk; - /* * we need an extra reference to the request, so we can look at * it after io completion @@ -2233,11 +2332,8 @@ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk, rq->sense_len = 0; } - rq->flags |= REQ_NOMERGE; rq->waiting = &wait; - rq->end_io = blk_end_sync_rq; - elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1); - generic_unplug_device(q); + blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq); wait_for_completion(&wait); rq->waiting = NULL; diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 7289f67e9568..ac5ba462710b 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -516,8 +516,7 @@ static int pcd_tray_move(struct cdrom_device_info *cdi, int position) static void pcd_sleep(int cs) { - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(cs); + schedule_timeout_interruptible(cs); } static int pcd_reset(struct pcd_unit *cd) diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 060b1f2a91dd..94af920465b5 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -507,8 +507,7 @@ static void pf_eject(struct pf_unit *pf) static void pf_sleep(int cs) { - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(cs); + schedule_timeout_interruptible(cs); } /* the ATAPI standard actually specifies the contents of all 7 registers @@ -751,6 +750,14 @@ static int pf_ready(void) static struct request_queue *pf_queue; +static void pf_end_request(int uptodate) +{ + if (pf_req) { + end_request(pf_req, uptodate); + pf_req = NULL; + } +} + static void do_pf_request(request_queue_t * q) { if (pf_busy) @@ -766,7 +773,7 @@ repeat: pf_count = pf_req->current_nr_sectors; if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { - end_request(pf_req, 0); + pf_end_request(0); goto repeat; } @@ -781,7 +788,7 @@ repeat: pi_do_claimed(pf_current->pi, do_pf_write); else { pf_busy = 0; - end_request(pf_req, 0); + pf_end_request(0); goto repeat; } } @@ -799,9 +806,11 @@ static int pf_next_buf(void) if (!pf_count) return 1; spin_lock_irqsave(&pf_spin_lock, saved_flags); - end_request(pf_req, 1); - pf_count = pf_req->current_nr_sectors; - pf_buf = pf_req->buffer; + pf_end_request(1); + if (pf_req) { + pf_count = pf_req->current_nr_sectors; + pf_buf = pf_req->buffer; + } spin_unlock_irqrestore(&pf_spin_lock, saved_flags); return 1; } @@ -811,7 +820,7 @@ static inline void next_request(int success) unsigned long saved_flags; spin_lock_irqsave(&pf_spin_lock, saved_flags); - end_request(pf_req, success); + pf_end_request(success); pf_busy = 0; do_pf_request(pf_queue); spin_unlock_irqrestore(&pf_spin_lock, saved_flags); diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c index 84d8e291ed96..b3982395f22b 100644 --- a/drivers/block/paride/pg.c +++ b/drivers/block/paride/pg.c @@ -276,8 +276,7 @@ static inline u8 DRIVE(struct pg *dev) static void pg_sleep(int cs) { - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(cs); + schedule_timeout_interruptible(cs); } static int pg_wait(struct pg *dev, int go, int stop, unsigned long tmo, char *msg) diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c index 5fe8ee86f095..d8d35233cf49 100644 --- a/drivers/block/paride/pt.c +++ b/drivers/block/paride/pt.c @@ -383,8 +383,7 @@ static int pt_atapi(struct pt_unit *tape, 
char *cmd, int dlen, char *buf, char * static void pt_sleep(int cs) { - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(cs); + schedule_timeout_interruptible(cs); } static int pt_poll_dsc(struct pt_unit *tape, int pause, int tmo, char *msg) diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 7b838342f0a3..7e22a58926b8 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -5,29 +5,41 @@ * May be copied or modified under the terms of the GNU General Public * License. See linux/COPYING for more information. * - * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and - * DVD-RW devices (aka an exercise in block layer masturbation) + * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and + * DVD-RAM devices. * + * Theory of operation: * - * TODO: (circa order of when I will fix it) - * - Only able to write on CD-RW media right now. - * - check host application code on media and set it in write page - * - interface for UDF <-> packet to negotiate a new location when a write - * fails. - * - handle OPC, especially for -RW media + * At the lowest level, there is the standard driver for the CD/DVD device, + * typically ide-cd.c or sr.c. This driver can handle read and write requests, + * but it doesn't know anything about the special restrictions that apply to + * packet writing. One restriction is that write requests must be aligned to + * packet boundaries on the physical media, and the size of a write request + * must be equal to the packet size. Another restriction is that a + * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read + * command, if the previous command was a write. * - * Theory of operation: + * The purpose of the packet writing driver is to hide these restrictions from + * higher layers, such as file systems, and present a block device that can be + * randomly read and written using 2kB-sized blocks. + * + * The lowest layer in the packet writing driver is the packet I/O scheduler. + * Its data is defined by the struct packet_iosched and includes two bio + * queues with pending read and write requests. These queues are processed + * by the pkt_iosched_process_queue() function. The write requests in this + * queue are already properly aligned and sized. This layer is responsible for + * issuing the flush cache commands and scheduling the I/O in a good order. * - * We use a custom make_request_fn function that forwards reads directly to - * the underlying CD device. Write requests are either attached directly to - * a live packet_data object, or simply stored sequentially in a list for - * later processing by the kcdrwd kernel thread. This driver doesn't use - * any elevator functionally as defined by the elevator_s struct, but the - * underlying CD device uses a standard elevator. + * The next layer transforms unaligned write requests to aligned writes. This + * transformation requires reading missing pieces of data from the underlying + * block device, assembling the pieces to full packets and queuing them to the + * packet I/O scheduler. * - * This strategy makes it possible to do very late merging of IO requests. - * A new bio sent to pkt_make_request can be merged with a live packet_data - * object even if the object is in the data gathering state. + * At the top layer there is a custom make_request_fn function that forwards + * read requests directly to the iosched queue and puts write requests in the + * unaligned write queue. 
A kernel thread performs the necessary read + * gathering to convert the unaligned writes to aligned writes and then feeds + * them to the packet I/O scheduler. * *************************************************************************/ @@ -100,10 +112,9 @@ static struct bio *pkt_bio_alloc(int nr_iovecs) goto no_bio; bio_init(bio); - bvl = kmalloc(nr_iovecs * sizeof(struct bio_vec), GFP_KERNEL); + bvl = kcalloc(nr_iovecs, sizeof(struct bio_vec), GFP_KERNEL); if (!bvl) goto no_bvl; - memset(bvl, 0, nr_iovecs * sizeof(struct bio_vec)); bio->bi_max_vecs = nr_iovecs; bio->bi_io_vec = bvl; @@ -125,10 +136,9 @@ static struct packet_data *pkt_alloc_packet_data(void) int i; struct packet_data *pkt; - pkt = kmalloc(sizeof(struct packet_data), GFP_KERNEL); + pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL); if (!pkt) goto no_pkt; - memset(pkt, 0, sizeof(struct packet_data)); pkt->w_bio = pkt_bio_alloc(PACKET_MAX_SIZE); if (!pkt->w_bio) @@ -659,7 +669,6 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct page **pages, in } offs += CD_FRAMESIZE; if (offs >= PAGE_SIZE) { - BUG_ON(offs > PAGE_SIZE); offs = 0; p++; } @@ -724,12 +733,6 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) atomic_set(&pkt->io_wait, 0); atomic_set(&pkt->io_errors, 0); - if (pkt->cache_valid) { - VPRINTK("pkt_gather_data: zone %llx cached\n", - (unsigned long long)pkt->sector); - goto out_account; - } - /* * Figure out which frames we need to read before we can write. */ @@ -738,6 +741,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) for (bio = pkt->orig_bios; bio; bio = bio->bi_next) { int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); int num_frames = bio->bi_size / CD_FRAMESIZE; + pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); BUG_ON(first_frame < 0); BUG_ON(first_frame + num_frames > pkt->frames); for (f = first_frame; f < first_frame + num_frames; f++) @@ -745,6 +749,12 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) } spin_unlock(&pkt->lock); + if (pkt->cache_valid) { + VPRINTK("pkt_gather_data: zone %llx cached\n", + (unsigned long long)pkt->sector); + goto out_account; + } + /* * Schedule reads for missing parts of the packet. 
*/ @@ -778,7 +788,6 @@ out_account: frames_read, (unsigned long long)pkt->sector); pd->stats.pkt_started++; pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9); - pd->stats.secs_w += pd->settings.size; } /* @@ -794,10 +803,11 @@ static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zo list_del_init(&pkt->list); if (pkt->sector != zone) pkt->cache_valid = 0; - break; + return pkt; } } - return pkt; + BUG(); + return NULL; } static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt) @@ -941,12 +951,10 @@ try_next_bio: } pkt = pkt_get_packet_data(pd, zone); - BUG_ON(!pkt); pd->current_sector = zone + pd->settings.size; pkt->sector = zone; pkt->frames = pd->settings.size >> 2; - BUG_ON(pkt->frames > PACKET_MAX_SIZE); pkt->write_size = 0; /* @@ -1636,6 +1644,10 @@ static int pkt_probe_settings(struct pktcdvd_device *pd) printk("pktcdvd: detected zero packet size!\n"); pd->settings.size = 128; } + if (pd->settings.size > PACKET_MAX_SECTORS) { + printk("pktcdvd: packet size is too big\n"); + return -ENXIO; + } pd->settings.fp = ti.fp; pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1); @@ -2198,7 +2210,6 @@ static int pkt_make_request(request_queue_t *q, struct bio *bio) * No matching packet found. Store the bio in the work queue. */ node = mempool_alloc(pd->rb_pool, GFP_NOIO); - BUG_ON(!node); node->bio = bio; spin_lock(&pd->lock); BUG_ON(pd->bio_queue_size < 0); @@ -2406,7 +2417,6 @@ static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data; VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode)); - BUG_ON(!pd); switch (cmd) { /* @@ -2477,10 +2487,9 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd) return -EBUSY; } - pd = kmalloc(sizeof(struct pktcdvd_device), GFP_KERNEL); + pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL); if (!pd) return ret; - memset(pd, 0, sizeof(struct pktcdvd_device)); pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL); if (!pd->rb_pool) diff --git a/drivers/block/ps2esdi.c b/drivers/block/ps2esdi.c index 29548784cb7b..29d1518be72a 100644 --- a/drivers/block/ps2esdi.c +++ b/drivers/block/ps2esdi.c @@ -99,8 +99,7 @@ static DECLARE_WAIT_QUEUE_HEAD(ps2esdi_int); static int no_int_yet; static int ps2esdi_drives; static u_short io_base; -static struct timer_list esdi_timer = - TIMER_INITIALIZER(ps2esdi_reset_timer, 0, 0); +static DEFINE_TIMER(esdi_timer, ps2esdi_reset_timer, 0, 0); static int reset_status; static int ps2esdi_slot = -1; static int tp720esdi = 0; /* Is it Integrated ESDI of ThinkPad-720? 
*/ diff --git a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c index 681871ca5d60..079ec344eb47 100644 --- a/drivers/block/scsi_ioctl.c +++ b/drivers/block/scsi_ioctl.c @@ -123,6 +123,7 @@ static int verify_command(struct file *file, unsigned char *cmd) safe_for_read(READ_12), safe_for_read(READ_16), safe_for_read(READ_BUFFER), + safe_for_read(READ_DEFECT_DATA), safe_for_read(READ_LONG), safe_for_read(INQUIRY), safe_for_read(MODE_SENSE), @@ -167,6 +168,7 @@ static int verify_command(struct file *file, unsigned char *cmd) safe_for_write(WRITE_VERIFY_12), safe_for_write(WRITE_16), safe_for_write(WRITE_LONG), + safe_for_write(WRITE_LONG_2), safe_for_write(ERASE), safe_for_write(GPCMD_MODE_SELECT_10), safe_for_write(MODE_SELECT), @@ -216,7 +218,7 @@ static int sg_io(struct file *file, request_queue_t *q, struct gendisk *bd_disk, struct sg_io_hdr *hdr) { unsigned long start_time; - int reading, writing; + int writing = 0, ret = 0; struct request *rq; struct bio *bio; char sense[SCSI_SENSE_BUFFERSIZE]; @@ -231,38 +233,48 @@ static int sg_io(struct file *file, request_queue_t *q, if (verify_command(file, cmd)) return -EPERM; - /* - * we'll do that later - */ - if (hdr->iovec_count) - return -EOPNOTSUPP; - if (hdr->dxfer_len > (q->max_sectors << 9)) return -EIO; - reading = writing = 0; - if (hdr->dxfer_len) { + if (hdr->dxfer_len) switch (hdr->dxfer_direction) { default: return -EINVAL; case SG_DXFER_TO_FROM_DEV: - reading = 1; - /* fall through */ case SG_DXFER_TO_DEV: writing = 1; break; case SG_DXFER_FROM_DEV: - reading = 1; break; } - rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp, - hdr->dxfer_len); + rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL); + if (!rq) + return -ENOMEM; + + if (hdr->iovec_count) { + const int size = sizeof(struct sg_iovec) * hdr->iovec_count; + struct sg_iovec *iov; + + iov = kmalloc(size, GFP_KERNEL); + if (!iov) { + ret = -ENOMEM; + goto out; + } + + if (copy_from_user(iov, hdr->dxferp, size)) { + kfree(iov); + ret = -EFAULT; + goto out; + } + + ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count); + kfree(iov); + } else if (hdr->dxfer_len) + ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len); - if (IS_ERR(rq)) - return PTR_ERR(rq); - } else - rq = blk_get_request(q, READ, __GFP_WAIT); + if (ret) + goto out; /* * fill in request structure @@ -298,7 +310,7 @@ static int sg_io(struct file *file, request_queue_t *q, * (if he doesn't check that is his problem). * N.B. a non-zero SCSI status is _not_ necessarily an error. */ - blk_execute_rq(q, bd_disk, rq); + blk_execute_rq(q, bd_disk, rq, 0); /* write to all output members */ hdr->status = 0xff & rq->errors; @@ -320,12 +332,14 @@ static int sg_io(struct file *file, request_queue_t *q, hdr->sb_len_wr = len; } - if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len)) - return -EFAULT; + if (blk_rq_unmap_user(bio, hdr->dxfer_len)) + ret = -EFAULT; /* may not have succeeded, but output values written to control * structure (struct sg_io_hdr). 
*/ - return 0; +out: + blk_put_request(rq); + return ret; } #define OMAX_SB_LEN 16 /* For backward compatibility */ @@ -408,7 +422,7 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q, rq->data_len = bytes; rq->flags |= REQ_BLOCK_PC; - blk_execute_rq(q, bd_disk, rq); + blk_execute_rq(q, bd_disk, rq, 0); err = rq->errors & 0xff; /* only 8 bit SCSI status */ if (err) { if (rq->sense_len && rq->sense) { @@ -561,7 +575,7 @@ int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd, rq->cmd[0] = GPCMD_START_STOP_UNIT; rq->cmd[4] = 0x02 + (close != 0); rq->cmd_len = 6; - err = blk_execute_rq(q, bd_disk, rq); + err = blk_execute_rq(q, bd_disk, rq, 0); blk_put_request(rq); break; default: diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index e5f7494c00ee..e425ad3eebba 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -834,8 +834,7 @@ static int fd_eject(struct floppy_state *fs) break; } swim3_select(fs, RELAX); - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); + schedule_timeout_interruptible(1); if (swim3_readbit(fs, DISK_IN) == 0) break; } @@ -906,8 +905,7 @@ static int floppy_open(struct inode *inode, struct file *filp) break; } swim3_select(fs, RELAX); - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); + schedule_timeout_interruptible(1); } if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0 || swim3_readbit(fs, DISK_IN) == 0)) @@ -992,8 +990,7 @@ static int floppy_revalidate(struct gendisk *disk) if (signal_pending(current)) break; swim3_select(fs, RELAX); - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); + schedule_timeout_interruptible(1); } ret = swim3_readbit(fs, SEEK_COMPLETE) == 0 || swim3_readbit(fs, DISK_IN) == 0; diff --git a/drivers/block/swim_iop.c b/drivers/block/swim_iop.c index a1283f6dc018..89e3c2f8b776 100644 --- a/drivers/block/swim_iop.c +++ b/drivers/block/swim_iop.c @@ -338,8 +338,7 @@ static int swimiop_eject(struct floppy_state *fs) err = -EINTR; break; } - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); + schedule_timeout_interruptible(1); } release_drive(fs); return cmd->error; diff --git a/drivers/block/ub.c b/drivers/block/ub.c index a026567f5d18..ed4d5006fe62 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -16,9 +16,10 @@ * -- verify the 13 conditions and do bulk resets * -- kill last_pipe and simply do two-state clearing on both pipes * -- verify protocol (bulk) from USB descriptors (maybe...) - * -- highmem and sg + * -- highmem * -- move top_sense and work_bcs into separate allocations (if they survive) * for cache purists and esoteric architectures. + * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ? * -- prune comments, they are too volumnous * -- Exterminate P3 printks * -- Resove XXX's @@ -171,7 +172,7 @@ struct bulk_cs_wrap { */ struct ub_dev; -#define UB_MAX_REQ_SG 1 +#define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */ #define UB_MAX_SECTORS 64 /* @@ -234,13 +235,10 @@ struct ub_scsi_cmd { int stat_count; /* Retries getting status. */ - /* - * We do not support transfers from highmem pages - * because the underlying USB framework does not do what we need. 
- */ - char *data; /* Requested buffer */ unsigned int len; /* Requested length */ - // struct scatterlist sgv[UB_MAX_REQ_SG]; + unsigned int current_sg; + unsigned int nsg; /* sgv[nsg] */ + struct scatterlist sgv[UB_MAX_REQ_SG]; struct ub_lun *lun; void (*done)(struct ub_dev *, struct ub_scsi_cmd *); @@ -389,17 +387,18 @@ struct ub_dev { struct bulk_cs_wrap work_bcs; struct usb_ctrlrequest work_cr; + int sg_stat[6]; struct ub_scsi_trace tr; }; /* */ static void ub_cleanup(struct ub_dev *sc); -static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq); +static int ub_request_fn_1(struct ub_lun *lun, struct request *rq); static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, struct ub_scsi_cmd *cmd, struct request *rq); -static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd, - struct request *rq); +static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, + struct ub_scsi_cmd *cmd, struct request *rq); static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); static void ub_end_rq(struct request *rq, int uptodate); static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd); @@ -407,6 +406,7 @@ static void ub_urb_complete(struct urb *urb, struct pt_regs *pt); static void ub_scsi_action(unsigned long _dev); static void ub_scsi_dispatch(struct ub_dev *sc); static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd); +static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd); static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc); static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); @@ -500,7 +500,8 @@ static void ub_cmdtr_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd, } } -static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, char *page) +static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, + char *page) { struct usb_interface *intf; struct ub_dev *sc; @@ -523,6 +524,14 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, c cnt += sprintf(page + cnt, "qlen %d qmax %d\n", sc->cmd_queue.qlen, sc->cmd_queue.qmax); + cnt += sprintf(page + cnt, + "sg %d %d %d %d %d .. 
%d\n", + sc->sg_stat[0], + sc->sg_stat[1], + sc->sg_stat[2], + sc->sg_stat[3], + sc->sg_stat[4], + sc->sg_stat[5]); list_for_each (p, &sc->luns) { lun = list_entry(p, struct ub_lun, link); @@ -744,20 +753,20 @@ static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc) * The request function is our main entry point */ -static void ub_bd_rq_fn(request_queue_t *q) +static void ub_request_fn(request_queue_t *q) { struct ub_lun *lun = q->queuedata; struct request *rq; while ((rq = elv_next_request(q)) != NULL) { - if (ub_bd_rq_fn_1(lun, rq) != 0) { + if (ub_request_fn_1(lun, rq) != 0) { blk_stop_queue(q); break; } } } -static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq) +static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) { struct ub_dev *sc = lun->udev; struct ub_scsi_cmd *cmd; @@ -774,9 +783,8 @@ static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq) memset(cmd, 0, sizeof(struct ub_scsi_cmd)); blkdev_dequeue_request(rq); - if (blk_pc_request(rq)) { - rc = ub_cmd_build_packet(sc, cmd, rq); + rc = ub_cmd_build_packet(sc, lun, cmd, rq); } else { rc = ub_cmd_build_block(sc, lun, cmd, rq); } @@ -791,7 +799,7 @@ static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq) cmd->back = rq; cmd->tag = sc->tagcnt++; - if ((rc = ub_submit_scsi(sc, cmd)) != 0) { + if (ub_submit_scsi(sc, cmd) != 0) { ub_put_cmd(lun, cmd); ub_end_rq(rq, 0); return 0; @@ -804,58 +812,31 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, struct ub_scsi_cmd *cmd, struct request *rq) { int ub_dir; -#if 0 /* We use rq->buffer for now */ - struct scatterlist *sg; int n_elem; -#endif unsigned int block, nblks; if (rq_data_dir(rq) == WRITE) ub_dir = UB_DIR_WRITE; else ub_dir = UB_DIR_READ; + cmd->dir = ub_dir; /* * get scatterlist from block layer */ -#if 0 /* We use rq->buffer for now */ - sg = &cmd->sgv[0]; - n_elem = blk_rq_map_sg(q, rq, sg); + n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]); if (n_elem <= 0) { - ub_put_cmd(lun, cmd); - ub_end_rq(rq, 0); - blk_start_queue(q); - return 0; /* request with no s/g entries? */ + printk(KERN_INFO "%s: failed request map (%d)\n", + sc->name, n_elem); /* P3 */ + return -1; /* request with no s/g entries? */ } - - if (n_elem != 1) { /* Paranoia */ + if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */ printk(KERN_WARNING "%s: request with %d segments\n", sc->name, n_elem); - ub_put_cmd(lun, cmd); - ub_end_rq(rq, 0); - blk_start_queue(q); - return 0; - } -#endif - - /* - * XXX Unfortunately, this check does not work. It is quite possible - * to get bogus non-null rq->buffer if you allow sg by mistake. - */ - if (rq->buffer == NULL) { - /* - * This must not happen if we set the queue right. - * The block level must create bounce buffers for us. - */ - static int do_print = 1; - if (do_print) { - printk(KERN_WARNING "%s: unmapped block request" - " flags 0x%lx sectors %lu\n", - sc->name, rq->flags, rq->nr_sectors); - do_print = 0; - } return -1; } + cmd->nsg = n_elem; + sc->sg_stat[n_elem < 5 ? 
n_elem : 5]++; /* * build the command @@ -876,30 +857,15 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, cmd->cdb[8] = nblks; cmd->cdb_len = 10; - cmd->dir = ub_dir; - cmd->data = rq->buffer; cmd->len = rq->nr_sectors * 512; return 0; } -static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd, - struct request *rq) +static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, + struct ub_scsi_cmd *cmd, struct request *rq) { - - if (rq->data_len != 0 && rq->data == NULL) { - static int do_print = 1; - if (do_print) { - printk(KERN_WARNING "%s: unmapped packet request" - " flags 0x%lx length %d\n", - sc->name, rq->flags, rq->data_len); - do_print = 0; - } - return -1; - } - - memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); - cmd->cdb_len = rq->cmd_len; + int n_elem; if (rq->data_len == 0) { cmd->dir = UB_DIR_NONE; @@ -908,8 +874,29 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd, cmd->dir = UB_DIR_WRITE; else cmd->dir = UB_DIR_READ; + + } + + /* + * get scatterlist from block layer + */ + n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]); + if (n_elem < 0) { + printk(KERN_INFO "%s: failed request map (%d)\n", + sc->name, n_elem); /* P3 */ + return -1; + } + if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */ + printk(KERN_WARNING "%s: request with %d segments\n", + sc->name, n_elem); + return -1; } - cmd->data = rq->data; + cmd->nsg = n_elem; + sc->sg_stat[n_elem < 5 ? n_elem : 5]++; + + memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); + cmd->cdb_len = rq->cmd_len; + cmd->len = rq->data_len; return 0; @@ -919,24 +906,34 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) { struct request *rq = cmd->back; struct ub_lun *lun = cmd->lun; - struct gendisk *disk = lun->disk; - request_queue_t *q = disk->queue; int uptodate; - if (blk_pc_request(rq)) { - /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ - memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); - rq->sense_len = UB_SENSE_SIZE; - } - - if (cmd->error == 0) + if (cmd->error == 0) { uptodate = 1; - else + + if (blk_pc_request(rq)) { + if (cmd->act_len >= rq->data_len) + rq->data_len = 0; + else + rq->data_len -= cmd->act_len; + } + } else { uptodate = 0; + if (blk_pc_request(rq)) { + /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ + memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); + rq->sense_len = UB_SENSE_SIZE; + if (sc->top_sense[0] != 0) + rq->errors = SAM_STAT_CHECK_CONDITION; + else + rq->errors = DID_ERROR << 16; + } + } + ub_put_cmd(lun, cmd); ub_end_rq(rq, uptodate); - blk_start_queue(q); + blk_start_queue(lun->disk->queue); } static void ub_end_rq(struct request *rq, int uptodate) @@ -1014,7 +1011,6 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) sc->last_pipe = sc->send_bulk_pipe; usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe, bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc); - sc->work_urb.transfer_flags = URB_ASYNC_UNLINK; /* Fill what we shouldn't be filling, because usb-storage did so. 
*/ sc->work_urb.actual_length = 0; @@ -1023,7 +1019,6 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { /* XXX Clear stalls */ - printk("ub: cmd #%d start failed (%d)\n", cmd->tag, rc); /* P3 */ ub_complete(&sc->work_done); return rc; } @@ -1103,7 +1098,6 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) { struct urb *urb = &sc->work_urb; struct bulk_cs_wrap *bcs; - int pipe; int rc; if (atomic_read(&sc->poison)) { @@ -1195,47 +1189,20 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) return; } if (urb->status != 0) { - printk("ub: cmd #%d cmd status (%d)\n", cmd->tag, urb->status); /* P3 */ goto Bad_End; } if (urb->actual_length != US_BULK_CB_WRAP_LEN) { - printk("ub: cmd #%d xferred %d\n", cmd->tag, urb->actual_length); /* P3 */ /* XXX Must do reset here to unconfuse the device */ goto Bad_End; } - if (cmd->dir == UB_DIR_NONE) { + if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) { ub_state_stat(sc, cmd); return; } - UB_INIT_COMPLETION(sc->work_done); - - if (cmd->dir == UB_DIR_READ) - pipe = sc->recv_bulk_pipe; - else - pipe = sc->send_bulk_pipe; - sc->last_pipe = pipe; - usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, - cmd->data, cmd->len, ub_urb_complete, sc); - sc->work_urb.transfer_flags = URB_ASYNC_UNLINK; - sc->work_urb.actual_length = 0; - sc->work_urb.error_count = 0; - sc->work_urb.status = 0; - - if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { - /* XXX Clear stalls */ - printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */ - ub_complete(&sc->work_done); - ub_state_done(sc, cmd, rc); - return; - } - - sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT; - add_timer(&sc->work_timer); - - cmd->state = UB_CMDST_DATA; - ub_cmdtr_state(sc, cmd); + // udelay(125); // usb-storage has this + ub_data_start(sc, cmd); } else if (cmd->state == UB_CMDST_DATA) { if (urb->status == -EPIPE) { @@ -1257,16 +1224,22 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) if (urb->status == -EOVERFLOW) { /* * A babble? Failure, but we must transfer CSW now. + * XXX This is going to end in perpetual babble. Reset. */ cmd->error = -EOVERFLOW; /* A cheap trick... */ - } else { - if (urb->status != 0) - goto Bad_End; + ub_state_stat(sc, cmd); + return; } + if (urb->status != 0) + goto Bad_End; - cmd->act_len = urb->actual_length; + cmd->act_len += urb->actual_length; ub_cmdtr_act_len(sc, cmd); + if (++cmd->current_sg < cmd->nsg) { + ub_data_start(sc, cmd); + return; + } ub_state_stat(sc, cmd); } else if (cmd->state == UB_CMDST_STAT) { @@ -1401,6 +1374,44 @@ Bad_End: /* Little Excel is dead */ /* * Factorization helper for the command state machine: + * Initiate a data segment transfer. 
+ */ +static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +{ + struct scatterlist *sg = &cmd->sgv[cmd->current_sg]; + int pipe; + int rc; + + UB_INIT_COMPLETION(sc->work_done); + + if (cmd->dir == UB_DIR_READ) + pipe = sc->recv_bulk_pipe; + else + pipe = sc->send_bulk_pipe; + sc->last_pipe = pipe; + usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, + page_address(sg->page) + sg->offset, sg->length, + ub_urb_complete, sc); + sc->work_urb.actual_length = 0; + sc->work_urb.error_count = 0; + sc->work_urb.status = 0; + + if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { + /* XXX Clear stalls */ + ub_complete(&sc->work_done); + ub_state_done(sc, cmd, rc); + return; + } + + sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT; + add_timer(&sc->work_timer); + + cmd->state = UB_CMDST_DATA; + ub_cmdtr_state(sc, cmd); +} + +/* + * Factorization helper for the command state machine: * Finish the command. */ static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc) @@ -1426,7 +1437,6 @@ static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) sc->last_pipe = sc->recv_bulk_pipe; usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe, &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc); - sc->work_urb.transfer_flags = URB_ASYNC_UNLINK; sc->work_urb.actual_length = 0; sc->work_urb.error_count = 0; sc->work_urb.status = 0; @@ -1484,6 +1494,7 @@ static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd) static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) { struct ub_scsi_cmd *scmd; + struct scatterlist *sg; int rc; if (cmd->cdb[0] == REQUEST_SENSE) { @@ -1492,12 +1503,17 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) } scmd = &sc->top_rqs_cmd; + memset(scmd, 0, sizeof(struct ub_scsi_cmd)); scmd->cdb[0] = REQUEST_SENSE; scmd->cdb[4] = UB_SENSE_SIZE; scmd->cdb_len = 6; scmd->dir = UB_DIR_READ; scmd->state = UB_CMDST_INIT; - scmd->data = sc->top_sense; + scmd->nsg = 1; + sg = &scmd->sgv[0]; + sg->page = virt_to_page(sc->top_sense); + sg->offset = (unsigned int)sc->top_sense & (PAGE_SIZE-1); + sg->length = UB_SENSE_SIZE; scmd->len = UB_SENSE_SIZE; scmd->lun = cmd->lun; scmd->done = ub_top_sense_done; @@ -1541,7 +1557,6 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, (unsigned char*) cr, NULL, 0, ub_urb_complete, sc); - sc->work_urb.transfer_flags = URB_ASYNC_UNLINK; sc->work_urb.actual_length = 0; sc->work_urb.error_count = 0; sc->work_urb.status = 0; @@ -1560,7 +1575,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, */ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd) { - unsigned char *sense = scmd->data; + unsigned char *sense = sc->top_sense; struct ub_scsi_cmd *cmd; /* @@ -1852,6 +1867,7 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, struct ub_capacity *ret) { struct ub_scsi_cmd *cmd; + struct scatterlist *sg; char *p; enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 }; unsigned long flags; @@ -1872,7 +1888,11 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, cmd->cdb_len = 10; cmd->dir = UB_DIR_READ; cmd->state = UB_CMDST_INIT; - cmd->data = p; + cmd->nsg = 1; + sg = &cmd->sgv[0]; + sg->page = virt_to_page(p); + sg->offset = (unsigned int)p & (PAGE_SIZE-1); + sg->length = 8; cmd->len = 8; cmd->lun = lun; cmd->done = ub_probe_done; @@ -1973,17 +1993,16 @@ static int 
ub_sync_getmaxlun(struct ub_dev *sc) usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe, (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl); - sc->work_urb.transfer_flags = 0; sc->work_urb.actual_length = 0; sc->work_urb.error_count = 0; sc->work_urb.status = 0; if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { if (rc == -EPIPE) { - printk("%s: Stall at GetMaxLUN, using 1 LUN\n", + printk("%s: Stall submitting GetMaxLUN, using 1 LUN\n", sc->name); /* P3 */ } else { - printk(KERN_WARNING + printk(KERN_NOTICE "%s: Unable to submit GetMaxLUN (%d)\n", sc->name, rc); } @@ -2001,6 +2020,18 @@ static int ub_sync_getmaxlun(struct ub_dev *sc) del_timer_sync(&timer); usb_kill_urb(&sc->work_urb); + if ((rc = sc->work_urb.status) < 0) { + if (rc == -EPIPE) { + printk("%s: Stall at GetMaxLUN, using 1 LUN\n", + sc->name); /* P3 */ + } else { + printk(KERN_NOTICE + "%s: Error at GetMaxLUN (%d)\n", + sc->name, rc); + } + goto err_io; + } + if (sc->work_urb.actual_length != 1) { printk("%s: GetMaxLUN returned %d bytes\n", sc->name, sc->work_urb.actual_length); /* P3 */ @@ -2021,6 +2052,7 @@ static int ub_sync_getmaxlun(struct ub_dev *sc) kfree(p); return nluns; +err_io: err_submit: kfree(p); err_alloc: @@ -2053,7 +2085,6 @@ static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe) usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl); - sc->work_urb.transfer_flags = 0; sc->work_urb.actual_length = 0; sc->work_urb.error_count = 0; sc->work_urb.status = 0; @@ -2186,8 +2217,10 @@ static int ub_probe(struct usb_interface *intf, * This is needed to clear toggles. It is a problem only if we do * `rmmod ub && modprobe ub` without disconnects, but we like that. */ +#if 0 /* iPod Mini fails if we do this (big white iPod works) */ ub_probe_clear_stall(sc, sc->recv_bulk_pipe); ub_probe_clear_stall(sc, sc->send_bulk_pipe); +#endif /* * The way this is used by the startup code is a little specific. @@ -2214,10 +2247,10 @@ static int ub_probe(struct usb_interface *intf, for (i = 0; i < 3; i++) { if ((rc = ub_sync_getmaxlun(sc)) < 0) { /* - * Some devices (i.e. Iomega Zip100) need this -- - * apparently the bulk pipes get STALLed when the - * GetMaxLUN request is processed. - * XXX I have a ZIP-100, verify it does this. + * This segment is taken from usb-storage. They say + * that ZIP-100 needs this, but my own ZIP-100 works + * fine without this. + * Still, it does not seem to hurt anything. */ if (rc == -EPIPE) { ub_probe_clear_stall(sc, sc->recv_bulk_pipe); @@ -2286,10 +2319,10 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum) disk->first_minor = lun->id * UB_MINORS_PER_MAJOR; disk->fops = &ub_bd_fops; disk->private_data = lun; - disk->driverfs_dev = &sc->intf->dev; /* XXX Many to one ok? 
*/ + disk->driverfs_dev = &sc->intf->dev; rc = -ENOMEM; - if ((q = blk_init_queue(ub_bd_rq_fn, &sc->lock)) == NULL) + if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL) goto err_blkqinit; disk->queue = q; @@ -2439,9 +2472,6 @@ static int __init ub_init(void) { int rc; - /* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu ub_lun %zu\n", - sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev), sizeof(struct ub_lun)); - if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0) goto err_regblkdev; devfs_mk_dir(DEVFS_NAME); diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 0c4c121d2e79..0f48301342da 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -34,6 +34,7 @@ * - set initialised bit then. */ +//#define DEBUG /* uncomment if you want debugging info (pr_debug) */ #include <linux/config.h> #include <linux/sched.h> #include <linux/fs.h> @@ -58,10 +59,6 @@ #include <asm/uaccess.h> #include <asm/io.h> -#define PRINTK(x...) do {} while (0) -#define dprintk(x...) do {} while (0) -/*#define dprintk(x...) printk(x) */ - #define MM_MAXCARDS 4 #define MM_RAHEAD 2 /* two sectors */ #define MM_BLKSIZE 1024 /* 1k blocks */ @@ -299,7 +296,7 @@ static void mm_start_io(struct cardinfo *card) /* make the last descriptor end the chain */ page = &card->mm_pages[card->Active]; - PRINTK("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt-1); + pr_debug("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt-1); desc = &page->desc[page->cnt-1]; desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN); @@ -532,7 +529,7 @@ static void process_page(unsigned long data) activate(card); } else { /* haven't finished with this one yet */ - PRINTK("do some more\n"); + pr_debug("do some more\n"); mm_start_io(card); } out_unlock: @@ -555,7 +552,7 @@ static void process_page(unsigned long data) static int mm_make_request(request_queue_t *q, struct bio *bio) { struct cardinfo *card = q->queuedata; - PRINTK("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size); + pr_debug("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size); bio->bi_phys_segments = bio->bi_idx; /* count of completed segments*/ spin_lock_irq(&card->lock); diff --git a/drivers/block/xd.c b/drivers/block/xd.c index 1676033da6c6..68b6d7b154cf 100644 --- a/drivers/block/xd.c +++ b/drivers/block/xd.c @@ -47,6 +47,7 @@ #include <linux/wait.h> #include <linux/blkdev.h> #include <linux/blkpg.h> +#include <linux/delay.h> #include <asm/system.h> #include <asm/io.h> @@ -62,7 +63,7 @@ static int xd[5] = { -1,-1,-1,-1, }; #define XD_DONT_USE_DMA 0 /* Initial value. 
may be overriden using "nodma" module option */ -#define XD_INIT_DISK_DELAY (30*HZ/1000) /* 30 ms delay during disk initialization */ +#define XD_INIT_DISK_DELAY (30) /* 30 ms delay during disk initialization */ /* Above may need to be increased if a problem with the 2nd drive detection (ST11M controller) or resetting a controller (WD) appears */ @@ -529,10 +530,8 @@ static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long t int success; xdc_busy = 1; - while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry)) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(1); - } + while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry)) + schedule_timeout_uninterruptible(1); xdc_busy = 0; return (success); } @@ -633,14 +632,12 @@ static u_char __init xd_initdrives (void (*init_drive)(u_char drive)) for (i = 0; i < XD_MAXDRIVES; i++) { xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0); if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT*8)) { - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(XD_INIT_DISK_DELAY); + msleep_interruptible(XD_INIT_DISK_DELAY); init_drive(count); count++; - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(XD_INIT_DISK_DELAY); + msleep_interruptible(XD_INIT_DISK_DELAY); } } return (count); @@ -761,8 +758,7 @@ static void __init xd_wd_init_controller (unsigned int address) outb(0,XD_RESET); /* reset the controller */ - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(XD_INIT_DISK_DELAY); + msleep(XD_INIT_DISK_DELAY); } static void __init xd_wd_init_drive (u_char drive) @@ -936,8 +932,7 @@ If you need non-standard settings use the xd=... command */ xd_maxsectors = 0x01; outb(0,XD_RESET); /* reset the controller */ - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(XD_INIT_DISK_DELAY); + msleep(XD_INIT_DISK_DELAY); } static void __init xd_xebec_init_drive (u_char drive) diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 007f6a662439..bb5e8d665a2a 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -296,7 +296,7 @@ z2_open( struct inode *inode, struct file *filp ) return 0; err_out_kfree: - kfree( z2ram_map ); + kfree(z2ram_map); err_out: return rc; } |
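The ll_rw_blk.c hunk at the top of this section reworks blk_execute_rq() to take an at_head argument and to sit on top of blk_execute_rq_nowait(), and the scsi_ioctl.c callers below pass 0 for it. A minimal caller-side sketch of the new synchronous interface follows; the helper name, the TEST UNIT READY command and the 30-second timeout are illustrative, not taken from the patch.

/*
 * Sketch: issue a TEST UNIT READY via the reworked blk_execute_rq().
 * The final argument (0) asks for tail insertion; 1 would insert the
 * request at the head of the dispatch queue.
 */
#include <linux/blkdev.h>
#include <linux/string.h>
#include <scsi/scsi.h>

static int example_test_unit_ready(request_queue_t *q, struct gendisk *disk)
{
        struct request *rq;
        int err;

        rq = blk_get_request(q, READ, __GFP_WAIT);
        if (!rq)
                return -ENOMEM;

        memset(rq->cmd, 0, sizeof(rq->cmd));
        rq->cmd[0] = TEST_UNIT_READY;
        rq->cmd_len = 6;
        rq->flags |= REQ_BLOCK_PC;
        rq->data = NULL;
        rq->data_len = 0;
        rq->timeout = 30 * HZ;                  /* illustrative timeout */

        err = blk_execute_rq(q, disk, rq, 0);   /* waits for completion */

        blk_put_request(rq);
        return err;
}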
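Several drivers in this section (pcd, pf, pg, pt, swim3, swim_iop) replace the open-coded pair "current->state = TASK_INTERRUPTIBLE; schedule_timeout(cs);" with schedule_timeout_interruptible(). For reference, the new helper behaves roughly like the wrapper below; the name sleep_interruptible is ours, used only for illustration.

#include <linux/sched.h>

/* Approximate expansion of schedule_timeout_interruptible(). */
static inline signed long sleep_interruptible(signed long timeout)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
}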
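Two other conversions recur in this section: TIMER_INITIALIZER declarations become DEFINE_TIMER (ps2esdi above), and kmalloc() followed by memset(..., 0, ...) becomes kzalloc() (pktcdvd above). A side-by-side sketch, with my_timeout and struct my_object as illustrative names:

#include <linux/slab.h>
#include <linux/timer.h>

struct my_object {
        int dummy;                      /* placeholder member */
};

static void my_timeout(unsigned long data)
{
        /* timer handler body omitted */
}

/* old style: static struct timer_list my_timer =
 *                    TIMER_INITIALIZER(my_timeout, 0, 0);       */
static DEFINE_TIMER(my_timer, my_timeout, 0, 0);

static struct my_object *alloc_object(void)
{
        /* old style: kmalloc() followed by an explicit memset to zero */
        return kzalloc(sizeof(struct my_object), GFP_KERNEL);
}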
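The scsi_ioctl.c changes drop the old "if (hdr->iovec_count) return -EOPNOTSUPP;" early exit and map the user iovec with blk_rq_map_user_iov(), so SG_IO on a block device node served by this code can now scatter one transfer across several user buffers. A user-space sketch under stated assumptions: fd refers to such a block device, the device uses 512-byte sectors, the buffers are padded to sector alignment in case the mapping path enforces it, and READ(10) for 8 sectors is the illustrative command.

#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

static int read_into_two_buffers(int fd)
{
        unsigned char cdb[10] = { 0x28, 0, 0, 0, 0, 0, 0, 0, 8, 0 }; /* READ(10), 8 sectors */
        unsigned char sense[32];
        static unsigned char buf0[2048] __attribute__((aligned(512)));
        static unsigned char buf1[2048] __attribute__((aligned(512)));
        struct sg_iovec iov[2] = {
                { buf0, sizeof(buf0) },
                { buf1, sizeof(buf1) },
        };
        struct sg_io_hdr hdr;

        memset(&hdr, 0, sizeof(hdr));
        hdr.interface_id    = 'S';
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.cmd_len         = sizeof(cdb);
        hdr.cmdp            = cdb;
        hdr.iovec_count     = 2;        /* previously rejected with EOPNOTSUPP */
        hdr.dxferp          = iov;      /* with iovec_count set, this is the iovec array */
        hdr.dxfer_len       = sizeof(buf0) + sizeof(buf1);
        hdr.mx_sb_len       = sizeof(sense);
        hdr.sbp             = sense;
        hdr.timeout         = 20000;    /* milliseconds */

        return ioctl(fd, SG_IO, &hdr);
}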
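The ub.c changes replace the single rq->buffer transfer with a per-command scatterlist (cmd->sgv[], cmd->nsg, cmd->current_sg) filled by blk_rq_map_sg() and consumed one bulk URB at a time by the new ub_data_start(). A reduced sketch of the mapping step under assumed driver context; MY_MAX_SG and my_map_request are illustrative, and in ub.c the over-limit branch should not trigger provided the queue's segment limits were configured to match, which is outside the hunks shown here.

#include <linux/blkdev.h>

#define MY_MAX_SG 9                     /* mirrors UB_MAX_REQ_SG above */

static int my_map_request(request_queue_t *q, struct request *rq,
                          struct scatterlist *sgv)
{
        int n_elem = blk_rq_map_sg(q, rq, sgv);

        if (n_elem <= 0)
                return -EIO;            /* request mapped to no segments */
        if (n_elem > MY_MAX_SG)
                return -EIO;            /* caller's sgv[] array is too small */
        return n_elem;                  /* submit one bulk URB per segment */
}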
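In xd.c, XD_INIT_DISK_DELAY changes from (30*HZ/1000) to plain 30 because the delay is now expressed in milliseconds for msleep()/msleep_interruptible() rather than in jiffies for schedule_timeout(). The two illustrative helpers below sleep for roughly the same 30 ms:

#include <linux/delay.h>
#include <linux/sched.h>

static void delay_30ms_jiffies(void)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(30 * HZ / 1000);       /* argument counted in jiffies */
}

static void delay_30ms_msec(void)
{
        msleep(30);                             /* argument counted in milliseconds */
}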