From 7945619794314414a5c44df11fca4d3f2a3389cf Mon Sep 17 00:00:00 2001 From: Jody McIntyre Date: Mon, 7 Nov 2005 06:29:39 -0500 Subject: sbp2_command_orb_lock must be held when accessing the _orb_inuse list. Fixes an oops in sbp2util_find_command_for_SCpnt after sbp2scsi_abort: https://bugzilla.novell.com/show_bug.cgi?id=113734 Signed-off-by: Jody McIntyre Signed-off-by: Stefan Richter --- drivers/ieee1394/sbp2.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 12cec7c4a342..f7e18ccc5c0a 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -2350,6 +2350,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest struct scsi_cmnd *SCpnt = NULL; u32 scsi_status = SBP2_SCSI_STATUS_GOOD; struct sbp2_command_info *command; + unsigned long flags; SBP2_DEBUG("sbp2_handle_status_write"); @@ -2451,9 +2452,11 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest * null out last orb so that next time around we write directly to the orb pointer... * Quick start saves one 1394 bus transaction. */ + spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); if (list_empty(&scsi_id->sbp2_command_orb_inuse)) { scsi_id->last_orb = NULL; } + spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); } else { @@ -2563,9 +2566,11 @@ static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id struct sbp2scsi_host_info *hi = scsi_id->hi; struct list_head *lh; struct sbp2_command_info *command; + unsigned long flags; SBP2_DEBUG("sbp2scsi_complete_all_commands"); + spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); while (!list_empty(&scsi_id->sbp2_command_orb_inuse)) { SBP2_DEBUG("Found pending command to complete"); lh = scsi_id->sbp2_command_orb_inuse.next; @@ -2582,6 +2587,7 @@ static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id command->Current_done(command->Current_SCpnt); } } + spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); return; } -- cgit v1.2.3 From 365c786f0be44ee92e018773cb0bc4b19080b6aa Mon Sep 17 00:00:00 2001 From: Ben Collins Date: Mon, 7 Nov 2005 06:31:24 -0500 Subject: sbp2: Merge TYPE_RBC and 10byte removal patch from scsi maintainers. Added more cleanups to remove unused code. Signed-off-by: Ben Collins Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/sbp2.c | 151 +----------------------------------------------- drivers/ieee1394/sbp2.h | 1 - 2 files changed, 2 insertions(+), 150 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index f7e18ccc5c0a..d53c8cfe6610 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -1088,16 +1088,6 @@ static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_ * SBP-2 protocol related section **************************************/ -/* - * This function determines if we should convert scsi commands for a particular sbp2 device type - */ -static __inline__ int sbp2_command_conversion_device_type(u8 device_type) -{ - return (((device_type == TYPE_DISK) || - (device_type == TYPE_RBC) || - (device_type == TYPE_ROM)) ? 1:0); -} - /* * This function queries the device for the maximum concurrent logins it * supports. 
@@ -2106,11 +2096,6 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id, sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg, request_bufflen, SCpnt->request_buffer, SCpnt->sc_data_direction); - /* - * Update our cdb if necessary (to handle sbp2 RBC command set - * differences). This is where the command set hacks go! =) - */ - sbp2_check_sbp2_command(scsi_id, command->command_orb.cdb); sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb), "sbp2 command orb", command->command_orb_dma); @@ -2129,110 +2114,6 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id, } -/* - * This function deals with command set differences between Linux scsi - * command set and sbp2 RBC command set. - */ -static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, unchar *cmd) -{ - unchar new_cmd[16]; - u8 device_type = SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun); - - SBP2_DEBUG("sbp2_check_sbp2_command"); - - switch (*cmd) { - - case READ_6: - - if (sbp2_command_conversion_device_type(device_type)) { - - SBP2_DEBUG("Convert READ_6 to READ_10"); - - /* - * Need to turn read_6 into read_10 - */ - new_cmd[0] = 0x28; - new_cmd[1] = (cmd[1] & 0xe0); - new_cmd[2] = 0x0; - new_cmd[3] = (cmd[1] & 0x1f); - new_cmd[4] = cmd[2]; - new_cmd[5] = cmd[3]; - new_cmd[6] = 0x0; - new_cmd[7] = 0x0; - new_cmd[8] = cmd[4]; - new_cmd[9] = cmd[5]; - - memcpy(cmd, new_cmd, 10); - - } - - break; - - case WRITE_6: - - if (sbp2_command_conversion_device_type(device_type)) { - - SBP2_DEBUG("Convert WRITE_6 to WRITE_10"); - - /* - * Need to turn write_6 into write_10 - */ - new_cmd[0] = 0x2a; - new_cmd[1] = (cmd[1] & 0xe0); - new_cmd[2] = 0x0; - new_cmd[3] = (cmd[1] & 0x1f); - new_cmd[4] = cmd[2]; - new_cmd[5] = cmd[3]; - new_cmd[6] = 0x0; - new_cmd[7] = 0x0; - new_cmd[8] = cmd[4]; - new_cmd[9] = cmd[5]; - - memcpy(cmd, new_cmd, 10); - - } - - break; - - case MODE_SENSE: - - if (sbp2_command_conversion_device_type(device_type)) { - - SBP2_DEBUG("Convert MODE_SENSE_6 to MODE_SENSE_10"); - - /* - * Need to turn mode_sense_6 into mode_sense_10 - */ - new_cmd[0] = 0x5a; - new_cmd[1] = cmd[1]; - new_cmd[2] = cmd[2]; - new_cmd[3] = 0x0; - new_cmd[4] = 0x0; - new_cmd[5] = 0x0; - new_cmd[6] = 0x0; - new_cmd[7] = 0x0; - new_cmd[8] = cmd[4]; - new_cmd[9] = cmd[5]; - - memcpy(cmd, new_cmd, 10); - - } - - break; - - case MODE_SELECT: - - /* - * TODO. 
Probably need to change mode select to 10 byte version - */ - - default: - break; - } - - return; -} - /* * Translates SBP-2 status into SCSI sense data for check conditions */ @@ -2271,7 +2152,6 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id, struct scsi_cmnd *SCpnt) { u8 *scsi_buf = SCpnt->request_buffer; - u8 device_type = SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun); SBP2_DEBUG("sbp2_check_sbp2_response"); @@ -2295,14 +2175,6 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id, scsi_buf[4] = 36 - 5; } - /* - * Check for Simple Direct Access Device and change it to TYPE_DISK - */ - if ((scsi_buf[0] & 0x1f) == TYPE_RBC) { - SBP2_DEBUG("Changing TYPE_RBC to TYPE_DISK"); - scsi_buf[0] &= 0xe0; - } - /* * Fix ansi revision and response data format */ @@ -2311,27 +2183,6 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id, break; - case MODE_SENSE: - - if (sbp2_command_conversion_device_type(device_type)) { - - SBP2_DEBUG("Modify mode sense response (10 byte version)"); - - scsi_buf[0] = scsi_buf[1]; /* Mode data length */ - scsi_buf[1] = scsi_buf[2]; /* Medium type */ - scsi_buf[2] = scsi_buf[3]; /* Device specific parameter */ - scsi_buf[3] = scsi_buf[7]; /* Block descriptor length */ - memcpy(scsi_buf + 4, scsi_buf + 8, scsi_buf[0]); - } - - break; - - case MODE_SELECT: - - /* - * TODO. Probably need to change mode select to 10 byte version - */ - default: break; } @@ -2713,6 +2564,8 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev) static int sbp2scsi_slave_configure(struct scsi_device *sdev) { blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); + sdev->use_10_for_rw = 1; + sdev->use_10_for_ms = 1; return 0; } diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index cd425be74841..cb111d751143 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h @@ -469,7 +469,6 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id, struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)); static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data); -static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, unchar *cmd); static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id, struct scsi_cmnd *SCpnt); static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, -- cgit v1.2.3 From e309fc6d71d61bb0f049ab6d0da10c845da9513f Mon Sep 17 00:00:00 2001 From: Ben Collins Date: Mon, 7 Nov 2005 06:31:34 -0500 Subject: sbp2: Remove our tracking of device type, since we no longer need to worry about it. Depends on patch "ieee1394: remove sbp2's TYPE_RBC and 10byte handling". 
Signed-off-by: Ben Collins Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/sbp2.c | 34 ++++++---------------------------- drivers/ieee1394/sbp2.h | 7 +------ 2 files changed, 7 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index d53c8cfe6610..747dbd1c3c9c 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -735,7 +735,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed); INIT_LIST_HEAD(&scsi_id->scsi_list); spin_lock_init(&scsi_id->sbp2_command_orb_lock); - scsi_id->sbp2_device_type_and_lun = SBP2_DEVICE_TYPE_LUN_UNINITIALIZED; + scsi_id->sbp2_lun = 0; ud->device.driver_data = scsi_id; @@ -1110,11 +1110,7 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id) scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST); scsi_id->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1); - if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) { - scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun); - SBP2_DEBUG("sbp2_query_logins: set lun to %d", - ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun)); - } + scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun); SBP2_DEBUG("sbp2_query_logins: lun_misc initialized"); scsi_id->query_logins_orb->reserved_resp_length = @@ -1223,12 +1219,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id) scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0); /* One second reconnect time */ scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(exclusive_login); /* Exclusive access to device */ scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); /* Notify us of login complete */ - /* Set the lun if we were able to pull it from the device's unit directory */ - if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) { - scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun); - SBP2_DEBUG("sbp2_query_logins: set lun to %d", - ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun)); - } + scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun); SBP2_DEBUG("sbp2_login_device: lun_misc initialized"); scsi_id->login_orb->passwd_resp_lengths = @@ -1543,7 +1534,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, SBP2_DEBUG("sbp2_management_agent_addr = %x", (unsigned int) management_agent_addr); } else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) { - scsi_id->sbp2_device_type_and_lun = kv->value.immediate; + scsi_id->sbp2_lun = ORB_SET_LUN(kv->value.immediate); } break; @@ -1636,7 +1627,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, scsi_id->sbp2_firmware_revision = firmware_revision; scsi_id->workarounds = workarounds; if (ud->flags & UNIT_DIRECTORY_HAS_LUN) - scsi_id->sbp2_device_type_and_lun = ud->lun; + scsi_id->sbp2_lun = ORB_SET_LUN(ud->lun); } } @@ -2158,16 +2149,6 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id, switch (SCpnt->cmnd[0]) { case INQUIRY: - - /* - * If scsi_id->sbp2_device_type_and_lun is uninitialized, then fill - * this information in from the inquiry response data. Lun is set to zero. 
- */ - if (scsi_id->sbp2_device_type_and_lun == SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) { - SBP2_DEBUG("Creating sbp2_device_type_and_lun from scsi inquiry data"); - scsi_id->sbp2_device_type_and_lun = (scsi_buf[0] & 0x1f) << 16; - } - /* * Make sure data length is ok. Minimum length is 36 bytes */ @@ -2665,10 +2646,7 @@ static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_att if (!(scsi_id = (struct scsi_id_instance_data *)sdev->host->hostdata[0])) return 0; - if (scsi_id->sbp2_device_type_and_lun == SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) - lun = 0; - else - lun = ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun); + lun = ORB_SET_LUN(scsi_id->sbp2_lun); return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)scsi_id->ne->guid, scsi_id->ud->id, lun); diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index cb111d751143..890be1365523 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h @@ -229,9 +229,6 @@ struct sbp2_status_block { #define SBP2_DEVICE_TYPE_AND_LUN_KEY 0x14 #define SBP2_FIRMWARE_REVISION_KEY 0x3c -#define SBP2_DEVICE_TYPE(q) (((q) >> 16) & 0x1f) -#define SBP2_DEVICE_LUN(q) ((q) & 0xffff) - #define SBP2_AGENT_STATE_OFFSET 0x00ULL #define SBP2_AGENT_RESET_OFFSET 0x04ULL #define SBP2_ORB_POINTER_OFFSET 0x08ULL @@ -256,8 +253,6 @@ struct sbp2_status_block { */ #define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800 -#define SBP2_DEVICE_TYPE_LUN_UNINITIALIZED 0xffffffff - /* * SCSI specific stuff */ @@ -379,7 +374,7 @@ struct scsi_id_instance_data { u32 sbp2_command_set_spec_id; u32 sbp2_command_set; u32 sbp2_unit_characteristics; - u32 sbp2_device_type_and_lun; + u32 sbp2_lun; u32 sbp2_firmware_revision; /* -- cgit v1.2.3 From a237f35fdd81d85037dccdacd2e94028227b59fb Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Mon, 7 Nov 2005 06:31:39 -0500 Subject: sbp2, ohci1394 cleanups: sbp2: various code formatting cleanups ohci1394: remove form feed characters Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/ohci1394.c | 5 - drivers/ieee1394/sbp2.c | 433 ++++++++++++++++++++++---------------------- drivers/ieee1394/sbp2.h | 15 +- 3 files changed, 224 insertions(+), 229 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c index 4cf9b8f3e336..dcb5776e5c44 100644 --- a/drivers/ieee1394/ohci1394.c +++ b/drivers/ieee1394/ohci1394.c @@ -3201,8 +3201,6 @@ static struct hpsb_host_driver ohci1394_driver = { .hw_csr_reg = ohci_hw_csr_reg, }; - - /*********************************** * PCI Driver Interface functions * ***********************************/ @@ -3606,8 +3604,6 @@ static struct pci_driver ohci1394_pci_driver = { .suspend = ohci1394_pci_suspend, }; - - /*********************************** * OHCI1394 Video Interface * ***********************************/ @@ -3714,7 +3710,6 @@ EXPORT_SYMBOL(ohci1394_init_iso_tasklet); EXPORT_SYMBOL(ohci1394_register_iso_tasklet); EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet); - /*********************************** * General module initialization * ***********************************/ diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 747dbd1c3c9c..073ede9d3b48 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -151,18 +151,15 @@ static int force_inquiry_hack; module_param(force_inquiry_hack, int, 0444); MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)"); - /* * Export information about protocols/devices supported by this driver. 
*/ static struct ieee1394_device_id sbp2_id_table[] = { { - .match_flags =IEEE1394_MATCH_SPECIFIER_ID | - IEEE1394_MATCH_VERSION, - .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff, - .version = SBP2_SW_VERSION_ENTRY & 0xffffff - }, - { } + .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, + .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff, + .version = SBP2_SW_VERSION_ENTRY & 0xffffff}, + {} }; MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table); @@ -221,7 +218,6 @@ static u32 global_outstanding_dmas = 0; #define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args) - /* * Globals */ @@ -254,8 +250,8 @@ static struct hpsb_address_ops sbp2_ops = { #ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA static struct hpsb_address_ops sbp2_physdma_ops = { - .read = sbp2_handle_physdma_read, - .write = sbp2_handle_physdma_write, + .read = sbp2_handle_physdma_read, + .write = sbp2_handle_physdma_write, }; #endif @@ -287,7 +283,6 @@ static u32 sbp2_broken_inquiry_list[] = { * General utility functions **************************************/ - #ifndef __BIG_ENDIAN /* * Converts a buffer from be32 to cpu byte ordering. Length is in bytes. @@ -324,7 +319,8 @@ static __inline__ void sbp2util_cpu_to_be32_buffer(void *buffer, int length) /* * Debug packet dump routine. Length is in bytes. */ -static void sbp2util_packet_dump(void *buffer, int length, char *dump_name, u32 dump_phys_addr) +static void sbp2util_packet_dump(void *buffer, int length, char *dump_name, + u32 dump_phys_addr) { int i; unsigned char *dump = buffer; @@ -345,7 +341,7 @@ static void sbp2util_packet_dump(void *buffer, int length, char *dump_name, u32 printk(" "); if ((i & 0xf) == 0) printk("\n "); - printk("%02x ", (int) dump[i]); + printk("%02x ", (int)dump[i]); } printk("\n"); @@ -364,9 +360,9 @@ static int sbp2util_down_timeout(atomic_t *done, int timeout) for (i = timeout; (i > 0 && atomic_read(done) == 0); i-= HZ/10) { if (msleep_interruptible(100)) /* 100ms */ - return(1); + return 1; } - return ((i > 0) ? 0:1); + return (i > 0) ? 0 : 1; } /* Free's an allocated packet */ @@ -380,21 +376,22 @@ static void sbp2_free_packet(struct hpsb_packet *packet) * subaction and returns immediately. Can be used from interrupts. 
*/ static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr, - quadlet_t *buffer, size_t length) + quadlet_t *buffer, size_t length) { struct hpsb_packet *packet; packet = hpsb_make_writepacket(ne->host, ne->nodeid, addr, buffer, length); - if (!packet) - return -ENOMEM; + if (!packet) + return -ENOMEM; - hpsb_set_packet_complete_task(packet, (void (*)(void*))sbp2_free_packet, + hpsb_set_packet_complete_task(packet, + (void (*)(void *))sbp2_free_packet, packet); hpsb_node_fill_packet(ne, packet); - if (hpsb_send_packet(packet) < 0) { + if (hpsb_send_packet(packet) < 0) { sbp2_free_packet(packet); return -EIO; } @@ -420,19 +417,21 @@ static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_i command = (struct sbp2_command_info *) kmalloc(sizeof(struct sbp2_command_info), GFP_ATOMIC); if (!command) { - spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); - return(-ENOMEM); + spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, + flags); + return -ENOMEM; } memset(command, '\0', sizeof(struct sbp2_command_info)); command->command_orb_dma = - pci_map_single (hi->host->pdev, &command->command_orb, - sizeof(struct sbp2_command_orb), - PCI_DMA_BIDIRECTIONAL); + pci_map_single(hi->host->pdev, &command->command_orb, + sizeof(struct sbp2_command_orb), + PCI_DMA_BIDIRECTIONAL); SBP2_DMA_ALLOC("single command orb DMA"); command->sge_dma = - pci_map_single (hi->host->pdev, &command->scatter_gather_element, - sizeof(command->scatter_gather_element), - PCI_DMA_BIDIRECTIONAL); + pci_map_single(hi->host->pdev, + &command->scatter_gather_element, + sizeof(command->scatter_gather_element), + PCI_DMA_BIDIRECTIONAL); SBP2_DMA_ALLOC("scatter_gather_element"); INIT_LIST_HEAD(&command->list); list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed); @@ -488,7 +487,7 @@ static struct sbp2_command_info *sbp2util_find_command_for_orb( list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) { if (command->command_orb_dma == orb) { spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); - return (command); + return command; } } } @@ -496,7 +495,7 @@ static struct sbp2_command_info *sbp2util_find_command_for_orb( SBP2_ORB_DEBUG("could not match command orb %x", (unsigned int)orb); - return(NULL); + return NULL; } /* @@ -513,12 +512,12 @@ static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_ list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) { if (command->Current_SCpnt == SCpnt) { spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); - return (command); + return command; } } } spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); - return(NULL); + return NULL; } /* @@ -545,7 +544,7 @@ static struct sbp2_command_info *sbp2util_allocate_command_orb( SBP2_ERR("sbp2util_allocate_command_orb - No orbs available!"); } spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); - return (command); + return command; } /* Free our DMA's */ @@ -587,7 +586,8 @@ static void sbp2util_free_command_dma(struct sbp2_command_info *command) /* * This function moves a command to the completed orb list. 
*/ -static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id, struct sbp2_command_info *command) +static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id, + struct sbp2_command_info *command) { unsigned long flags; @@ -606,8 +606,6 @@ static inline int sbp2util_node_is_available(struct scsi_id_instance_data *scsi_ return scsi_id && scsi_id->ne && !scsi_id->ne->in_limbo; } - - /********************************************* * IEEE-1394 core driver stack related section *********************************************/ @@ -627,14 +625,14 @@ static int sbp2_probe(struct device *dev) if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY) return -ENODEV; - scsi_id = sbp2_alloc_device(ud); + scsi_id = sbp2_alloc_device(ud); - if (!scsi_id) - return -ENOMEM; + if (!scsi_id) + return -ENOMEM; - sbp2_parse_unit_directory(scsi_id, ud); + sbp2_parse_unit_directory(scsi_id, ud); - return sbp2_start_device(scsi_id); + return sbp2_start_device(scsi_id); } static int sbp2_remove(struct device *dev) @@ -769,7 +767,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud /* Register our host with the SCSI stack. */ scsi_host = scsi_host_alloc(&scsi_driver_template, - sizeof (unsigned long)); + sizeof(unsigned long)); if (!scsi_host) { SBP2_ERR("failed to register scsi host"); goto failed_alloc; @@ -790,7 +788,6 @@ failed_alloc: return NULL; } - static void sbp2_host_reset(struct hpsb_host *host) { struct sbp2scsi_host_info *hi; @@ -804,7 +801,6 @@ static void sbp2_host_reset(struct hpsb_host *host) } } - /* * This function is where we first pull the node unique ids, and then * allocate memory and register a SBP-2 device. @@ -818,7 +814,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id) /* Login FIFO DMA */ scsi_id->login_response = - pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_response), + pci_alloc_consistent(hi->host->pdev, + sizeof(struct sbp2_login_response), &scsi_id->login_response_dma); if (!scsi_id->login_response) goto alloc_fail; @@ -826,7 +823,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id) /* Query logins ORB DMA */ scsi_id->query_logins_orb = - pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_query_logins_orb), + pci_alloc_consistent(hi->host->pdev, + sizeof(struct sbp2_query_logins_orb), &scsi_id->query_logins_orb_dma); if (!scsi_id->query_logins_orb) goto alloc_fail; @@ -834,7 +832,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id) /* Query logins response DMA */ scsi_id->query_logins_response = - pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_query_logins_response), + pci_alloc_consistent(hi->host->pdev, + sizeof(struct sbp2_query_logins_response), &scsi_id->query_logins_response_dma); if (!scsi_id->query_logins_response) goto alloc_fail; @@ -842,7 +841,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id) /* Reconnect ORB DMA */ scsi_id->reconnect_orb = - pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_reconnect_orb), + pci_alloc_consistent(hi->host->pdev, + sizeof(struct sbp2_reconnect_orb), &scsi_id->reconnect_orb_dma); if (!scsi_id->reconnect_orb) goto alloc_fail; @@ -850,7 +850,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id) /* Logout ORB DMA */ scsi_id->logout_orb = - pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_logout_orb), + pci_alloc_consistent(hi->host->pdev, + sizeof(struct sbp2_logout_orb), &scsi_id->logout_orb_dma); if (!scsi_id->logout_orb) 
goto alloc_fail; @@ -858,7 +859,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id) /* Login ORB DMA */ scsi_id->login_orb = - pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_orb), + pci_alloc_consistent(hi->host->pdev, + sizeof(struct sbp2_login_orb), &scsi_id->login_orb_dma); if (!scsi_id->login_orb) { alloc_fail: @@ -880,25 +882,25 @@ alloc_fail: if (scsi_id->logout_orb) { pci_free_consistent(hi->host->pdev, - sizeof(struct sbp2_logout_orb), - scsi_id->logout_orb, - scsi_id->logout_orb_dma); + sizeof(struct sbp2_logout_orb), + scsi_id->logout_orb, + scsi_id->logout_orb_dma); SBP2_DMA_FREE("logout ORB DMA"); } if (scsi_id->reconnect_orb) { pci_free_consistent(hi->host->pdev, - sizeof(struct sbp2_reconnect_orb), - scsi_id->reconnect_orb, - scsi_id->reconnect_orb_dma); + sizeof(struct sbp2_reconnect_orb), + scsi_id->reconnect_orb, + scsi_id->reconnect_orb_dma); SBP2_DMA_FREE("reconnect ORB DMA"); } if (scsi_id->login_response) { pci_free_consistent(hi->host->pdev, - sizeof(struct sbp2_login_response), - scsi_id->login_response, - scsi_id->login_response_dma); + sizeof(struct sbp2_login_response), + scsi_id->login_response, + scsi_id->login_response_dma); SBP2_DMA_FREE("login FIFO DMA"); } @@ -906,7 +908,7 @@ alloc_fail: kfree(scsi_id); - SBP2_ERR ("Could not allocate memory for scsi_id"); + SBP2_ERR("Could not allocate memory for scsi_id"); return -ENOMEM; } @@ -935,7 +937,7 @@ alloc_fail: sbp2_remove_device(scsi_id); return -EINTR; } - + /* * Login to the sbp-2 device */ @@ -1054,36 +1056,39 @@ static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id) * This function deals with physical dma write requests (for adapters that do not support * physical dma in hardware). Mostly just here for debugging... */ -static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t *data, - u64 addr, size_t length, u16 flags) +static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, + int destid, quadlet_t *data, u64 addr, + size_t length, u16 flags) { - /* - * Manually put the data in the right place. - */ - memcpy(bus_to_virt((u32)addr), data, length); - sbp2util_packet_dump(data, length, "sbp2 phys dma write by device", (u32)addr); - return(RCODE_COMPLETE); + /* + * Manually put the data in the right place. + */ + memcpy(bus_to_virt((u32) addr), data, length); + sbp2util_packet_dump(data, length, "sbp2 phys dma write by device", + (u32) addr); + return RCODE_COMPLETE; } /* * This function deals with physical dma read requests (for adapters that do not support * physical dma in hardware). Mostly just here for debugging... */ -static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data, - u64 addr, size_t length, u16 flags) +static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, + quadlet_t *data, u64 addr, size_t length, + u16 flags) { - /* - * Grab data from memory and send a read response. - */ - memcpy(data, bus_to_virt((u32)addr), length); - sbp2util_packet_dump(data, length, "sbp2 phys dma read by device", (u32)addr); - return(RCODE_COMPLETE); + /* + * Grab data from memory and send a read response. 
+ */ + memcpy(data, bus_to_virt((u32) addr), length); + sbp2util_packet_dump(data, length, "sbp2 phys dma read by device", + (u32) addr); + return RCODE_COMPLETE; } #endif - /************************************** * SBP-2 protocol related section **************************************/ @@ -1147,12 +1152,12 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id) if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 2*HZ)) { SBP2_INFO("Error querying logins to SBP-2 device - timed out"); - return(-EIO); + return -EIO; } if (scsi_id->status_block.ORB_offset_lo != scsi_id->query_logins_orb_dma) { SBP2_INFO("Error querying logins to SBP-2 device - timed out"); - return(-EIO); + return -EIO; } if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) || @@ -1160,7 +1165,7 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id) STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) { SBP2_INFO("Error querying logins to SBP-2 device - timed out"); - return(-EIO); + return -EIO; } sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_response, sizeof(struct sbp2_query_logins_response)); @@ -1177,7 +1182,7 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id) SBP2_DEBUG("Number of active logins: %d", active_logins); if (active_logins >= max_logins) { - return(-EIO); + return -EIO; } return 0; @@ -1196,13 +1201,13 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id) if (!scsi_id->login_orb) { SBP2_DEBUG("sbp2_login_device: login_orb not alloc'd!"); - return(-EIO); + return -EIO; } if (!exclusive_login) { if (sbp2_query_logins(scsi_id)) { SBP2_INFO("Device does not support any more concurrent logins"); - return(-EIO); + return -EIO; } } @@ -1269,7 +1274,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id) */ if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 20*HZ)) { SBP2_ERR("Error logging into SBP-2 device - login timed-out"); - return(-EIO); + return -EIO; } /* @@ -1277,7 +1282,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id) */ if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) { SBP2_ERR("Error logging into SBP-2 device - login timed-out"); - return(-EIO); + return -EIO; } /* @@ -1288,7 +1293,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id) STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) { SBP2_ERR("Error logging into SBP-2 device - login failed"); - return(-EIO); + return -EIO; } /* @@ -1312,7 +1317,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id) SBP2_INFO("Logged into SBP-2 device"); - return(0); + return 0; } @@ -1366,8 +1371,7 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id) atomic_set(&scsi_id->sbp2_login_complete, 0); error = hpsb_node_write(scsi_id->ne, - scsi_id->sbp2_management_agent_addr, - data, 8); + scsi_id->sbp2_management_agent_addr, data, 8); if (error) return error; @@ -1377,7 +1381,7 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id) SBP2_INFO("Logged out of SBP-2 device"); - return(0); + return 0; } @@ -1437,8 +1441,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id) atomic_set(&scsi_id->sbp2_login_complete, 0); error = hpsb_node_write(scsi_id->ne, - scsi_id->sbp2_management_agent_addr, - data, 8); + scsi_id->sbp2_management_agent_addr, data, 8); if (error) return error; @@ -1447,7 +1450,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id) */ if 
(sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ)) { SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out"); - return(-EIO); + return -EIO; } /* @@ -1455,7 +1458,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id) */ if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) { SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out"); - return(-EIO); + return -EIO; } /* @@ -1466,12 +1469,12 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id) STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) { SBP2_ERR("Error reconnecting to SBP-2 device - reconnect failed"); - return(-EIO); + return -EIO; } HPSB_DEBUG("Reconnected to SBP-2 device"); - return(0); + return 0; } @@ -1494,10 +1497,9 @@ static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id) SBP2_ERR("sbp2_set_busy_timeout error"); } - return(0); + return 0; } - /* * This function is called to parse sbp2 device's config rom unit * directory. Used to determine things like sbp2 management agent offset, @@ -1510,7 +1512,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, struct csr1212_dentry *dentry; u64 management_agent_addr; u32 command_set_spec_id, command_set, unit_characteristics, - firmware_revision, workarounds; + firmware_revision, workarounds; int i; SBP2_DEBUG("sbp2_parse_unit_directory"); @@ -1528,13 +1530,14 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET) { /* Save off the management agent address */ management_agent_addr = - CSR1212_REGISTER_SPACE_BASE + - (kv->value.csr_offset << 2); + CSR1212_REGISTER_SPACE_BASE + + (kv->value.csr_offset << 2); SBP2_DEBUG("sbp2_management_agent_addr = %x", - (unsigned int) management_agent_addr); + (unsigned int)management_agent_addr); } else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) { - scsi_id->sbp2_lun = ORB_SET_LUN(kv->value.immediate); + scsi_id->sbp2_lun = + ORB_SET_LUN(kv->value.immediate); } break; @@ -1542,14 +1545,14 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, /* Command spec organization */ command_set_spec_id = kv->value.immediate; SBP2_DEBUG("sbp2_command_set_spec_id = %x", - (unsigned int) command_set_spec_id); + (unsigned int)command_set_spec_id); break; case SBP2_COMMAND_SET_KEY: /* Command set used by sbp2 device */ command_set = kv->value.immediate; SBP2_DEBUG("sbp2_command_set = %x", - (unsigned int) command_set); + (unsigned int)command_set); break; case SBP2_UNIT_CHARACTERISTICS_KEY: @@ -1559,7 +1562,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, */ unit_characteristics = kv->value.immediate; SBP2_DEBUG("sbp2_unit_characteristics = %x", - (unsigned int) unit_characteristics); + (unsigned int)unit_characteristics); break; case SBP2_FIRMWARE_REVISION_KEY: @@ -1567,9 +1570,10 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, firmware_revision = kv->value.immediate; if (force_inquiry_hack) SBP2_INFO("sbp2_firmware_revision = %x", - (unsigned int) firmware_revision); - else SBP2_DEBUG("sbp2_firmware_revision = %x", - (unsigned int) firmware_revision); + (unsigned int)firmware_revision); + else + SBP2_DEBUG("sbp2_firmware_revision = %x", + (unsigned int)firmware_revision); break; default: @@ -1647,8 +1651,9 @@ static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id) SBP2_DEBUG("sbp2_max_speed_and_size"); /* Initial setting comes from 
the hosts speed map */ - scsi_id->speed_code = hi->host->speed_map[NODEID_TO_NODE(hi->host->node_id) * 64 - + NODEID_TO_NODE(scsi_id->ne->nodeid)]; + scsi_id->speed_code = + hi->host->speed_map[NODEID_TO_NODE(hi->host->node_id) * 64 + + NODEID_TO_NODE(scsi_id->ne->nodeid)]; /* Bump down our speed if the user requested it */ if (scsi_id->speed_code > max_speed) { @@ -1659,15 +1664,16 @@ static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id) /* Payload size is the lesser of what our speed supports and what * our host supports. */ - scsi_id->max_payload_size = min(sbp2_speedto_max_payload[scsi_id->speed_code], - (u8)(hi->host->csr.max_rec - 1)); + scsi_id->max_payload_size = + min(sbp2_speedto_max_payload[scsi_id->speed_code], + (u8) (hi->host->csr.max_rec - 1)); HPSB_DEBUG("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]", NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid), hpsb_speedto_str[scsi_id->speed_code], - 1 << ((u32)scsi_id->max_payload_size + 2)); + 1 << ((u32) scsi_id->max_payload_size + 2)); - return(0); + return 0; } /* @@ -1702,7 +1708,7 @@ static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait) */ scsi_id->last_orb = NULL; - return(0); + return 0; } /* @@ -1716,10 +1722,9 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id, unsigned int scsi_request_bufflen, void *scsi_request_buffer, enum dma_data_direction dma_dir) - { struct sbp2scsi_host_info *hi = scsi_id->hi; - struct scatterlist *sgpnt = (struct scatterlist *) scsi_request_buffer; + struct scatterlist *sgpnt = (struct scatterlist *)scsi_request_buffer; struct sbp2_command_orb *command_orb = &command->command_orb; struct sbp2_unrestricted_page_table *scatter_gather_element = &command->scatter_gather_element[0]; @@ -1739,30 +1744,30 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id, command_orb->next_ORB_lo = 0x0; command_orb->misc = ORB_SET_MAX_PAYLOAD(scsi_id->max_payload_size); command_orb->misc |= ORB_SET_SPEED(scsi_id->speed_code); - command_orb->misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */ + command_orb->misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */ /* * Get the direction of the transfer. If the direction is unknown, then use our * goofy table as a back-up. */ switch (dma_dir) { - case DMA_NONE: - orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER; - break; - case DMA_TO_DEVICE: - orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA; - break; - case DMA_FROM_DEVICE: - orb_direction = ORB_DIRECTION_READ_FROM_MEDIA; - break; - case DMA_BIDIRECTIONAL: - default: - SBP2_ERR("SCSI data transfer direction not specified. " - "Update the SBP2 direction table in sbp2.h if " - "necessary for your application"); - __scsi_print_command(scsi_cmd); - orb_direction = sbp2scsi_direction_table[*scsi_cmd]; - break; + case DMA_NONE: + orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER; + break; + case DMA_TO_DEVICE: + orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA; + break; + case DMA_FROM_DEVICE: + orb_direction = ORB_DIRECTION_READ_FROM_MEDIA; + break; + case DMA_BIDIRECTIONAL: + default: + SBP2_ERR("SCSI data transfer direction not specified. 
" + "Update the SBP2 direction table in sbp2.h if " + "necessary for your application"); + __scsi_print_command(scsi_cmd); + orb_direction = sbp2scsi_direction_table[*scsi_cmd]; + break; } /* @@ -1865,9 +1870,9 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id, command->dma_dir = dma_dir; command->dma_size = scsi_request_bufflen; command->dma_type = CMD_DMA_SINGLE; - command->cmd_dma = pci_map_single (hi->host->pdev, scsi_request_buffer, - command->dma_size, - command->dma_dir); + command->cmd_dma = + pci_map_single(hi->host->pdev, scsi_request_buffer, + command->dma_size, command->dma_dir); SBP2_DMA_ALLOC("single bulk"); /* @@ -1954,7 +1959,7 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id, memset(command_orb->cdb, 0, 12); memcpy(command_orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd)); - return(0); + return 0; } /* @@ -1970,7 +1975,7 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id, outstanding_orb_incr; SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x", - command_orb, global_outstanding_command_orbs); + command_orb, global_outstanding_command_orbs); pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma, sizeof(struct sbp2_command_orb), @@ -2015,10 +2020,11 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id, * both by the sbp2 device and us. */ scsi_id->last_orb->next_ORB_lo = - cpu_to_be32(command->command_orb_dma); + cpu_to_be32(command->command_orb_dma); /* Tells hardware that this pointer is valid */ scsi_id->last_orb->next_ORB_hi = 0x0; - pci_dma_sync_single_for_device(hi->host->pdev, scsi_id->last_orb_dma, + pci_dma_sync_single_for_device(hi->host->pdev, + scsi_id->last_orb_dma, sizeof(struct sbp2_command_orb), PCI_DMA_BIDIRECTIONAL); @@ -2032,14 +2038,14 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id, if (sbp2util_node_write_no_wait(ne, addr, &data, 4) < 0) { SBP2_ERR("sbp2util_node_write_no_wait failed"); - return(-EIO); + return -EIO; } scsi_id->last_orb = command_orb; scsi_id->last_orb_dma = command->command_orb_dma; } - return(0); + return 0; } /* @@ -2066,7 +2072,7 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id, */ command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done); if (!command) { - return(-EIO); + return -EIO; } /* @@ -2101,10 +2107,9 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id, */ sbp2_link_orb_command(scsi_id, command); - return(0); + return 0; } - /* * Translates SBP-2 status into SCSI sense data for check conditions */ @@ -2132,14 +2137,14 @@ static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense sense_data[14] = sbp2_status[20]; sense_data[15] = sbp2_status[21]; - return(sbp2_status[8] & 0x3f); /* return scsi status */ + return sbp2_status[8] & 0x3f; /* return scsi status */ } /* * This function is called after a command is completed, in order to do any necessary SBP-2 * response data translations for the SCSI stack */ -static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id, +static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id, struct scsi_cmnd *SCpnt) { u8 *scsi_buf = SCpnt->request_buffer; @@ -2148,24 +2153,24 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id, switch (SCpnt->cmnd[0]) { - case INQUIRY: - /* - * Make sure data length is ok. 
Minimum length is 36 bytes - */ - if (scsi_buf[4] == 0) { - scsi_buf[4] = 36 - 5; - } + case INQUIRY: + /* + * Make sure data length is ok. Minimum length is 36 bytes + */ + if (scsi_buf[4] == 0) { + scsi_buf[4] = 36 - 5; + } - /* - * Fix ansi revision and response data format - */ - scsi_buf[2] |= 2; - scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2; + /* + * Fix ansi revision and response data format + */ + scsi_buf[2] |= 2; + scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2; - break; + break; - default: - break; + default: + break; } return; } @@ -2190,14 +2195,14 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest if (!host) { SBP2_ERR("host is NULL - this is bad!"); - return(RCODE_ADDRESS_ERROR); + return RCODE_ADDRESS_ERROR; } hi = hpsb_get_hostinfo(&sbp2_highlevel, host); if (!hi) { SBP2_ERR("host info is NULL - this is bad!"); - return(RCODE_ADDRESS_ERROR); + return RCODE_ADDRESS_ERROR; } /* @@ -2214,7 +2219,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest if (!scsi_id) { SBP2_ERR("scsi_id is NULL - device is gone?"); - return(RCODE_ADDRESS_ERROR); + return RCODE_ADDRESS_ERROR; } /* @@ -2312,10 +2317,9 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest SBP2_ORB_DEBUG("command orb completed"); } - return(RCODE_COMPLETE); + return RCODE_COMPLETE; } - /************************************** * SCSI interface related section **************************************/ @@ -2448,55 +2452,56 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id, * complete the command, just let it get retried at the end of the * bus reset. */ - if (!hpsb_node_entry_valid(scsi_id->ne) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) { + if (!hpsb_node_entry_valid(scsi_id->ne) + && (scsi_status != SBP2_SCSI_STATUS_GOOD)) { SBP2_ERR("Bus reset in progress - retry command later"); return; } - + /* * Switch on scsi status */ switch (scsi_status) { - case SBP2_SCSI_STATUS_GOOD: - SCpnt->result = DID_OK; - break; + case SBP2_SCSI_STATUS_GOOD: + SCpnt->result = DID_OK; + break; - case SBP2_SCSI_STATUS_BUSY: - SBP2_ERR("SBP2_SCSI_STATUS_BUSY"); - SCpnt->result = DID_BUS_BUSY << 16; - break; + case SBP2_SCSI_STATUS_BUSY: + SBP2_ERR("SBP2_SCSI_STATUS_BUSY"); + SCpnt->result = DID_BUS_BUSY << 16; + break; - case SBP2_SCSI_STATUS_CHECK_CONDITION: - SBP2_DEBUG("SBP2_SCSI_STATUS_CHECK_CONDITION"); - SCpnt->result = CHECK_CONDITION << 1; + case SBP2_SCSI_STATUS_CHECK_CONDITION: + SBP2_DEBUG("SBP2_SCSI_STATUS_CHECK_CONDITION"); + SCpnt->result = CHECK_CONDITION << 1; - /* - * Debug stuff - */ + /* + * Debug stuff + */ #if CONFIG_IEEE1394_SBP2_DEBUG >= 1 - scsi_print_command(SCpnt); - scsi_print_sense("bh", SCpnt); + scsi_print_command(SCpnt); + scsi_print_sense("bh", SCpnt); #endif - break; + break; - case SBP2_SCSI_STATUS_SELECTION_TIMEOUT: - SBP2_ERR("SBP2_SCSI_STATUS_SELECTION_TIMEOUT"); - SCpnt->result = DID_NO_CONNECT << 16; - scsi_print_command(SCpnt); - break; + case SBP2_SCSI_STATUS_SELECTION_TIMEOUT: + SBP2_ERR("SBP2_SCSI_STATUS_SELECTION_TIMEOUT"); + SCpnt->result = DID_NO_CONNECT << 16; + scsi_print_command(SCpnt); + break; - case SBP2_SCSI_STATUS_CONDITION_MET: - case SBP2_SCSI_STATUS_RESERVATION_CONFLICT: - case SBP2_SCSI_STATUS_COMMAND_TERMINATED: - SBP2_ERR("Bad SCSI status = %x", scsi_status); - SCpnt->result = DID_ERROR << 16; - scsi_print_command(SCpnt); - break; + case SBP2_SCSI_STATUS_CONDITION_MET: + case SBP2_SCSI_STATUS_RESERVATION_CONFLICT: + case SBP2_SCSI_STATUS_COMMAND_TERMINATED: + SBP2_ERR("Bad 
SCSI status = %x", scsi_status); + SCpnt->result = DID_ERROR << 16; + scsi_print_command(SCpnt); + break; - default: - SBP2_ERR("Unsupported SCSI status = %x", scsi_status); - SCpnt->result = DID_ERROR << 16; + default: + SBP2_ERR("Unsupported SCSI status = %x", scsi_status); + SCpnt->result = DID_ERROR << 16; } /* @@ -2510,7 +2515,8 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id, * If a bus reset is in progress and there was an error, complete * the command as busy so that it will get retried. */ - if (!hpsb_node_entry_valid(scsi_id->ne) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) { + if (!hpsb_node_entry_valid(scsi_id->ne) + && (scsi_status != SBP2_SCSI_STATUS_GOOD)) { SBP2_ERR("Completing command with busy (bus reset)"); SCpnt->result = DID_BUS_BUSY << 16; } @@ -2531,17 +2537,15 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id, /* * Tell scsi stack that we're done with this command */ - done (SCpnt); + done(SCpnt); } - static int sbp2scsi_slave_alloc(struct scsi_device *sdev) { ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = sdev; return 0; } - static int sbp2scsi_slave_configure(struct scsi_device *sdev) { blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); @@ -2550,14 +2554,12 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev) return 0; } - static void sbp2scsi_slave_destroy(struct scsi_device *sdev) { ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = NULL; return; } - /* * Called by scsi stack when something has really gone wrong. Usually * called when a command has timed-out for some reason. @@ -2603,7 +2605,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt) sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY); } - return(SUCCESS); + return SUCCESS; } /* @@ -2629,12 +2631,14 @@ static int sbp2scsi_reset(struct scsi_cmnd *SCpnt) return SUCCESS; } -static const char *sbp2scsi_info (struct Scsi_Host *host) +static const char *sbp2scsi_info(struct Scsi_Host *host) { - return "SCSI emulation for IEEE-1394 SBP-2 Devices"; + return "SCSI emulation for IEEE-1394 SBP-2 Devices"; } -static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, + struct device_attribute *attr, + char *buf) { struct scsi_device *sdev; struct scsi_id_instance_data *scsi_id; @@ -2705,7 +2709,6 @@ static int sbp2_module_init(void) /* Set max sectors (module load option). Default is 255 sectors. 
*/ scsi_driver_template.max_sectors = max_sectors; - /* Register our high level driver with 1394 stack */ hpsb_register_highlevel(&sbp2_highlevel); diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index 890be1365523..abc647bae5b1 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h @@ -119,8 +119,8 @@ struct sbp2_query_logins_response { struct sbp2_reconnect_orb { u32 reserved1; u32 reserved2; - u32 reserved3; - u32 reserved4; + u32 reserved3; + u32 reserved4; u32 login_ID_misc; u32 reserved5; u32 status_FIFO_hi; @@ -130,8 +130,8 @@ struct sbp2_reconnect_orb { struct sbp2_logout_orb { u32 reserved1; u32 reserved2; - u32 reserved3; - u32 reserved4; + u32 reserved3; + u32 reserved4; u32 login_ID_misc; u32 reserved5; u32 status_FIFO_hi; @@ -188,7 +188,7 @@ struct sbp2_unrestricted_page_table { struct sbp2_status_block { u32 ORB_offset_hi_misc; u32 ORB_offset_lo; - u8 command_set_dependent[24]; + u8 command_set_dependent[24]; }; /* @@ -211,7 +211,7 @@ struct sbp2_status_block { * specified for write posting, where the ohci controller will * automatically send an ack_complete when the status is written by the * sbp2 device... saving a split transaction. =) - */ + */ #define SBP2_STATUS_FIFO_ADDRESS 0xfffe00000000ULL #define SBP2_STATUS_FIFO_ADDRESS_HI 0xfffe #define SBP2_STATUS_FIFO_ADDRESS_LO 0x0 @@ -333,10 +333,8 @@ struct sbp2_command_info { #define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1 #define SBP2_BREAKAGE_INQUIRY_HACK 0x2 - struct sbp2scsi_host_info; - /* * Information needed on a per scsi id basis (one for each sbp2 device) */ @@ -406,7 +404,6 @@ struct scsi_id_instance_data { u32 workarounds; }; - /* Sbp2 host data structure (one per IEEE1394 host) */ struct sbp2scsi_host_info { struct hpsb_host *host; /* IEEE1394 host */ -- cgit v1.2.3 From 7afa1467761f06bd9649efd66a4a6b3ff9f29a1f Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Mon, 7 Nov 2005 06:31:42 -0500 Subject: Remove version strings from eth1394, ohci1394, sbp2. Their version information is not trustworthy. Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/eth1394.c | 8 -------- drivers/ieee1394/ohci1394.c | 8 -------- drivers/ieee1394/sbp2.c | 5 ----- 3 files changed, 21 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c index c9e92d85c893..6984a921b59b 100644 --- a/drivers/ieee1394/eth1394.c +++ b/drivers/ieee1394/eth1394.c @@ -88,9 +88,6 @@ printk(KERN_ERR "%s:%s[%d]: " fmt "\n", driver_name, __FUNCTION__, __LINE__, ## args) #define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__) -static char version[] __devinitdata = - "$Rev: 1312 $ Ben Collins "; - struct fragment_info { struct list_head list; int offset; @@ -566,7 +563,6 @@ static void ether1394_add_host (struct hpsb_host *host) struct eth1394_host_info *hi = NULL; struct net_device *dev = NULL; struct eth1394_priv *priv; - static int version_printed = 0; u64 fifo_addr; if (!(host->config_roms & HPSB_CONFIG_ROM_ENTRY_IP1394)) @@ -581,9 +577,6 @@ static void ether1394_add_host (struct hpsb_host *host) if (fifo_addr == ~0ULL) goto out; - if (version_printed++ == 0) - ETH1394_PRINT_G (KERN_INFO, "%s\n", version); - /* We should really have our own alloc_hpsbdev() function in * net_init.c instead of calling the one for ethernet then hijacking * it for ourselves. That way we'd be a real networking device. 
*/ @@ -1768,7 +1761,6 @@ fail: static void ether1394_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy (info->driver, driver_name); - strcpy (info->version, "$Rev: 1312 $"); /* FIXME XXX provide sane businfo */ strcpy (info->bus_info, "ieee1394"); } diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c index dcb5776e5c44..8355068b0696 100644 --- a/drivers/ieee1394/ohci1394.c +++ b/drivers/ieee1394/ohci1394.c @@ -161,9 +161,6 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args) #define PRINT(level, fmt, args...) \ printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args) -static char version[] __devinitdata = - "$Rev: 1313 $ Ben Collins "; - /* Module Parameters */ static int phys_dma = 1; module_param(phys_dma, int, 0644); @@ -3215,15 +3212,10 @@ do { \ static int __devinit ohci1394_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { - static int version_printed = 0; - struct hpsb_host *host; struct ti_ohci *ohci; /* shortcut to currently handled device */ unsigned long ohci_base; - if (version_printed++ == 0) - PRINT_G(KERN_INFO, "%s", version); - if (pci_enable_device(dev)) FAIL(-ENXIO, "Failed to enable OHCI hardware"); pci_set_master(dev); diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 073ede9d3b48..b871116fa4d1 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -80,9 +80,6 @@ #include "ieee1394_transactions.h" #include "sbp2.h" -static char version[] __devinitdata = - "$Rev: 1306 $ Ben Collins "; - /* * Module load parameter definitions */ @@ -2696,8 +2693,6 @@ static int sbp2_module_init(void) SBP2_DEBUG("sbp2_module_init"); - printk(KERN_INFO "sbp2: %s\n", version); - /* Module load debug option to force one command at a time (serializing I/O) */ if (serialize_io) { SBP2_INFO("Driver forced to serialize I/O (serialize_io=1)"); -- cgit v1.2.3 From 8551158abc8ef45a7f473a87e69624d05ebfd684 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Mon, 7 Nov 2005 06:31:45 -0500 Subject: kmalloc/kzalloc changes: dv1394, eth1394, ieee1394, ohci1394, pcilynx, raw1394, sbp2c, video1394: - use kzalloc - provide safer size arguments to kmalloc and kzalloc - omit some casts Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/csr1212.c | 11 +++--- drivers/ieee1394/csr1212.h | 2 +- drivers/ieee1394/dv1394.c | 4 +-- drivers/ieee1394/eth1394.c | 12 +++---- drivers/ieee1394/highlevel.c | 18 ++++------ drivers/ieee1394/hosts.c | 6 ++-- drivers/ieee1394/nodemgr.c | 27 +++++++-------- drivers/ieee1394/ohci1394.c | 20 ++++------- drivers/ieee1394/pcilynx.c | 2 +- drivers/ieee1394/raw1394.c | 38 +++++++++------------ drivers/ieee1394/sbp2.c | 7 ++-- drivers/ieee1394/video1394.c | 81 ++++++++++++++------------------------------ 12 files changed, 85 insertions(+), 143 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c index 61ddd5d37eff..c0f8ed6fca8a 100644 --- a/drivers/ieee1394/csr1212.c +++ b/drivers/ieee1394/csr1212.c @@ -1261,7 +1261,7 @@ static int csr1212_parse_bus_info_block(struct csr1212_csr *csr) return CSR1212_EINVAL; #endif - cr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region)); + cr = CSR1212_MALLOC(sizeof(*cr)); if (!cr) return CSR1212_ENOMEM; @@ -1393,8 +1393,7 @@ int csr1212_parse_keyval(struct csr1212_keyval *kv, case CSR1212_KV_TYPE_LEAF: if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) { kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len)); - if 
(!kv->value.leaf.data) - { + if (!kv->value.leaf.data) { ret = CSR1212_ENOMEM; goto fail; } @@ -1462,7 +1461,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv) cache->next = NULL; csr->cache_tail = cache; cache->filled_head = - CSR1212_MALLOC(sizeof(struct csr1212_cache_region)); + CSR1212_MALLOC(sizeof(*cache->filled_head)); if (!cache->filled_head) { return CSR1212_ENOMEM; } @@ -1484,7 +1483,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv) /* Now seach read portions of the cache to see if it is there. */ for (cr = cache->filled_head; cr; cr = cr->next) { if (cache_index < cr->offset_start) { - newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region)); + newcr = CSR1212_MALLOC(sizeof(*newcr)); if (!newcr) return CSR1212_ENOMEM; @@ -1508,7 +1507,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv) if (!cr) { cr = cache->filled_tail; - newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region)); + newcr = CSR1212_MALLOC(sizeof(*newcr)); if (!newcr) return CSR1212_ENOMEM; diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h index 28c5f4b726e2..cecd5871f2de 100644 --- a/drivers/ieee1394/csr1212.h +++ b/drivers/ieee1394/csr1212.h @@ -646,7 +646,7 @@ static inline struct csr1212_csr_rom_cache *csr1212_rom_cache_malloc(u_int32_t o { struct csr1212_csr_rom_cache *cache; - cache = CSR1212_MALLOC(sizeof(struct csr1212_csr_rom_cache) + size); + cache = CSR1212_MALLOC(sizeof(*cache) + size); if (!cache) return NULL; diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c index cbbbe14b8849..d204ec772c0a 100644 --- a/drivers/ieee1394/dv1394.c +++ b/drivers/ieee1394/dv1394.c @@ -2218,14 +2218,12 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes unsigned long flags; int i; - video = kmalloc(sizeof(struct video_card), GFP_KERNEL); + video = kzalloc(sizeof(*video), GFP_KERNEL); if (!video) { printk(KERN_ERR "dv1394: cannot allocate video_card\n"); goto err; } - memset(video, 0, sizeof(struct video_card)); - video->ohci = ohci; /* lower 2 bits of id indicate which of four "plugs" per host */ diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c index 6984a921b59b..30fa0d43a43a 100644 --- a/drivers/ieee1394/eth1394.c +++ b/drivers/ieee1394/eth1394.c @@ -352,12 +352,12 @@ static int eth1394_probe(struct device *dev) if (!hi) return -ENOENT; - new_node = kmalloc(sizeof(struct eth1394_node_ref), + new_node = kmalloc(sizeof(*new_node), in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); if (!new_node) return -ENOMEM; - node_info = kmalloc(sizeof(struct eth1394_node_info), + node_info = kmalloc(sizeof(*node_info), in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); if (!node_info) { kfree(new_node); @@ -433,12 +433,12 @@ static int eth1394_update(struct unit_directory *ud) node = eth1394_find_node(&priv->ip_node_list, ud); if (!node) { - node = kmalloc(sizeof(struct eth1394_node_ref), + node = kmalloc(sizeof(*node), in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); if (!node) return -ENOMEM; - node_info = kmalloc(sizeof(struct eth1394_node_info), + node_info = kmalloc(sizeof(*node_info), in_interrupt() ? 
GFP_ATOMIC : GFP_KERNEL); if (!node_info) { kfree(node); @@ -1014,7 +1014,7 @@ static inline int new_fragment(struct list_head *frag_info, int offset, int len) } } - new = kmalloc(sizeof(struct fragment_info), GFP_ATOMIC); + new = kmalloc(sizeof(*new), GFP_ATOMIC); if (!new) return -ENOMEM; @@ -1033,7 +1033,7 @@ static inline int new_partial_datagram(struct net_device *dev, { struct partial_datagram *new; - new = kmalloc(sizeof(struct partial_datagram), GFP_ATOMIC); + new = kmalloc(sizeof(*new), GFP_ATOMIC); if (!new) return -ENOMEM; diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c index 997e1bf6297f..734b121a0554 100644 --- a/drivers/ieee1394/highlevel.c +++ b/drivers/ieee1394/highlevel.c @@ -101,12 +101,10 @@ void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host, return NULL; } - hi = kmalloc(sizeof(*hi) + data_size, GFP_ATOMIC); + hi = kzalloc(sizeof(*hi) + data_size, GFP_ATOMIC); if (!hi) return NULL; - memset(hi, 0, sizeof(*hi) + data_size); - if (data_size) { data = hi->data = hi + 1; hi->size = data_size; @@ -326,11 +324,9 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl, return retval; } - as = (struct hpsb_address_serve *) - kmalloc(sizeof(struct hpsb_address_serve), GFP_KERNEL); - if (as == NULL) { + as = kmalloc(sizeof(*as), GFP_KERNEL); + if (!as) return retval; - } INIT_LIST_HEAD(&as->host_list); INIT_LIST_HEAD(&as->hl_list); @@ -383,11 +379,9 @@ int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host, return 0; } - as = (struct hpsb_address_serve *) - kmalloc(sizeof(struct hpsb_address_serve), GFP_ATOMIC); - if (as == NULL) { - return 0; - } + as = kmalloc(sizeof(*as), GFP_ATOMIC); + if (!as) + return 0; INIT_LIST_HEAD(&as->host_list); INIT_LIST_HEAD(&as->hl_list); diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c index aeeaeb670d03..d245abe4033c 100644 --- a/drivers/ieee1394/hosts.c +++ b/drivers/ieee1394/hosts.c @@ -114,9 +114,9 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, int i; int hostnum = 0; - h = kmalloc(sizeof(struct hpsb_host) + extra, SLAB_KERNEL); - if (!h) return NULL; - memset(h, 0, sizeof(struct hpsb_host) + extra); + h = kzalloc(sizeof(*h) + extra, SLAB_KERNEL); + if (!h) + return NULL; h->csr.rom = csr1212_create_csr(&csr_bus_ops, CSR_BUS_INFO_SIZE, h); if (!h->csr.rom) { diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index 7fff5a1d2ea4..3f0917bbd7f5 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c @@ -743,21 +743,20 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr unsigned int generation) { struct hpsb_host *host = hi->host; - struct node_entry *ne; - - ne = kmalloc(sizeof(struct node_entry), GFP_KERNEL); - if (!ne) return NULL; + struct node_entry *ne; - memset(ne, 0, sizeof(struct node_entry)); + ne = kzalloc(sizeof(*ne), GFP_KERNEL); + if (!ne) + return NULL; ne->tpool = &host->tpool[nodeid & NODE_MASK]; - ne->host = host; - ne->nodeid = nodeid; + ne->host = host; + ne->nodeid = nodeid; ne->generation = generation; ne->needs_probe = 1; - ne->guid = guid; + ne->guid = guid; ne->guid_vendor_id = (guid >> 40) & 0xffffff; ne->guid_vendor_oui = nodemgr_find_oui_name(ne->guid_vendor_id); ne->csr = csr; @@ -787,7 +786,7 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr (host->node_id == nodeid) ? 
"Host" : "Node", NODE_BUS_ARGS(host, nodeid), (unsigned long long)guid); - return ne; + return ne; } @@ -872,12 +871,10 @@ static struct unit_directory *nodemgr_process_unit_directory struct csr1212_keyval *kv; u8 last_key_id = 0; - ud = kmalloc(sizeof(struct unit_directory), GFP_KERNEL); + ud = kzalloc(sizeof(*ud), GFP_KERNEL); if (!ud) goto unit_directory_error; - memset (ud, 0, sizeof(struct unit_directory)); - ud->ne = ne; ud->ignore_driver = ignore_drivers; ud->address = ud_kv->offset + CSR1212_CONFIG_ROM_SPACE_BASE; @@ -937,10 +934,10 @@ static struct unit_directory *nodemgr_process_unit_directory /* Logical Unit Number */ if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) { if (ud->flags & UNIT_DIRECTORY_HAS_LUN) { - ud_child = kmalloc(sizeof(struct unit_directory), GFP_KERNEL); + ud_child = kmalloc(sizeof(*ud_child), GFP_KERNEL); if (!ud_child) goto unit_directory_error; - memcpy(ud_child, ud, sizeof(struct unit_directory)); + memcpy(ud_child, ud, sizeof(*ud_child)); nodemgr_register_device(ne, ud_child, &ne->device); ud_child = NULL; @@ -1200,7 +1197,7 @@ static void nodemgr_node_scan_one(struct host_info *hi, struct csr1212_csr *csr; struct nodemgr_csr_info *ci; - ci = kmalloc(sizeof(struct nodemgr_csr_info), GFP_KERNEL); + ci = kmalloc(sizeof(*ci), GFP_KERNEL); if (!ci) return; diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c index 8355068b0696..97b6f48033c4 100644 --- a/drivers/ieee1394/ohci1394.c +++ b/drivers/ieee1394/ohci1394.c @@ -2957,28 +2957,23 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d, d->ctrlClear = 0; d->cmdPtr = 0; - d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_ATOMIC); - d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC); + d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC); + d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC); if (d->buf_cpu == NULL || d->buf_bus == NULL) { PRINT(KERN_ERR, "Failed to allocate dma buffer"); free_dma_rcv_ctx(d); return -ENOMEM; } - memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*)); - memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t)); - d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*), - GFP_ATOMIC); - d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC); + d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC); + d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC); if (d->prg_cpu == NULL || d->prg_bus == NULL) { PRINT(KERN_ERR, "Failed to allocate dma prg"); free_dma_rcv_ctx(d); return -ENOMEM; } - memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*)); - memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t)); d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC); @@ -3090,17 +3085,14 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d, d->ctrlClear = 0; d->cmdPtr = 0; - d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*), - GFP_KERNEL); - d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL); + d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL); + d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL); if (d->prg_cpu == NULL || d->prg_bus == NULL) { PRINT(KERN_ERR, "Failed to allocate at dma prg"); free_dma_trm_ctx(d); return -ENOMEM; } - memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*)); - memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t)); len = sprintf(pool_name, "ohci1394_trm_prg"); sprintf(pool_name+len, "%d", num_allocs); diff --git a/drivers/ieee1394/pcilynx.c 
b/drivers/ieee1394/pcilynx.c index 6b1ab875333b..e2edc41e1b6f 100644 --- a/drivers/ieee1394/pcilynx.c +++ b/drivers/ieee1394/pcilynx.c @@ -1435,7 +1435,7 @@ static int __devinit add_card(struct pci_dev *dev, struct i2c_algo_bit_data i2c_adapter_data; error = -ENOMEM; - i2c_ad = kmalloc(sizeof(struct i2c_adapter), SLAB_KERNEL); + i2c_ad = kmalloc(sizeof(*i2c_ad), SLAB_KERNEL); if (!i2c_ad) FAIL("failed to allocate I2C adapter memory"); memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter)); diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index 24411e666b21..0278dc5d5ef9 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c @@ -102,12 +102,9 @@ static struct pending_request *__alloc_pending_request(gfp_t flags) { struct pending_request *req; - req = (struct pending_request *)kmalloc(sizeof(struct pending_request), - flags); - if (req != NULL) { - memset(req, 0, sizeof(struct pending_request)); + req = kzalloc(sizeof(*req), flags); + if (req) INIT_LIST_HEAD(&req->list); - } return req; } @@ -192,9 +189,9 @@ static void add_host(struct hpsb_host *host) struct host_info *hi; unsigned long flags; - hi = (struct host_info *)kmalloc(sizeof(struct host_info), GFP_KERNEL); + hi = kmalloc(sizeof(*hi), GFP_KERNEL); - if (hi != NULL) { + if (hi) { INIT_LIST_HEAD(&hi->list); hi->host = host; INIT_LIST_HEAD(&hi->file_info_list); @@ -315,8 +312,8 @@ static void iso_receive(struct hpsb_host *host, int channel, quadlet_t * data, break; if (!ibs) { - ibs = kmalloc(sizeof(struct iso_block_store) - + length, SLAB_ATOMIC); + ibs = kmalloc(sizeof(*ibs) + length, + SLAB_ATOMIC); if (!ibs) { kfree(req); break; @@ -376,8 +373,8 @@ static void fcp_request(struct hpsb_host *host, int nodeid, int direction, break; if (!ibs) { - ibs = kmalloc(sizeof(struct iso_block_store) - + length, SLAB_ATOMIC); + ibs = kmalloc(sizeof(*ibs) + length, + SLAB_ATOMIC); if (!ibs) { kfree(req); break; @@ -502,10 +499,9 @@ static int state_initialized(struct file_info *fi, struct pending_request *req) switch (req->req.type) { case RAW1394_REQ_LIST_CARDS: spin_lock_irqsave(&host_info_lock, flags); - khl = kmalloc(sizeof(struct raw1394_khost_list) * host_count, - SLAB_ATOMIC); + khl = kmalloc(sizeof(*khl) * host_count, SLAB_ATOMIC); - if (khl != NULL) { + if (khl) { req->req.misc = host_count; req->data = (quadlet_t *) khl; @@ -517,7 +513,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req) } spin_unlock_irqrestore(&host_info_lock, flags); - if (khl != NULL) { + if (khl) { req->req.error = RAW1394_ERROR_NONE; req->req.length = min(req->req.length, (u32) (sizeof @@ -1647,13 +1643,13 @@ static int arm_register(struct file_info *fi, struct pending_request *req) return (-EINVAL); } /* addr-list-entry for fileinfo */ - addr = (struct arm_addr *)kmalloc(sizeof(struct arm_addr), SLAB_KERNEL); + addr = kmalloc(sizeof(*addr), SLAB_KERNEL); if (!addr) { req->req.length = 0; return (-ENOMEM); } /* allocation of addr_space_buffer */ - addr->addr_space_buffer = (u8 *) vmalloc(req->req.length); + addr->addr_space_buffer = vmalloc(req->req.length); if (!(addr->addr_space_buffer)) { kfree(addr); req->req.length = 0; @@ -2122,8 +2118,7 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req) return -ENOMEM; } - cache->filled_head = - kmalloc(sizeof(struct csr1212_cache_region), GFP_KERNEL); + cache->filled_head = kmalloc(sizeof(*cache->filled_head), GFP_KERNEL); if (!cache->filled_head) { csr1212_release_keyval(fi->csr1212_dirs[dr]); fi->csr1212_dirs[dr] = NULL; 
@@ -2684,11 +2679,10 @@ static int raw1394_open(struct inode *inode, struct file *file) { struct file_info *fi; - fi = kmalloc(sizeof(struct file_info), SLAB_KERNEL); - if (fi == NULL) + fi = kzalloc(sizeof(*fi), SLAB_KERNEL); + if (!fi) return -ENOMEM; - memset(fi, 0, sizeof(struct file_info)); fi->notification = (u8) RAW1394_NOTIFY_ON; /* busreset notification */ INIT_LIST_HEAD(&fi->list); diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index b871116fa4d1..84875cd5a70d 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -411,14 +411,12 @@ static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_i spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); for (i = 0; i < orbs; i++) { - command = (struct sbp2_command_info *) - kmalloc(sizeof(struct sbp2_command_info), GFP_ATOMIC); + command = kzalloc(sizeof(*command), GFP_ATOMIC); if (!command) { spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); return -ENOMEM; } - memset(command, '\0', sizeof(struct sbp2_command_info)); command->command_orb_dma = pci_map_single(hi->host->pdev, &command->command_orb, sizeof(struct sbp2_command_orb), @@ -714,12 +712,11 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud SBP2_DEBUG("sbp2_alloc_device"); - scsi_id = kmalloc(sizeof(*scsi_id), GFP_KERNEL); + scsi_id = kzalloc(sizeof(*scsi_id), GFP_KERNEL); if (!scsi_id) { SBP2_ERR("failed to create scsi_id"); goto failed_alloc; } - memset(scsi_id, 0, sizeof(*scsi_id)); scsi_id->ne = ud->ne; scsi_id->ud = ud; diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c index 23911da50154..2ad30cd7c5aa 100644 --- a/drivers/ieee1394/video1394.c +++ b/drivers/ieee1394/video1394.c @@ -206,14 +206,12 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc, struct dma_iso_ctx *d; int i; - d = kmalloc(sizeof(struct dma_iso_ctx), GFP_KERNEL); - if (d == NULL) { + d = kzalloc(sizeof(*d), GFP_KERNEL); + if (!d) { PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma_iso_ctx"); return NULL; } - memset(d, 0, sizeof *d); - d->ohci = ohci; d->type = type; d->channel = channel; @@ -251,9 +249,8 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc, } d->ctx = d->iso_tasklet.context; - d->prg_reg = kmalloc(d->num_desc * sizeof(struct dma_prog_region), - GFP_KERNEL); - if (d->prg_reg == NULL) { + d->prg_reg = kmalloc(d->num_desc * sizeof(*d->prg_reg), GFP_KERNEL); + if (!d->prg_reg) { PRINT(KERN_ERR, ohci->host->id, "Failed to allocate ir prg regs"); free_dma_iso_ctx(d); return NULL; @@ -268,15 +265,14 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc, d->cmdPtr = OHCI1394_IsoRcvCommandPtr+32*d->ctx; d->ctxMatch = OHCI1394_IsoRcvContextMatch+32*d->ctx; - d->ir_prg = kmalloc(d->num_desc * sizeof(struct dma_cmd *), + d->ir_prg = kzalloc(d->num_desc * sizeof(*d->ir_prg), GFP_KERNEL); - if (d->ir_prg == NULL) { + if (!d->ir_prg) { PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma ir prg"); free_dma_iso_ctx(d); return NULL; } - memset(d->ir_prg, 0, d->num_desc * sizeof(struct dma_cmd *)); d->nb_cmd = d->buf_size / PAGE_SIZE + 1; d->left_size = (d->frame_size % PAGE_SIZE) ? 
@@ -297,16 +293,15 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc, d->ctrlClear = OHCI1394_IsoXmitContextControlClear+16*d->ctx; d->cmdPtr = OHCI1394_IsoXmitCommandPtr+16*d->ctx; - d->it_prg = kmalloc(d->num_desc * sizeof(struct it_dma_prg *), + d->it_prg = kzalloc(d->num_desc * sizeof(*d->it_prg), GFP_KERNEL); - if (d->it_prg == NULL) { + if (!d->it_prg) { PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma it prg"); free_dma_iso_ctx(d); return NULL; } - memset(d->it_prg, 0, d->num_desc*sizeof(struct it_dma_prg *)); d->packet_size = packet_size; @@ -337,47 +332,24 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc, } } - d->buffer_status = kmalloc(d->num_desc * sizeof(unsigned int), - GFP_KERNEL); - d->buffer_prg_assignment = kmalloc(d->num_desc * sizeof(unsigned int), - GFP_KERNEL); - d->buffer_time = kmalloc(d->num_desc * sizeof(struct timeval), - GFP_KERNEL); - d->last_used_cmd = kmalloc(d->num_desc * sizeof(unsigned int), - GFP_KERNEL); - d->next_buffer = kmalloc(d->num_desc * sizeof(int), - GFP_KERNEL); - - if (d->buffer_status == NULL) { - PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_status"); - free_dma_iso_ctx(d); - return NULL; - } - if (d->buffer_prg_assignment == NULL) { - PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_prg_assignment"); - free_dma_iso_ctx(d); - return NULL; - } - if (d->buffer_time == NULL) { - PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_time"); - free_dma_iso_ctx(d); - return NULL; - } - if (d->last_used_cmd == NULL) { - PRINT(KERN_ERR, ohci->host->id, "Failed to allocate last_used_cmd"); - free_dma_iso_ctx(d); - return NULL; - } - if (d->next_buffer == NULL) { - PRINT(KERN_ERR, ohci->host->id, "Failed to allocate next_buffer"); + d->buffer_status = + kzalloc(d->num_desc * sizeof(*d->buffer_status), GFP_KERNEL); + d->buffer_prg_assignment = + kzalloc(d->num_desc * sizeof(*d->buffer_prg_assignment), GFP_KERNEL); + d->buffer_time = + kzalloc(d->num_desc * sizeof(*d->buffer_time), GFP_KERNEL); + d->last_used_cmd = + kzalloc(d->num_desc * sizeof(*d->last_used_cmd), GFP_KERNEL); + d->next_buffer = + kzalloc(d->num_desc * sizeof(*d->next_buffer), GFP_KERNEL); + + if (!d->buffer_status || !d->buffer_prg_assignment || !d->buffer_time || + !d->last_used_cmd || !d->next_buffer) { + PRINT(KERN_ERR, ohci->host->id, + "Failed to allocate dma_iso_ctx member"); free_dma_iso_ctx(d); return NULL; } - memset(d->buffer_status, 0, d->num_desc * sizeof(unsigned int)); - memset(d->buffer_prg_assignment, 0, d->num_desc * sizeof(unsigned int)); - memset(d->buffer_time, 0, d->num_desc * sizeof(struct timeval)); - memset(d->last_used_cmd, 0, d->num_desc * sizeof(unsigned int)); - memset(d->next_buffer, -1, d->num_desc * sizeof(int)); spin_lock_init(&d->lock); @@ -1085,7 +1057,7 @@ static int __video1394_ioctl(struct file *file, } if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) { - int buf_size = d->nb_cmd * sizeof(unsigned int); + int buf_size = d->nb_cmd * sizeof(*psizes); struct video1394_queue_variable __user *p = argp; unsigned int __user *qv; @@ -1251,13 +1223,12 @@ static int video1394_open(struct inode *inode, struct file *file) if (ohci == NULL) return -EIO; - ctx = kmalloc(sizeof(struct file_ctx), GFP_KERNEL); - if (ctx == NULL) { + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { PRINT(KERN_ERR, ohci->host->id, "Cannot malloc file_ctx"); return -ENOMEM; } - memset(ctx, 0, sizeof(struct file_ctx)); ctx->ohci = ohci; INIT_LIST_HEAD(&ctx->context_list); ctx->current_ctx = NULL; -- cgit v1.2.3 
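The conversions in the commit above all follow one mechanical pattern: replace kmalloc() plus memset() with kzalloc(), size the allocation from the pointer being assigned rather than from the type name, and drop the cast of the void * return value. A minimal sketch of that pattern is shown below; struct foo_ctx and foo_alloc() are hypothetical names used only for illustration and appear in none of the patches in this series.

#include <linux/slab.h>

/* hypothetical example structure, not taken from any driver above */
struct foo_ctx {
	int id;
	void *data;
};

static struct foo_ctx *foo_alloc(void)
{
	struct foo_ctx *ctx;

	/* old style being removed by the patches:
	 *   ctx = (struct foo_ctx *)kmalloc(sizeof(struct foo_ctx), GFP_KERNEL);
	 *   if (ctx == NULL)
	 *           return NULL;
	 *   memset(ctx, 0, sizeof(struct foo_ctx));
	 */

	/* new style: zeroed allocation, size taken from the pointer,
	 * no cast needed since kzalloc() returns void *
	 */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	return ctx;
}

Sizing the allocation with sizeof(*ctx) keeps it correct even if the pointer's type is later changed, which is why the patches prefer it over spelling out the struct name.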
From ef797546a93fffa9d8508e7c8539b352b6678568 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Mon, 7 Nov 2005 06:31:50 -0500 Subject: Remove definitions of unreferenced macros virt_to_page and vmalloc_32 from dv1394 and video1394. Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/dv1394.c | 9 --------- drivers/ieee1394/video1394.c | 8 -------- 2 files changed, 17 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c index d204ec772c0a..196db7439272 100644 --- a/drivers/ieee1394/dv1394.c +++ b/drivers/ieee1394/dv1394.c @@ -123,15 +123,6 @@ #include "ohci1394.h" -#ifndef virt_to_page -#define virt_to_page(x) MAP_NR(x) -#endif - -#ifndef vmalloc_32 -#define vmalloc_32(x) vmalloc(x) -#endif - - /* DEBUG LEVELS: 0 - no debugging messages 1 - some debugging messages, but none during DMA frame transmission diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c index 2ad30cd7c5aa..07050f0e4098 100644 --- a/drivers/ieee1394/video1394.c +++ b/drivers/ieee1394/video1394.c @@ -77,14 +77,6 @@ #define ISO_CHANNELS 64 -#ifndef virt_to_page -#define virt_to_page(x) MAP_NR(x) -#endif - -#ifndef vmalloc_32 -#define vmalloc_32(x) vmalloc(x) -#endif - struct it_dma_prg { struct dma_cmd begin; quadlet_t data[4]; -- cgit v1.2.3 From 7301c8d3a05dc52d33598364da7c4eb6ab6357eb Mon Sep 17 00:00:00 2001 From: Jody McIntyre Date: Fri, 18 Nov 2005 00:16:26 -0500 Subject: Remove amdtp, cmp drivers. Remove the Audio and Music Data Transmission Protocol driver and the Connection Management Procedures driver. These are incomplete, have never worked, and are better implemented in userland via raw1394 (see http://freebob.sourceforge.net/ for example.) Signed-off-by: Jody McIntyre Cc: Adrian Bunk --- drivers/ieee1394/Kconfig | 23 ----------------------- drivers/ieee1394/Makefile | 2 -- drivers/ieee1394/ieee1394-ioctl.h | 8 -------- drivers/ieee1394/ohci1394.h | 4 ++-- 4 files changed, 2 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig index 25103a0ef9b3..39142e2f804b 100644 --- a/drivers/ieee1394/Kconfig +++ b/drivers/ieee1394/Kconfig @@ -169,27 +169,4 @@ config IEEE1394_RAWIO To compile this driver as a module, say M here: the module will be called raw1394. -config IEEE1394_CMP - tristate "IEC61883-1 Plug support" - depends on IEEE1394 - help - This option enables the Connection Management Procedures - (IEC61883-1) driver, which implements input and output plugs. - - To compile this driver as a module, say M here: the - module will be called cmp. - -config IEEE1394_AMDTP - tristate "IEC61883-6 (Audio transmission) support" - depends on IEEE1394 && IEEE1394_OHCI1394 && IEEE1394_CMP - help - This option enables the Audio & Music Data Transmission Protocol - (IEC61883-6) driver, which implements audio transmission over - IEEE1394. - - The userspace interface is documented in amdtp.h. - - To compile this driver as a module, say M here: the - module will be called amdtp. 
- endmenu diff --git a/drivers/ieee1394/Makefile b/drivers/ieee1394/Makefile index e8b4d48d376e..6f53611fe255 100644 --- a/drivers/ieee1394/Makefile +++ b/drivers/ieee1394/Makefile @@ -14,8 +14,6 @@ obj-$(CONFIG_IEEE1394_RAWIO) += raw1394.o obj-$(CONFIG_IEEE1394_SBP2) += sbp2.o obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o -obj-$(CONFIG_IEEE1394_AMDTP) += amdtp.o -obj-$(CONFIG_IEEE1394_CMP) += cmp.o quiet_cmd_oui2c = OUI2C $@ cmd_oui2c = $(CONFIG_SHELL) $(srctree)/$(src)/oui2c.sh < $< > $@ diff --git a/drivers/ieee1394/ieee1394-ioctl.h b/drivers/ieee1394/ieee1394-ioctl.h index f92b566363d5..156703986348 100644 --- a/drivers/ieee1394/ieee1394-ioctl.h +++ b/drivers/ieee1394/ieee1394-ioctl.h @@ -7,14 +7,6 @@ #include #include - -/* AMDTP Gets 6 */ -#define AMDTP_IOC_CHANNEL _IOW('#', 0x00, struct amdtp_ioctl) -#define AMDTP_IOC_PLUG _IOW('#', 0x01, struct amdtp_ioctl) -#define AMDTP_IOC_PING _IOW('#', 0x02, struct amdtp_ioctl) -#define AMDTP_IOC_ZAP _IO ('#', 0x03) - - /* DV1394 Gets 10 */ /* Get the driver ready to transmit video. pass a struct dv1394_init* as diff --git a/drivers/ieee1394/ohci1394.h b/drivers/ieee1394/ohci1394.h index cc66c1cae250..7df0962144e3 100644 --- a/drivers/ieee1394/ohci1394.h +++ b/drivers/ieee1394/ohci1394.h @@ -219,8 +219,8 @@ struct ti_ohci { int self_id_errors; - /* Tasklets for iso receive and transmit, used by video1394, - * amdtp and dv1394 */ + /* Tasklets for iso receive and transmit, used by video1394 + * and dv1394 */ struct list_head iso_tasklet_list; spinlock_t iso_tasklet_list_lock; -- cgit v1.2.3 From e27d3014f301e6aee7b65b62ad1da2940e1fd8de Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Sat, 19 Nov 2005 21:23:48 -0500 Subject: Every file should #include the headers containing the prototypes for it's global functions. Signed-off-by: Adrian Bunk Signed-off-by: Jody McIntyre --- drivers/ieee1394/ieee1394_transactions.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c index 0aa876360f9b..81b983c7a7d0 100644 --- a/drivers/ieee1394/ieee1394_transactions.c +++ b/drivers/ieee1394/ieee1394_transactions.c @@ -22,6 +22,7 @@ #include "ieee1394_core.h" #include "highlevel.h" #include "nodemgr.h" +#include "ieee1394_transactions.h" #define PREP_ASYNC_HEAD_ADDRESS(tc) \ -- cgit v1.2.3 From e4cda1654e5c0be4b68e29011e8dc04977286df9 Mon Sep 17 00:00:00 2001 From: Damien Douxchamps Date: Sat, 19 Nov 2005 21:32:03 -0500 Subject: Fix incorrect video1394 timestamps. This patch fixes the incoherent timestamps generated by video1394 since the single-buffer patch was applied in 2.6.11. Credits have also been removed from the header and a "//" comment was changed to "/* */". Signed-off-by: Damien Douxchamps Signed-off-by: Jody McIntyre --- drivers/ieee1394/video1394.c | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c index 07050f0e4098..608479b2df14 100644 --- a/drivers/ieee1394/video1394.c +++ b/drivers/ieee1394/video1394.c @@ -19,12 +19,6 @@ * * NOTES: * - * jds -- add private data to file to keep track of iso contexts associated - * with each open -- so release won't kill all iso transfers. - * - * Damien Douxchamps: Fix failure when the number of DMA pages per frame is - * one. 
- * * ioctl return codes: * EFAULT is only for invalid address for the argp * EINVAL for out of range values @@ -34,12 +28,6 @@ * ENOTTY for unsupported ioctl request * */ - -/* Markus Tavenrath : - - fixed checks for valid buffer-numbers in video1394_icotl - - changed the ways the dma prg's are used, now it's possible to use - even a single dma buffer -*/ #include #include #include @@ -503,7 +491,7 @@ static void wakeup_dma_ir_ctx(unsigned long l) if (d->ir_prg[i][d->nb_cmd-1].status & cpu_to_le32(0xFFFF0000)) { reset_ir_status(d, i); d->buffer_status[d->buffer_prg_assignment[i]] = VIDEO1394_BUFFER_READY; - do_gettimeofday(&d->buffer_time[i]); + do_gettimeofday(&d->buffer_time[d->buffer_prg_assignment[i]]); } } @@ -1010,7 +998,6 @@ static int __video1394_ioctl(struct file *file, /* set time of buffer */ v.filltime = d->buffer_time[v.buffer]; -// printk("Buffer %d time %d\n", v.buffer, (d->buffer_time[v.buffer]).tv_usec); /* * Look ahead to see how many more buffers have been received @@ -1068,7 +1055,7 @@ static int __video1394_ioctl(struct file *file, spin_lock_irqsave(&d->lock,flags); - // last_buffer is last_prg + /* last_buffer is last_prg */ next_prg = (d->last_buffer + 1) % d->num_desc; if (d->buffer_status[v.buffer]!=VIDEO1394_BUFFER_FREE) { PRINT(KERN_ERR, ohci->host->id, -- cgit v1.2.3 From 977545e35289b13981614a57fd6c9b82d55e3b4a Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sat, 19 Nov 2005 21:35:22 -0500 Subject: sbp2: slimmer interface to scsi_mod - sbp2scsi_reset does not need to take host_lock - sbp2scsi_reset, as our device reset handler, does not need to stand in as bus reset or host reset handler - let scsi_mod use scsi_host_template.name instead of .info (sbp2 is not an emulation anway) Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/sbp2.c | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 84875cd5a70d..f0763b797238 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -2609,27 +2609,17 @@ static int sbp2scsi_reset(struct scsi_cmnd *SCpnt) { struct scsi_id_instance_data *scsi_id = (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0]; - unsigned long flags; SBP2_ERR("reset requested"); - spin_lock_irqsave(SCpnt->device->host->host_lock, flags); - if (sbp2util_node_is_available(scsi_id)) { SBP2_ERR("Generating sbp2 fetch agent reset"); sbp2_agent_reset(scsi_id, 0); } - spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags); - return SUCCESS; } -static const char *sbp2scsi_info(struct Scsi_Host *host) -{ - return "SCSI emulation for IEEE-1394 SBP-2 Devices"; -} - static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -2666,12 +2656,9 @@ static struct scsi_host_template scsi_driver_template = { .module = THIS_MODULE, .name = "SBP-2 IEEE-1394", .proc_name = SBP2_DEVICE_NAME, - .info = sbp2scsi_info, .queuecommand = sbp2scsi_queuecommand, .eh_abort_handler = sbp2scsi_abort, .eh_device_reset_handler = sbp2scsi_reset, - .eh_bus_reset_handler = sbp2scsi_reset, - .eh_host_reset_handler = sbp2scsi_reset, .slave_alloc = sbp2scsi_slave_alloc, .slave_configure = sbp2scsi_slave_configure, .slave_destroy = sbp2scsi_slave_destroy, -- cgit v1.2.3 From d734f92b0dc4c04daa2e0106354972cbbc2e0fbe Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Mon, 21 Nov 2005 17:32:14 -0500 Subject: drivers/ieee1394/raw1394.c: fix a NULL pointer The coverity checker spotted that this was a 
NULL pointer dereference in the "if (copy_from_user(...))" case since the next step is to kfree(cache->filled_head). There's no need to free cache at this point, and it's getting free'd later. Signed-off-by: Adrian Bunk Signed-off-by: Jody McIntyre --- drivers/ieee1394/raw1394.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index 0278dc5d5ef9..99b2ce15db83 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c @@ -2131,7 +2131,6 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req) req->req.length)) { csr1212_release_keyval(fi->csr1212_dirs[dr]); fi->csr1212_dirs[dr] = NULL; - CSR1212_FREE(cache); ret = -EFAULT; } else { cache->len = req->req.length; -- cgit v1.2.3 From b12479ddce4aed112e0018fdf8bbb7cfb349ebdc Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Mon, 21 Nov 2005 17:32:18 -0500 Subject: raw1394: fix memory deallocation in modify_config_rom raw1394: use correct deallocation macro for CSR cache Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/raw1394.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index 99b2ce15db83..89cac1f55807 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c @@ -2166,7 +2166,7 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req) } } kfree(cache->filled_head); - kfree(cache); + CSR1212_FREE(cache); if (ret >= 0) { /* we have to free the request, because we queue no response, -- cgit v1.2.3 From 5303a986c33ae6c75d5ffb57d06ccf9246a8725a Mon Sep 17 00:00:00 2001 From: Jody McIntyre Date: Tue, 22 Nov 2005 12:17:11 -0500 Subject: csr1212: check results of keyval reads csr1212_parse_csr() did not properly check return values when reading keyvals. Fix this by using _csr1212_read_keyval() instead of csr1212_get_keyval() and checking the return code. Signed-off-by: Jody McIntyre --- drivers/ieee1394/csr1212.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c index c0f8ed6fca8a..4812d59e8976 100644 --- a/drivers/ieee1394/csr1212.c +++ b/drivers/ieee1394/csr1212.c @@ -1610,15 +1610,16 @@ int csr1212_parse_csr(struct csr1212_csr *csr) csr->root_kv->valid = 0; csr->root_kv->next = csr->root_kv; csr->root_kv->prev = csr->root_kv; - csr1212_get_keyval(csr, csr->root_kv); + ret = _csr1212_read_keyval(csr, csr->root_kv); + if (ret != CSR1212_SUCCESS) + return ret; /* Scan through the Root directory finding all extended ROM regions * and make cache regions for them */ for (dentry = csr->root_kv->value.directory.dentries_head; dentry; dentry = dentry->next) { if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM) { - csr1212_get_keyval(csr, dentry->kv); - + ret = _csr1212_read_keyval(csr, dentry->kv); if (ret != CSR1212_SUCCESS) return ret; } -- cgit v1.2.3 From a96074e76f87a4f658af4ecfd95edc89cfd61fc1 Mon Sep 17 00:00:00 2001 From: Jody McIntyre Date: Tue, 22 Nov 2005 12:17:14 -0500 Subject: csr1212: add check for !valid Don't read the keyval if there's already a valid one in place. May not be necessary but shouldn't hurt. 
Signed-off-by: Jody McIntyre --- drivers/ieee1394/csr1212.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c index 4812d59e8976..15773544234b 100644 --- a/drivers/ieee1394/csr1212.c +++ b/drivers/ieee1394/csr1212.c @@ -1618,7 +1618,8 @@ int csr1212_parse_csr(struct csr1212_csr *csr) * and make cache regions for them */ for (dentry = csr->root_kv->value.directory.dentries_head; dentry; dentry = dentry->next) { - if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM) { + if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM && + !dentry->kv->valid) { ret = _csr1212_read_keyval(csr, dentry->kv); if (ret != CSR1212_SUCCESS) return ret; -- cgit v1.2.3 From 6649e92d792efa00a823781bcee2dba7f21199ba Mon Sep 17 00:00:00 2001 From: Jens-Michael Hoffmann Date: Tue, 22 Nov 2005 12:18:28 -0500 Subject: ieee1394/dma: LIndent fixes This patch contains fixes by LIndent. Signed-off-by: Jens-Michael Hoffmann Signed-off-by: Jody McIntyre --- drivers/ieee1394/dma.c | 73 +++++++++++++++++++++++++++++++------------------- 1 file changed, 45 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c index b79ddb43e746..9fb2769d9abc 100644 --- a/drivers/ieee1394/dma.c +++ b/drivers/ieee1394/dma.c @@ -23,7 +23,8 @@ void dma_prog_region_init(struct dma_prog_region *prog) prog->bus_addr = 0; } -int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev) +int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, + struct pci_dev *dev) { /* round up to page size */ n_bytes = PAGE_ALIGN(n_bytes); @@ -32,7 +33,8 @@ int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr); if (!prog->kvirt) { - printk(KERN_ERR "dma_prog_region_alloc: pci_alloc_consistent() failed\n"); + printk(KERN_ERR + "dma_prog_region_alloc: pci_alloc_consistent() failed\n"); dma_prog_region_free(prog); return -ENOMEM; } @@ -45,7 +47,8 @@ int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, void dma_prog_region_free(struct dma_prog_region *prog) { if (prog->kvirt) { - pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT, prog->kvirt, prog->bus_addr); + pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT, + prog->kvirt, prog->bus_addr); } prog->kvirt = NULL; @@ -65,7 +68,8 @@ void dma_region_init(struct dma_region *dma) dma->sglist = NULL; } -int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction) +int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, + struct pci_dev *dev, int direction) { unsigned int i; @@ -95,14 +99,16 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_d /* fill scatter/gather list with pages */ for (i = 0; i < dma->n_pages; i++) { - unsigned long va = (unsigned long) dma->kvirt + (i << PAGE_SHIFT); + unsigned long va = + (unsigned long)dma->kvirt + (i << PAGE_SHIFT); dma->sglist[i].page = vmalloc_to_page((void *)va); dma->sglist[i].length = PAGE_SIZE; } /* map sglist to the IOMMU */ - dma->n_dma_pages = pci_map_sg(dev, dma->sglist, dma->n_pages, direction); + dma->n_dma_pages = + pci_map_sg(dev, dma->sglist, dma->n_pages, direction); if (dma->n_dma_pages == 0) { printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n"); @@ -114,7 +120,7 @@ int dma_region_alloc(struct dma_region *dma, unsigned long 
n_bytes, struct pci_d return 0; -err: + err: dma_region_free(dma); return -ENOMEM; } @@ -122,7 +128,8 @@ err: void dma_region_free(struct dma_region *dma) { if (dma->n_dma_pages) { - pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages, dma->direction); + pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages, + dma->direction); dma->n_dma_pages = 0; dma->dev = NULL; } @@ -137,7 +144,8 @@ void dma_region_free(struct dma_region *dma) /* find the scatterlist index and remaining offset corresponding to a given offset from the beginning of the buffer */ -static inline int dma_region_find(struct dma_region *dma, unsigned long offset, unsigned long *rem) +static inline int dma_region_find(struct dma_region *dma, unsigned long offset, + unsigned long *rem) { int i; unsigned long off = offset; @@ -156,15 +164,18 @@ static inline int dma_region_find(struct dma_region *dma, unsigned long offset, return i; } -dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset) +dma_addr_t dma_region_offset_to_bus(struct dma_region * dma, + unsigned long offset) { unsigned long rem = 0; - struct scatterlist *sg = &dma->sglist[dma_region_find(dma, offset, &rem)]; + struct scatterlist *sg = + &dma->sglist[dma_region_find(dma, offset, &rem)]; return sg_dma_address(sg) + rem; } -void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsigned long len) +void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, + unsigned long len) { int first, last; unsigned long rem; @@ -175,10 +186,12 @@ void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsig first = dma_region_find(dma, offset, &rem); last = dma_region_find(dma, offset + len - 1, &rem); - pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1, dma->direction); + pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1, + dma->direction); } -void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, unsigned long len) +void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, + unsigned long len) { int first, last; unsigned long rem; @@ -189,44 +202,47 @@ void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, un first = dma_region_find(dma, offset, &rem); last = dma_region_find(dma, offset + len - 1, &rem); - pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first], last - first + 1, dma->direction); + pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first], + last - first + 1, dma->direction); } #ifdef CONFIG_MMU /* nopage() handler for mmap access */ -static struct page* -dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int *type) +static struct page *dma_region_pagefault(struct vm_area_struct *area, + unsigned long address, int *type) { unsigned long offset; unsigned long kernel_virt_addr; struct page *ret = NOPAGE_SIGBUS; - struct dma_region *dma = (struct dma_region*) area->vm_private_data; + struct dma_region *dma = (struct dma_region *)area->vm_private_data; if (!dma->kvirt) goto out; - if ( (address < (unsigned long) area->vm_start) || - (address > (unsigned long) area->vm_start + (dma->n_pages << PAGE_SHIFT)) ) + if ((address < (unsigned long)area->vm_start) || + (address > + (unsigned long)area->vm_start + (dma->n_pages << PAGE_SHIFT))) goto out; if (type) *type = VM_FAULT_MINOR; offset = address - area->vm_start; - kernel_virt_addr = (unsigned long) dma->kvirt + offset; - ret = vmalloc_to_page((void*) kernel_virt_addr); + kernel_virt_addr = (unsigned 
long)dma->kvirt + offset; + ret = vmalloc_to_page((void *)kernel_virt_addr); get_page(ret); -out: + out: return ret; } static struct vm_operations_struct dma_region_vm_ops = { - .nopage = dma_region_pagefault, + .nopage = dma_region_pagefault, }; -int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma) +int dma_region_mmap(struct dma_region *dma, struct file *file, + struct vm_area_struct *vma) { unsigned long size; @@ -250,11 +266,12 @@ int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_st return 0; } -#else /* CONFIG_MMU */ +#else /* CONFIG_MMU */ -int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma) +int dma_region_mmap(struct dma_region *dma, struct file *file, + struct vm_area_struct *vma) { return -EINVAL; } -#endif /* CONFIG_MMU */ +#endif /* CONFIG_MMU */ -- cgit v1.2.3 From 16c333a34a1a0441c54c4fe5cf6052716f95c2fa Mon Sep 17 00:00:00 2001 From: Jens-Michael Hoffmann Date: Tue, 22 Nov 2005 12:34:16 -0500 Subject: ieee1394/ieee1394_transactions: LIndent fixes This patch contains fixes by LIndent. Signed-off-by: Jens-Michael Hoffmann Signed-off-by: Jody McIntyre --- drivers/ieee1394/ieee1394_transactions.c | 388 ++++++++++++++++--------------- 1 file changed, 195 insertions(+), 193 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c index 81b983c7a7d0..3fe2f6c4a253 100644 --- a/drivers/ieee1394/ieee1394_transactions.c +++ b/drivers/ieee1394/ieee1394_transactions.c @@ -24,7 +24,6 @@ #include "nodemgr.h" #include "ieee1394_transactions.h" - #define PREP_ASYNC_HEAD_ADDRESS(tc) \ packet->tcode = tc; \ packet->header[0] = (packet->node_id << 16) | (packet->tlabel << 10) \ @@ -32,80 +31,82 @@ packet->header[1] = (packet->host->node_id << 16) | (addr >> 32); \ packet->header[2] = addr & 0xffffffff - static void fill_async_readquad(struct hpsb_packet *packet, u64 addr) { - PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ); - packet->header_size = 12; - packet->data_size = 0; - packet->expect_response = 1; + PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ); + packet->header_size = 12; + packet->data_size = 0; + packet->expect_response = 1; } -static void fill_async_readblock(struct hpsb_packet *packet, u64 addr, int length) +static void fill_async_readblock(struct hpsb_packet *packet, u64 addr, + int length) { - PREP_ASYNC_HEAD_ADDRESS(TCODE_READB); - packet->header[3] = length << 16; - packet->header_size = 16; - packet->data_size = 0; - packet->expect_response = 1; + PREP_ASYNC_HEAD_ADDRESS(TCODE_READB); + packet->header[3] = length << 16; + packet->header_size = 16; + packet->data_size = 0; + packet->expect_response = 1; } -static void fill_async_writequad(struct hpsb_packet *packet, u64 addr, quadlet_t data) +static void fill_async_writequad(struct hpsb_packet *packet, u64 addr, + quadlet_t data) { - PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEQ); - packet->header[3] = data; - packet->header_size = 16; - packet->data_size = 0; - packet->expect_response = 1; + PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEQ); + packet->header[3] = data; + packet->header_size = 16; + packet->data_size = 0; + packet->expect_response = 1; } -static void fill_async_writeblock(struct hpsb_packet *packet, u64 addr, int length) +static void fill_async_writeblock(struct hpsb_packet *packet, u64 addr, + int length) { - PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEB); - packet->header[3] = length << 16; - packet->header_size = 16; - packet->expect_response = 1; - packet->data_size = length + 
(length % 4 ? 4 - (length % 4) : 0); + PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEB); + packet->header[3] = length << 16; + packet->header_size = 16; + packet->expect_response = 1; + packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0); } static void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode, - int length) + int length) { - PREP_ASYNC_HEAD_ADDRESS(TCODE_LOCK_REQUEST); - packet->header[3] = (length << 16) | extcode; - packet->header_size = 16; - packet->data_size = length; - packet->expect_response = 1; + PREP_ASYNC_HEAD_ADDRESS(TCODE_LOCK_REQUEST); + packet->header[3] = (length << 16) | extcode; + packet->header_size = 16; + packet->data_size = length; + packet->expect_response = 1; } static void fill_iso_packet(struct hpsb_packet *packet, int length, int channel, - int tag, int sync) + int tag, int sync) { - packet->header[0] = (length << 16) | (tag << 14) | (channel << 8) - | (TCODE_ISO_DATA << 4) | sync; + packet->header[0] = (length << 16) | (tag << 14) | (channel << 8) + | (TCODE_ISO_DATA << 4) | sync; - packet->header_size = 4; - packet->data_size = length; - packet->type = hpsb_iso; - packet->tcode = TCODE_ISO_DATA; + packet->header_size = 4; + packet->data_size = length; + packet->type = hpsb_iso; + packet->tcode = TCODE_ISO_DATA; } static void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data) { - packet->header[0] = data; - packet->header[1] = ~data; - packet->header_size = 8; - packet->data_size = 0; - packet->expect_response = 0; - packet->type = hpsb_raw; /* No CRC added */ - packet->speed_code = IEEE1394_SPEED_100; /* Force speed to be 100Mbps */ + packet->header[0] = data; + packet->header[1] = ~data; + packet->header_size = 8; + packet->data_size = 0; + packet->expect_response = 0; + packet->type = hpsb_raw; /* No CRC added */ + packet->speed_code = IEEE1394_SPEED_100; /* Force speed to be 100Mbps */ } static void fill_async_stream_packet(struct hpsb_packet *packet, int length, int channel, int tag, int sync) { packet->header[0] = (length << 16) | (tag << 14) | (channel << 8) - | (TCODE_STREAM_DATA << 4) | sync; + | (TCODE_STREAM_DATA << 4) | sync; packet->header_size = 4; packet->data_size = length; @@ -172,99 +173,96 @@ int hpsb_get_tlabel(struct hpsb_packet *packet) */ void hpsb_free_tlabel(struct hpsb_packet *packet) { - unsigned long flags; + unsigned long flags; struct hpsb_tlabel_pool *tp; tp = &packet->host->tpool[packet->node_id & NODE_MASK]; BUG_ON(packet->tlabel > 63 || packet->tlabel < 0); - spin_lock_irqsave(&tp->lock, flags); + spin_lock_irqsave(&tp->lock, flags); BUG_ON(!test_and_clear_bit(packet->tlabel, tp->pool)); - spin_unlock_irqrestore(&tp->lock, flags); + spin_unlock_irqrestore(&tp->lock, flags); up(&tp->count); } - - int hpsb_packet_success(struct hpsb_packet *packet) { - switch (packet->ack_code) { - case ACK_PENDING: - switch ((packet->header[1] >> 12) & 0xf) { - case RCODE_COMPLETE: - return 0; - case RCODE_CONFLICT_ERROR: - return -EAGAIN; - case RCODE_DATA_ERROR: - return -EREMOTEIO; - case RCODE_TYPE_ERROR: - return -EACCES; - case RCODE_ADDRESS_ERROR: - return -EINVAL; - default: - HPSB_ERR("received reserved rcode %d from node %d", - (packet->header[1] >> 12) & 0xf, - packet->node_id); - return -EAGAIN; - } - HPSB_PANIC("reached unreachable code 1 in %s", __FUNCTION__); - - case ACK_BUSY_X: - case ACK_BUSY_A: - case ACK_BUSY_B: - return -EBUSY; - - case ACK_TYPE_ERROR: - return -EACCES; - - case ACK_COMPLETE: - if (packet->tcode == TCODE_WRITEQ - || packet->tcode == TCODE_WRITEB) { - return 0; - } else { - 
HPSB_ERR("impossible ack_complete from node %d " - "(tcode %d)", packet->node_id, packet->tcode); - return -EAGAIN; - } - - - case ACK_DATA_ERROR: - if (packet->tcode == TCODE_WRITEB - || packet->tcode == TCODE_LOCK_REQUEST) { - return -EAGAIN; - } else { - HPSB_ERR("impossible ack_data_error from node %d " - "(tcode %d)", packet->node_id, packet->tcode); - return -EAGAIN; - } - - case ACK_ADDRESS_ERROR: - return -EINVAL; - - case ACK_TARDY: - case ACK_CONFLICT_ERROR: - case ACKX_NONE: - case ACKX_SEND_ERROR: - case ACKX_ABORTED: - case ACKX_TIMEOUT: - /* error while sending */ - return -EAGAIN; - - default: - HPSB_ERR("got invalid ack %d from node %d (tcode %d)", - packet->ack_code, packet->node_id, packet->tcode); - return -EAGAIN; - } - - HPSB_PANIC("reached unreachable code 2 in %s", __FUNCTION__); + switch (packet->ack_code) { + case ACK_PENDING: + switch ((packet->header[1] >> 12) & 0xf) { + case RCODE_COMPLETE: + return 0; + case RCODE_CONFLICT_ERROR: + return -EAGAIN; + case RCODE_DATA_ERROR: + return -EREMOTEIO; + case RCODE_TYPE_ERROR: + return -EACCES; + case RCODE_ADDRESS_ERROR: + return -EINVAL; + default: + HPSB_ERR("received reserved rcode %d from node %d", + (packet->header[1] >> 12) & 0xf, + packet->node_id); + return -EAGAIN; + } + HPSB_PANIC("reached unreachable code 1 in %s", __FUNCTION__); + + case ACK_BUSY_X: + case ACK_BUSY_A: + case ACK_BUSY_B: + return -EBUSY; + + case ACK_TYPE_ERROR: + return -EACCES; + + case ACK_COMPLETE: + if (packet->tcode == TCODE_WRITEQ + || packet->tcode == TCODE_WRITEB) { + return 0; + } else { + HPSB_ERR("impossible ack_complete from node %d " + "(tcode %d)", packet->node_id, packet->tcode); + return -EAGAIN; + } + + case ACK_DATA_ERROR: + if (packet->tcode == TCODE_WRITEB + || packet->tcode == TCODE_LOCK_REQUEST) { + return -EAGAIN; + } else { + HPSB_ERR("impossible ack_data_error from node %d " + "(tcode %d)", packet->node_id, packet->tcode); + return -EAGAIN; + } + + case ACK_ADDRESS_ERROR: + return -EINVAL; + + case ACK_TARDY: + case ACK_CONFLICT_ERROR: + case ACKX_NONE: + case ACKX_SEND_ERROR: + case ACKX_ABORTED: + case ACKX_TIMEOUT: + /* error while sending */ + return -EAGAIN; + + default: + HPSB_ERR("got invalid ack %d from node %d (tcode %d)", + packet->ack_code, packet->node_id, packet->tcode); + return -EAGAIN; + } + + HPSB_PANIC("reached unreachable code 2 in %s", __FUNCTION__); } struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node, u64 addr, size_t length) { - struct hpsb_packet *packet; + struct hpsb_packet *packet; if (length == 0) return NULL; @@ -289,8 +287,9 @@ struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node, return packet; } -struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node, - u64 addr, quadlet_t *buffer, size_t length) +struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host, nodeid_t node, + u64 addr, quadlet_t * buffer, + size_t length) { struct hpsb_packet *packet; @@ -301,7 +300,7 @@ struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node if (!packet) return NULL; - if (length % 4) { /* zero padding bytes */ + if (length % 4) { /* zero padding bytes */ packet->data[length >> 2] = 0; } packet->host = host; @@ -323,8 +322,9 @@ struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node return packet; } -struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, int length, - int channel, int tag, int sync) +struct hpsb_packet 
*hpsb_make_streampacket(struct hpsb_host *host, u8 * buffer, + int length, int channel, int tag, + int sync) { struct hpsb_packet *packet; @@ -335,7 +335,7 @@ struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, i if (!packet) return NULL; - if (length % 4) { /* zero padding bytes */ + if (length % 4) { /* zero padding bytes */ packet->data[length >> 2] = 0; } packet->host = host; @@ -353,14 +353,15 @@ struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, i } struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node, - u64 addr, int extcode, quadlet_t *data, - quadlet_t arg) + u64 addr, int extcode, + quadlet_t * data, quadlet_t arg) { struct hpsb_packet *p; u32 length; p = hpsb_alloc_packet(8); - if (!p) return NULL; + if (!p) + return NULL; p->host = host; p->node_id = node; @@ -389,15 +390,16 @@ struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node, return p; } -struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node, - u64 addr, int extcode, octlet_t *data, - octlet_t arg) +struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, + nodeid_t node, u64 addr, int extcode, + octlet_t * data, octlet_t arg) { struct hpsb_packet *p; u32 length; p = hpsb_alloc_packet(16); - if (!p) return NULL; + if (!p) + return NULL; p->host = host; p->node_id = node; @@ -430,18 +432,18 @@ struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node return p; } -struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, - quadlet_t data) +struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, quadlet_t data) { - struct hpsb_packet *p; + struct hpsb_packet *p; - p = hpsb_alloc_packet(0); - if (!p) return NULL; + p = hpsb_alloc_packet(0); + if (!p) + return NULL; - p->host = host; - fill_phy_packet(p, data); + p->host = host; + fill_phy_packet(p, data); - return p; + return p; } struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host, @@ -451,7 +453,8 @@ struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host, struct hpsb_packet *p; p = hpsb_alloc_packet(length); - if (!p) return NULL; + if (!p) + return NULL; p->host = host; fill_iso_packet(p, length, channel, tag, sync); @@ -467,47 +470,46 @@ struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host, */ int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation, - u64 addr, quadlet_t *buffer, size_t length) + u64 addr, quadlet_t * buffer, size_t length) { - struct hpsb_packet *packet; - int retval = 0; + struct hpsb_packet *packet; + int retval = 0; - if (length == 0) - return -EINVAL; + if (length == 0) + return -EINVAL; - BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet + BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet packet = hpsb_make_readpacket(host, node, addr, length); - if (!packet) { - return -ENOMEM; - } + if (!packet) { + return -ENOMEM; + } packet->generation = generation; - retval = hpsb_send_packet_and_wait(packet); + retval = hpsb_send_packet_and_wait(packet); if (retval < 0) goto hpsb_read_fail; - retval = hpsb_packet_success(packet); + retval = hpsb_packet_success(packet); - if (retval == 0) { - if (length == 4) { - *buffer = packet->header[3]; - } else { - memcpy(buffer, packet->data, length); - } - } + if (retval == 0) { + if (length == 4) { + *buffer = packet->header[3]; + } else { + memcpy(buffer, packet->data, length); + } + } -hpsb_read_fail: - hpsb_free_tlabel(packet); - hpsb_free_packet(packet); + 
hpsb_read_fail: + hpsb_free_tlabel(packet); + hpsb_free_packet(packet); - return retval; + return retval; } - int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation, - u64 addr, quadlet_t *buffer, size_t length) + u64 addr, quadlet_t * buffer, size_t length) { struct hpsb_packet *packet; int retval; @@ -515,62 +517,61 @@ int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation, if (length == 0) return -EINVAL; - BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet + BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet - packet = hpsb_make_writepacket (host, node, addr, buffer, length); + packet = hpsb_make_writepacket(host, node, addr, buffer, length); if (!packet) return -ENOMEM; packet->generation = generation; - retval = hpsb_send_packet_and_wait(packet); + retval = hpsb_send_packet_and_wait(packet); if (retval < 0) goto hpsb_write_fail; - retval = hpsb_packet_success(packet); + retval = hpsb_packet_success(packet); -hpsb_write_fail: - hpsb_free_tlabel(packet); - hpsb_free_packet(packet); + hpsb_write_fail: + hpsb_free_tlabel(packet); + hpsb_free_packet(packet); - return retval; + return retval; } #if 0 int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation, - u64 addr, int extcode, quadlet_t *data, quadlet_t arg) + u64 addr, int extcode, quadlet_t * data, quadlet_t arg) { - struct hpsb_packet *packet; - int retval = 0; + struct hpsb_packet *packet; + int retval = 0; - BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet + BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet packet = hpsb_make_lockpacket(host, node, addr, extcode, data, arg); - if (!packet) - return -ENOMEM; + if (!packet) + return -ENOMEM; packet->generation = generation; - retval = hpsb_send_packet_and_wait(packet); + retval = hpsb_send_packet_and_wait(packet); if (retval < 0) goto hpsb_lock_fail; - retval = hpsb_packet_success(packet); + retval = hpsb_packet_success(packet); - if (retval == 0) { - *data = packet->data[0]; - } + if (retval == 0) { + *data = packet->data[0]; + } -hpsb_lock_fail: - hpsb_free_tlabel(packet); - hpsb_free_packet(packet); + hpsb_lock_fail: + hpsb_free_tlabel(packet); + hpsb_free_packet(packet); - return retval; + return retval; } - int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation, - quadlet_t *buffer, size_t length, u32 specifier_id, + quadlet_t * buffer, size_t length, u32 specifier_id, unsigned int version) { struct hpsb_packet *packet; @@ -587,7 +588,8 @@ int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation, return -ENOMEM; packet->data[0] = cpu_to_be32((host->node_id << 16) | specifier_id_hi); - packet->data[1] = cpu_to_be32((specifier_id_lo << 24) | (version & 0x00ffffff)); + packet->data[1] = + cpu_to_be32((specifier_id_lo << 24) | (version & 0x00ffffff)); memcpy(&(packet->data[2]), buffer, length - 8); @@ -602,4 +604,4 @@ int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation, return retval; } -#endif /* 0 */ +#endif /* 0 */ -- cgit v1.2.3 From 066ef9c2fb30a22eca7724326e210f0405c51f29 Mon Sep 17 00:00:00 2001 From: Jens-Michael Hoffmann Date: Tue, 22 Nov 2005 12:35:23 -0500 Subject: ieee1394/iso: LIndent fixes This patch contains fixes by LIndent. 
Signed-off-by: Jens-Michael Hoffmann Signed-off-by: Jody McIntyre --- drivers/ieee1394/iso.c | 102 +++++++++++++++++++++++++++++-------------------- 1 file changed, 60 insertions(+), 42 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/iso.c b/drivers/ieee1394/iso.c index 615541b8b90f..f26680ebef7c 100644 --- a/drivers/ieee1394/iso.c +++ b/drivers/ieee1394/iso.c @@ -36,20 +36,22 @@ void hpsb_iso_shutdown(struct hpsb_iso *iso) kfree(iso); } -static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_iso_type type, +static struct hpsb_iso *hpsb_iso_common_init(struct hpsb_host *host, + enum hpsb_iso_type type, unsigned int data_buf_size, unsigned int buf_packets, - int channel, - int dma_mode, + int channel, int dma_mode, int irq_interval, - void (*callback)(struct hpsb_iso*)) + void (*callback) (struct hpsb_iso + *)) { struct hpsb_iso *iso; int dma_direction; /* make sure driver supports the ISO API */ if (!host->driver->isoctl) { - printk(KERN_INFO "ieee1394: host driver '%s' does not support the rawiso API\n", + printk(KERN_INFO + "ieee1394: host driver '%s' does not support the rawiso API\n", host->driver->name); return NULL; } @@ -59,12 +61,13 @@ static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_i if (buf_packets < 2) buf_packets = 2; - if ((dma_mode < HPSB_ISO_DMA_DEFAULT) || (dma_mode > HPSB_ISO_DMA_PACKET_PER_BUFFER)) - dma_mode=HPSB_ISO_DMA_DEFAULT; + if ((dma_mode < HPSB_ISO_DMA_DEFAULT) + || (dma_mode > HPSB_ISO_DMA_PACKET_PER_BUFFER)) + dma_mode = HPSB_ISO_DMA_DEFAULT; if ((irq_interval < 0) || (irq_interval > buf_packets / 4)) - irq_interval = buf_packets / 4; - if (irq_interval == 0) /* really interrupt for each packet*/ + irq_interval = buf_packets / 4; + if (irq_interval == 0) /* really interrupt for each packet */ irq_interval = 1; if (channel < -1 || channel >= 64) @@ -76,7 +79,10 @@ static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_i /* allocate and write the struct hpsb_iso */ - iso = kmalloc(sizeof(*iso) + buf_packets * sizeof(struct hpsb_iso_packet_info), GFP_KERNEL); + iso = + kmalloc(sizeof(*iso) + + buf_packets * sizeof(struct hpsb_iso_packet_info), + GFP_KERNEL); if (!iso) return NULL; @@ -111,17 +117,18 @@ static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_i iso->prebuffer = 0; /* allocate the packet buffer */ - if (dma_region_alloc(&iso->data_buf, iso->buf_size, host->pdev, dma_direction)) + if (dma_region_alloc + (&iso->data_buf, iso->buf_size, host->pdev, dma_direction)) goto err; return iso; -err: + err: hpsb_iso_shutdown(iso); return NULL; } -int hpsb_iso_n_ready(struct hpsb_iso* iso) +int hpsb_iso_n_ready(struct hpsb_iso *iso) { unsigned long flags; int val; @@ -133,18 +140,19 @@ int hpsb_iso_n_ready(struct hpsb_iso* iso) return val; } - -struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host, +struct hpsb_iso *hpsb_iso_xmit_init(struct hpsb_host *host, unsigned int data_buf_size, unsigned int buf_packets, int channel, int speed, int irq_interval, - void (*callback)(struct hpsb_iso*)) + void (*callback) (struct hpsb_iso *)) { struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_XMIT, data_buf_size, buf_packets, - channel, HPSB_ISO_DMA_DEFAULT, irq_interval, callback); + channel, + HPSB_ISO_DMA_DEFAULT, + irq_interval, callback); if (!iso) return NULL; @@ -157,22 +165,23 @@ struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host, iso->flags |= HPSB_ISO_DRIVER_INIT; return iso; -err: + err: hpsb_iso_shutdown(iso); return NULL; } 
-struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host, +struct hpsb_iso *hpsb_iso_recv_init(struct hpsb_host *host, unsigned int data_buf_size, unsigned int buf_packets, int channel, int dma_mode, int irq_interval, - void (*callback)(struct hpsb_iso*)) + void (*callback) (struct hpsb_iso *)) { struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_RECV, data_buf_size, buf_packets, - channel, dma_mode, irq_interval, callback); + channel, dma_mode, + irq_interval, callback); if (!iso) return NULL; @@ -183,7 +192,7 @@ struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host, iso->flags |= HPSB_ISO_DRIVER_INIT; return iso; -err: + err: hpsb_iso_shutdown(iso); return NULL; } @@ -197,16 +206,17 @@ int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel) int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel) { - if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64) - return -EINVAL; - return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel); + if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64) + return -EINVAL; + return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel); } int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask) { if (iso->type != HPSB_ISO_RECV || iso->channel != -1) return -EINVAL; - return iso->host->driver->isoctl(iso, RECV_SET_CHANNEL_MASK, (unsigned long) &mask); + return iso->host->driver->isoctl(iso, RECV_SET_CHANNEL_MASK, + (unsigned long)&mask); } int hpsb_iso_recv_flush(struct hpsb_iso *iso) @@ -283,7 +293,9 @@ int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync) isoctl_args[2] = sync; - retval = iso->host->driver->isoctl(iso, RECV_START, (unsigned long) &isoctl_args[0]); + retval = + iso->host->driver->isoctl(iso, RECV_START, + (unsigned long)&isoctl_args[0]); if (retval) return retval; @@ -296,7 +308,8 @@ int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync) static int hpsb_iso_check_offset_len(struct hpsb_iso *iso, unsigned int offset, unsigned short len, - unsigned int *out_offset, unsigned short *out_len) + unsigned int *out_offset, + unsigned short *out_len) { if (offset >= iso->buf_size) return -EFAULT; @@ -316,8 +329,8 @@ static int hpsb_iso_check_offset_len(struct hpsb_iso *iso, return 0; } - -int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag, u8 sy) +int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, + u8 tag, u8 sy) { struct hpsb_iso_packet_info *info; unsigned long flags; @@ -334,7 +347,8 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag info = &iso->infos[iso->first_packet]; /* check for bogus offset/length */ - if (hpsb_iso_check_offset_len(iso, offset, len, &info->offset, &info->len)) + if (hpsb_iso_check_offset_len + (iso, offset, len, &info->offset, &info->len)) return -EFAULT; info->tag = tag; @@ -342,13 +356,13 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag spin_lock_irqsave(&iso->lock, flags); - rv = iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long) info); + rv = iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long)info); if (rv) goto out; /* increment cursors */ - iso->first_packet = (iso->first_packet+1) % iso->buf_packets; - iso->xmit_cycle = (iso->xmit_cycle+1) % 8000; + iso->first_packet = (iso->first_packet + 1) % iso->buf_packets; + iso->xmit_cycle = (iso->xmit_cycle + 1) % 8000; iso->n_ready_packets--; if (iso->prebuffer != 
0) { @@ -359,7 +373,7 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag } } -out: + out: spin_unlock_irqrestore(&iso->lock, flags); return rv; } @@ -369,7 +383,9 @@ int hpsb_iso_xmit_sync(struct hpsb_iso *iso) if (iso->type != HPSB_ISO_XMIT) return -EINVAL; - return wait_event_interruptible(iso->waitq, hpsb_iso_n_ready(iso) == iso->buf_packets); + return wait_event_interruptible(iso->waitq, + hpsb_iso_n_ready(iso) == + iso->buf_packets); } void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error) @@ -396,7 +412,8 @@ void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error) } void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len, - u16 total_len, u16 cycle, u8 channel, u8 tag, u8 sy) + u16 total_len, u16 cycle, u8 channel, u8 tag, + u8 sy) { unsigned long flags; spin_lock_irqsave(&iso->lock, flags); @@ -416,7 +433,7 @@ void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len, info->tag = tag; info->sy = sy; - iso->pkt_dma = (iso->pkt_dma+1) % iso->buf_packets; + iso->pkt_dma = (iso->pkt_dma + 1) % iso->buf_packets; iso->n_ready_packets++; } @@ -435,20 +452,21 @@ int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets) spin_lock_irqsave(&iso->lock, flags); for (i = 0; i < n_packets; i++) { rv = iso->host->driver->isoctl(iso, RECV_RELEASE, - (unsigned long) &iso->infos[iso->first_packet]); + (unsigned long)&iso->infos[iso-> + first_packet]); if (rv) break; - iso->first_packet = (iso->first_packet+1) % iso->buf_packets; + iso->first_packet = (iso->first_packet + 1) % iso->buf_packets; iso->n_ready_packets--; /* release memory from packets discarded when queue was full */ - if (iso->n_ready_packets == 0) { /* Release only after all prior packets handled */ + if (iso->n_ready_packets == 0) { /* Release only after all prior packets handled */ if (iso->bytes_discarded != 0) { struct hpsb_iso_packet_info inf; inf.total_len = iso->bytes_discarded; iso->host->driver->isoctl(iso, RECV_RELEASE, - (unsigned long) &inf); + (unsigned long)&inf); iso->bytes_discarded = 0; } } -- cgit v1.2.3 From c64d472abc68dcad4d34f365545058c3f11973d8 Mon Sep 17 00:00:00 2001 From: Jens-Michael Hoffmann Date: Tue, 22 Nov 2005 12:37:10 -0500 Subject: ieee1394/raw1394: LIndent fixes This patch contains fixes by LIndent. 
Signed-off-by: Jens-Michael Hoffmann Signed-off-by: Jody McIntyre --- drivers/ieee1394/raw1394.c | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index 89cac1f55807..b05235639918 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c @@ -2482,8 +2482,8 @@ static int raw1394_iso_recv_packets(struct file_info *fi, void __user * uaddr) /* ensure user-supplied buffer is accessible and big enough */ if (!access_ok(VERIFY_WRITE, upackets.infos, - upackets.n_packets * - sizeof(struct raw1394_iso_packet_info))) + upackets.n_packets * + sizeof(struct raw1394_iso_packet_info))) return -EFAULT; /* copy the packet_infos out */ @@ -2516,8 +2516,8 @@ static int raw1394_iso_send_packets(struct file_info *fi, void __user * uaddr) /* ensure user-supplied buffer is accessible and big enough */ if (!access_ok(VERIFY_READ, upackets.infos, - upackets.n_packets * - sizeof(struct raw1394_iso_packet_info))) + upackets.n_packets * + sizeof(struct raw1394_iso_packet_info))) return -EFAULT; /* copy the infos structs in and queue the packets */ @@ -2741,8 +2741,7 @@ static int raw1394_release(struct inode *inode, struct file *file) list) { entry = fi_hlp->addr_list.next; while (entry != &(fi_hlp->addr_list)) { - arm_addr = list_entry(entry, - struct + arm_addr = list_entry(entry, struct arm_addr, addr_list); if (arm_addr->start == @@ -2905,16 +2904,17 @@ static int __init init_raw1394(void) hpsb_register_highlevel(&raw1394_highlevel); - if (IS_ERR(class_device_create(hpsb_protocol_class, NULL, MKDEV( - IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), - NULL, RAW1394_DEVICE_NAME))) { + if (IS_ERR + (class_device_create + (hpsb_protocol_class, NULL, + MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), NULL, + RAW1394_DEVICE_NAME))) { ret = -EFAULT; goto out_unreg; } - - devfs_mk_cdev(MKDEV( - IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), - S_IFCHR | S_IRUSR | S_IWUSR, RAW1394_DEVICE_NAME); + + devfs_mk_cdev(MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), + S_IFCHR | S_IRUSR | S_IWUSR, RAW1394_DEVICE_NAME); cdev_init(&raw1394_cdev, &raw1394_fops); raw1394_cdev.owner = THIS_MODULE; @@ -2936,20 +2936,22 @@ static int __init init_raw1394(void) goto out; -out_dev: + out_dev: devfs_remove(RAW1394_DEVICE_NAME); class_device_destroy(hpsb_protocol_class, - MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16)); -out_unreg: + MKDEV(IEEE1394_MAJOR, + IEEE1394_MINOR_BLOCK_RAW1394 * 16)); + out_unreg: hpsb_unregister_highlevel(&raw1394_highlevel); -out: + out: return ret; } static void __exit cleanup_raw1394(void) { class_device_destroy(hpsb_protocol_class, - MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16)); + MKDEV(IEEE1394_MAJOR, + IEEE1394_MINOR_BLOCK_RAW1394 * 16)); cdev_del(&raw1394_cdev); devfs_remove(RAW1394_DEVICE_NAME); hpsb_unregister_highlevel(&raw1394_highlevel); -- cgit v1.2.3 From 14c0fa243b358c24040ff5f44b60c47aaf6430c3 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Thu, 1 Dec 2005 18:51:52 -0500 Subject: ieee1394: resume remote ports when starting a host (fixes device recognition) After initializing an IEEE 1394 host, broadcast a resume packet. This makes remote nodes visible which suspended their ports while the host was down. Such nodes had to be unplugged and replugged in order to be recognized. Motorola DCT6200 cable reciever was affected, probably other devices too. 
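For orientation before the patch itself: the change boils down to one small helper in nodemgr.c, sketched here with explanatory comments (a condensed sketch of the hunk below; the literal 0x003c0000 gets a symbolic name in the phy-packet-constants patch further down):

	/* Sketch of the new nodemgr helper (the real hunk follows below).
	 * Broadcasts an extended PHY "resume" packet; the caller in
	 * nodemgr_do_irm_duties() makes sure this happens once per host. */
	static int nodemgr_send_resume_packet(struct hpsb_host *host)
	{
		struct hpsb_packet *packet;
		int ret = 1;

		/* Payload quadlet: extended PHY packet of type "resume"
		 * (0x003c0000) with this node's physical ID in the port field;
		 * hpsb_make_phypacket() adds the inverted check quadlet itself. */
		packet = hpsb_make_phypacket(host,
			 0x003c0000 | NODEID_TO_NODE(host->node_id) << 24);
		if (packet) {
			packet->no_waiter = 1;	/* nobody waits for a response */
			packet->generation = get_hpsb_generation(host);
			ret = hpsb_send_packet(packet);	/* 0 on success */
		}
		return ret;
	}
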
http://marc.theaimsgroup.com/?t=113202715800001 Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/hosts.h | 1 + drivers/ieee1394/nodemgr.c | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+) (limited to 'drivers') diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h index 38f42112dff0..ae9b02cc013f 100644 --- a/drivers/ieee1394/hosts.h +++ b/drivers/ieee1394/hosts.h @@ -41,6 +41,7 @@ struct hpsb_host { /* this nodes state */ unsigned in_bus_reset:1; unsigned is_shutdown:1; + unsigned resume_packet_sent:1; /* this nodes' duties on the bus */ unsigned is_root:1; diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index 3f0917bbd7f5..b56934edd62b 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c @@ -1410,6 +1410,24 @@ static void nodemgr_node_probe(struct host_info *hi, int generation) return; } +static int nodemgr_send_resume_packet(struct hpsb_host *host) +{ + struct hpsb_packet *packet; + int ret = 1; + + packet = hpsb_make_phypacket(host, + 0x003c0000 | NODEID_TO_NODE(host->node_id) << 24); + if (packet) { + packet->no_waiter = 1; + packet->generation = get_hpsb_generation(host); + ret = hpsb_send_packet(packet); + } + if (ret) + HPSB_WARN("fw-host%d: Failed to broadcast resume packet", + host->id); + return ret; +} + /* Because we are a 1394a-2000 compliant IRM, we need to inform all the other * nodes of the broadcast channel. (Really we're only setting the validity * bit). Other IRM responsibilities go in here as well. */ @@ -1460,6 +1478,13 @@ static int nodemgr_do_irm_duties(struct hpsb_host *host, int cycles) } } + /* Some devices suspend their ports while being connected to an inactive + * host adapter, i.e. if connected before the low-level driver is + * loaded. They become visible either when physically unplugged and + * replugged, or when receiving a resume packet. Send one once. */ + if (!host->resume_packet_sent && !nodemgr_send_resume_packet(host)) + host->resume_packet_sent = 1; + return 1; } -- cgit v1.2.3 From d7758461b9a8253f1c125e5907579e0594d29e3b Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Thu, 1 Dec 2005 18:51:56 -0500 Subject: ieee1394: add definitions for phy packet constants Introduce new macros related to phy packets and use them in ieee1394_core and nodemgr. 
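To make the new names concrete, here is how the constants combine into PHY packet payload quadlets (an illustrative sketch; the combinations mirror the hunks below, while the node ID and gap count are made-up example values):

	/* PHY configuration packet: request that node 5 become root (R bit)
	 * and set a gap count of 63 (T bit) -- spelled with the new macros
	 * instead of the old literals 1 << 23, 1 << 22, rootid << 24 and
	 * gapcnt << 16. */
	quadlet_t cfg = PHYPACKET_PHYCONFIG_R | 5 << PHYPACKET_PORT_SHIFT |
			PHYPACKET_PHYCONFIG_T | 63 << PHYPACKET_GAPCOUNT_SHIFT;

	/* Extended PHY packet: the "resume" broadcast added by the previous
	 * patch, now without the bare 0x003c0000. */
	quadlet_t resume = EXTPHYPACKET_TYPE_RESUME |
			   NODEID_TO_NODE(host->node_id) << PHYPACKET_PORT_SHIFT;
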
Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/ieee1394.h | 19 ++++++++++++++++++- drivers/ieee1394/ieee1394_core.c | 38 ++++++++++++++++++++++---------------- drivers/ieee1394/nodemgr.c | 3 ++- 3 files changed, 42 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/ieee1394.h b/drivers/ieee1394/ieee1394.h index b634a9bb365c..936d776de00a 100644 --- a/drivers/ieee1394/ieee1394.h +++ b/drivers/ieee1394/ieee1394.h @@ -62,6 +62,7 @@ extern const char *hpsb_speedto_str[]; +/* 1394a cable PHY packets */ #define SELFID_PWRCL_NO_POWER 0x0 #define SELFID_PWRCL_PROVIDE_15W 0x1 #define SELFID_PWRCL_PROVIDE_30W 0x2 @@ -76,8 +77,24 @@ extern const char *hpsb_speedto_str[]; #define SELFID_PORT_NCONN 0x1 #define SELFID_PORT_NONE 0x0 +#define PHYPACKET_LINKON 0x40000000 +#define PHYPACKET_PHYCONFIG_R 0x00800000 +#define PHYPACKET_PHYCONFIG_T 0x00400000 +#define EXTPHYPACKET_TYPE_PING 0x00000000 +#define EXTPHYPACKET_TYPE_REMOTEACCESS_BASE 0x00040000 +#define EXTPHYPACKET_TYPE_REMOTEACCESS_PAGED 0x00140000 +#define EXTPHYPACKET_TYPE_REMOTEREPLY_BASE 0x000C0000 +#define EXTPHYPACKET_TYPE_REMOTEREPLY_PAGED 0x001C0000 +#define EXTPHYPACKET_TYPE_REMOTECOMMAND 0x00200000 +#define EXTPHYPACKET_TYPE_REMOTECONFIRMATION 0x00280000 +#define EXTPHYPACKET_TYPE_RESUME 0x003C0000 -/* 1394a PHY bitmasks */ +#define EXTPHYPACKET_TYPEMASK 0xC0FC0000 + +#define PHYPACKET_PORT_SHIFT 24 +#define PHYPACKET_GAPCOUNT_SHIFT 16 + +/* 1394a PHY register map bitmasks */ #define PHY_00_PHYSICAL_ID 0xFC #define PHY_00_R 0x02 /* Root */ #define PHY_00_PS 0x01 /* Power Status*/ diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c index 32a1e016c85e..f2f5e4805b5f 100644 --- a/drivers/ieee1394/ieee1394_core.c +++ b/drivers/ieee1394/ieee1394_core.c @@ -256,10 +256,14 @@ static int check_selfids(struct hpsb_host *host) esid = (struct ext_selfid *)(sid - 1); while (esid->extended) { - if ((esid->porta == 0x2) || (esid->portb == 0x2) - || (esid->portc == 0x2) || (esid->portd == 0x2) - || (esid->porte == 0x2) || (esid->portf == 0x2) - || (esid->portg == 0x2) || (esid->porth == 0x2)) { + if ((esid->porta == SELFID_PORT_PARENT) || + (esid->portb == SELFID_PORT_PARENT) || + (esid->portc == SELFID_PORT_PARENT) || + (esid->portd == SELFID_PORT_PARENT) || + (esid->porte == SELFID_PORT_PARENT) || + (esid->portf == SELFID_PORT_PARENT) || + (esid->portg == SELFID_PORT_PARENT) || + (esid->porth == SELFID_PORT_PARENT)) { HPSB_INFO("SelfIDs failed root check on " "extended SelfID"); return 0; @@ -268,7 +272,9 @@ static int check_selfids(struct hpsb_host *host) } sid = (struct selfid *)esid; - if ((sid->port0 == 0x2) || (sid->port1 == 0x2) || (sid->port2 == 0x2)) { + if ((sid->port0 == SELFID_PORT_PARENT) || + (sid->port1 == SELFID_PORT_PARENT) || + (sid->port2 == SELFID_PORT_PARENT)) { HPSB_INFO("SelfIDs failed root check"); return 0; } @@ -303,18 +309,18 @@ static void build_speed_map(struct hpsb_host *host, int nodecount) if (sid->extended) { esid = (struct ext_selfid *)sid; - if (esid->porta == 0x3) cldcnt[n]++; - if (esid->portb == 0x3) cldcnt[n]++; - if (esid->portc == 0x3) cldcnt[n]++; - if (esid->portd == 0x3) cldcnt[n]++; - if (esid->porte == 0x3) cldcnt[n]++; - if (esid->portf == 0x3) cldcnt[n]++; - if (esid->portg == 0x3) cldcnt[n]++; - if (esid->porth == 0x3) cldcnt[n]++; + if (esid->porta == SELFID_PORT_CHILD) cldcnt[n]++; + if (esid->portb == SELFID_PORT_CHILD) cldcnt[n]++; + if (esid->portc == SELFID_PORT_CHILD) cldcnt[n]++; + if (esid->portd == 
SELFID_PORT_CHILD) cldcnt[n]++; + if (esid->porte == SELFID_PORT_CHILD) cldcnt[n]++; + if (esid->portf == SELFID_PORT_CHILD) cldcnt[n]++; + if (esid->portg == SELFID_PORT_CHILD) cldcnt[n]++; + if (esid->porth == SELFID_PORT_CHILD) cldcnt[n]++; } else { - if (sid->port0 == 0x3) cldcnt[n]++; - if (sid->port1 == 0x3) cldcnt[n]++; - if (sid->port2 == 0x3) cldcnt[n]++; + if (sid->port0 == SELFID_PORT_CHILD) cldcnt[n]++; + if (sid->port1 == SELFID_PORT_CHILD) cldcnt[n]++; + if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++; speedcap[n] = sid->speed; n--; diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index b56934edd62b..f4b6025fde61 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c @@ -1416,7 +1416,8 @@ static int nodemgr_send_resume_packet(struct hpsb_host *host) int ret = 1; packet = hpsb_make_phypacket(host, - 0x003c0000 | NODEID_TO_NODE(host->node_id) << 24); + EXTPHYPACKET_TYPE_RESUME | + NODEID_TO_NODE(host->node_id) << PHYPACKET_PORT_SHIFT); if (packet) { packet->no_waiter = 1; packet->generation = get_hpsb_generation(host); -- cgit v1.2.3 From 546513f9fd96cba613cc2d025ee03d32d79394b7 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Thu, 1 Dec 2005 18:52:01 -0500 Subject: ieee1394: hpsb_send_phy_config() cleanup Eliminate some code in hpsb_send_phy_config() which is provided by hpsb_make_phypacket(). Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/ieee1394_core.c | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c index f2f5e4805b5f..ff8a40980607 100644 --- a/drivers/ieee1394/ieee1394_core.c +++ b/drivers/ieee1394/ieee1394_core.c @@ -463,6 +463,7 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet, int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt) { struct hpsb_packet *packet; + quadlet_t d = 0; int retval = 0; if (rootid >= ALL_NODES || rootid < -1 || gapcnt > 0x3f || gapcnt < -1 || @@ -472,26 +473,16 @@ int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt) return -EINVAL; } - packet = hpsb_alloc_packet(0); - if (!packet) - return -ENOMEM; - - packet->host = host; - packet->header_size = 8; - packet->data_size = 0; - packet->expect_response = 0; - packet->no_waiter = 0; - packet->type = hpsb_raw; - packet->header[0] = 0; if (rootid != -1) - packet->header[0] |= rootid << 24 | 1 << 23; + d |= PHYPACKET_PHYCONFIG_R | rootid << PHYPACKET_PORT_SHIFT; if (gapcnt != -1) - packet->header[0] |= gapcnt << 16 | 1 << 22; + d |= PHYPACKET_PHYCONFIG_T | gapcnt << PHYPACKET_GAPCOUNT_SHIFT; - packet->header[1] = ~packet->header[0]; + packet = hpsb_make_phypacket(host, d); + if (!packet) + return -ENOMEM; packet->generation = get_hpsb_generation(host); - retval = hpsb_send_packet_and_wait(packet); hpsb_free_packet(packet); -- cgit v1.2.3 From 741854e4f9a23421e194df8d846899172ff393d6 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Thu, 1 Dec 2005 18:52:03 -0500 Subject: ieee1394: whitespace cleanup in hosts.[ch], ieee1394_core.[ch] Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/hosts.c | 28 +- drivers/ieee1394/hosts.h | 162 ++++----- drivers/ieee1394/ieee1394_core.c | 762 +++++++++++++++++++-------------------- drivers/ieee1394/ieee1394_core.h | 100 ++--- 4 files changed, 526 insertions(+), 526 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c index 
d245abe4033c..ba09741fc826 100644 --- a/drivers/ieee1394/hosts.c +++ b/drivers/ieee1394/hosts.c @@ -61,12 +61,12 @@ static void delayed_reset_bus(void * __reset_info) static int dummy_transmit_packet(struct hpsb_host *h, struct hpsb_packet *p) { - return 0; + return 0; } static int dummy_devctl(struct hpsb_host *h, enum devctl_cmd c, int arg) { - return -1; + return -1; } static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg) @@ -75,9 +75,9 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned } static struct hpsb_host_driver dummy_driver = { - .transmit_packet = dummy_transmit_packet, - .devctl = dummy_devctl, - .isoctl = dummy_isoctl + .transmit_packet = dummy_transmit_packet, + .devctl = dummy_devctl, + .isoctl = dummy_isoctl }; static int alloc_hostnum_cb(struct hpsb_host *host, void *__data) @@ -110,12 +110,12 @@ static DECLARE_MUTEX(host_num_alloc); struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, struct device *dev) { - struct hpsb_host *h; + struct hpsb_host *h; int i; int hostnum = 0; - h = kzalloc(sizeof(*h) + extra, SLAB_KERNEL); - if (!h) + h = kzalloc(sizeof(*h) + extra, SLAB_KERNEL); + if (!h) return NULL; h->csr.rom = csr1212_create_csr(&csr_bus_ops, CSR_BUS_INFO_SIZE, h); @@ -125,7 +125,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, } h->hostdata = h + 1; - h->driver = drv; + h->driver = drv; skb_queue_head_init(&h->pending_packet_queue); INIT_LIST_HEAD(&h->addr_space); @@ -145,8 +145,8 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, h->timeout.function = abort_timedouts; h->timeout_interval = HZ / 20; // 50ms by default - h->topology_map = h->csr.topology_map + 3; - h->speed_map = (u8 *)(h->csr.speed_map + 2); + h->topology_map = h->csr.topology_map + 3; + h->speed_map = (u8 *)(h->csr.speed_map + 2); down(&host_num_alloc); @@ -186,14 +186,14 @@ int hpsb_add_host(struct hpsb_host *host) void hpsb_remove_host(struct hpsb_host *host) { - host->is_shutdown = 1; + host->is_shutdown = 1; cancel_delayed_work(&host->delayed_reset); flush_scheduled_work(); - host->driver = &dummy_driver; + host->driver = &dummy_driver; - highlevel_remove_host(host); + highlevel_remove_host(host); hpsb_remove_extra_config_roms(host); diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h index ae9b02cc013f..07d188ca8495 100644 --- a/drivers/ieee1394/hosts.h +++ b/drivers/ieee1394/hosts.h @@ -17,47 +17,47 @@ struct hpsb_packet; struct hpsb_iso; struct hpsb_host { - struct list_head host_list; + struct list_head host_list; - void *hostdata; + void *hostdata; - atomic_t generation; + atomic_t generation; struct sk_buff_head pending_packet_queue; struct timer_list timeout; unsigned long timeout_interval; - unsigned char iso_listen_count[64]; + unsigned char iso_listen_count[64]; - int node_count; /* number of identified nodes on this bus */ - int selfid_count; /* total number of SelfIDs received */ + int node_count; /* number of identified nodes on this bus */ + int selfid_count; /* total number of SelfIDs received */ int nodes_active; /* number of nodes that are actually active */ - nodeid_t node_id; /* node ID of this host */ - nodeid_t irm_id; /* ID of this bus' isochronous resource manager */ - nodeid_t busmgr_id; /* ID of this bus' bus manager */ + nodeid_t node_id; /* node ID of this host */ + nodeid_t irm_id; /* ID of this bus' isochronous resource manager */ + nodeid_t busmgr_id; /* ID of this bus' bus manager */ - /* this 
nodes state */ - unsigned in_bus_reset:1; - unsigned is_shutdown:1; + /* this nodes state */ + unsigned in_bus_reset:1; + unsigned is_shutdown:1; unsigned resume_packet_sent:1; - /* this nodes' duties on the bus */ - unsigned is_root:1; - unsigned is_cycmst:1; - unsigned is_irm:1; - unsigned is_busmgr:1; + /* this nodes' duties on the bus */ + unsigned is_root:1; + unsigned is_cycmst:1; + unsigned is_irm:1; + unsigned is_busmgr:1; - int reset_retries; - quadlet_t *topology_map; - u8 *speed_map; - struct csr_control csr; + int reset_retries; + quadlet_t *topology_map; + u8 *speed_map; + struct csr_control csr; /* Per node tlabel pool allocation */ struct hpsb_tlabel_pool tpool[64]; - struct hpsb_host_driver *driver; + struct hpsb_host_driver *driver; struct pci_dev *pdev; @@ -77,34 +77,34 @@ struct hpsb_host { enum devctl_cmd { - /* Host is requested to reset its bus and cancel all outstanding async - * requests. If arg == 1, it shall also attempt to become root on the - * bus. Return void. */ - RESET_BUS, - - /* Arg is void, return value is the hardware cycle counter value. */ - GET_CYCLE_COUNTER, - - /* Set the hardware cycle counter to the value in arg, return void. - * FIXME - setting is probably not required. */ - SET_CYCLE_COUNTER, - - /* Configure hardware for new bus ID in arg, return void. */ - SET_BUS_ID, - - /* If arg true, start sending cycle start packets, stop if arg == 0. - * Return void. */ - ACT_CYCLE_MASTER, - - /* Cancel all outstanding async requests without resetting the bus. - * Return void. */ - CANCEL_REQUESTS, - - /* Start or stop receiving isochronous channel in arg. Return void. - * This acts as an optimization hint, hosts are not required not to - * listen on unrequested channels. */ - ISO_LISTEN_CHANNEL, - ISO_UNLISTEN_CHANNEL + /* Host is requested to reset its bus and cancel all outstanding async + * requests. If arg == 1, it shall also attempt to become root on the + * bus. Return void. */ + RESET_BUS, + + /* Arg is void, return value is the hardware cycle counter value. */ + GET_CYCLE_COUNTER, + + /* Set the hardware cycle counter to the value in arg, return void. + * FIXME - setting is probably not required. */ + SET_CYCLE_COUNTER, + + /* Configure hardware for new bus ID in arg, return void. */ + SET_BUS_ID, + + /* If arg true, start sending cycle start packets, stop if arg == 0. + * Return void. */ + ACT_CYCLE_MASTER, + + /* Cancel all outstanding async requests without resetting the bus. + * Return void. */ + CANCEL_REQUESTS, + + /* Start or stop receiving isochronous channel in arg. Return void. + * This acts as an optimization hint, hosts are not required not to + * listen on unrequested channels. */ + ISO_LISTEN_CHANNEL, + ISO_UNLISTEN_CHANNEL }; enum isoctl_cmd { @@ -135,13 +135,13 @@ enum isoctl_cmd { }; enum reset_types { - /* 166 microsecond reset -- only type of reset available on - non-1394a capable controllers */ - LONG_RESET, + /* 166 microsecond reset -- only type of reset available on + non-1394a capable controllers */ + LONG_RESET, - /* Short (arbitrated) reset -- only available on 1394a capable - controllers */ - SHORT_RESET, + /* Short (arbitrated) reset -- only available on 1394a capable + controllers */ + SHORT_RESET, /* Variants that set force_root before issueing the bus reset */ LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT, @@ -159,22 +159,22 @@ struct hpsb_host_driver { * reads to the ConfigROM on its own. 
*/ void (*set_hw_config_rom) (struct hpsb_host *host, quadlet_t *config_rom); - /* This function shall implement packet transmission based on - * packet->type. It shall CRC both parts of the packet (unless - * packet->type == raw) and do byte-swapping as necessary or instruct - * the hardware to do so. It can return immediately after the packet - * was queued for sending. After sending, hpsb_sent_packet() has to be - * called. Return 0 on success, negative errno on failure. - * NOTE: The function must be callable in interrupt context. - */ - int (*transmit_packet) (struct hpsb_host *host, - struct hpsb_packet *packet); - - /* This function requests miscellanous services from the driver, see - * above for command codes and expected actions. Return -1 for unknown - * command, though that should never happen. - */ - int (*devctl) (struct hpsb_host *host, enum devctl_cmd command, int arg); + /* This function shall implement packet transmission based on + * packet->type. It shall CRC both parts of the packet (unless + * packet->type == raw) and do byte-swapping as necessary or instruct + * the hardware to do so. It can return immediately after the packet + * was queued for sending. After sending, hpsb_sent_packet() has to be + * called. Return 0 on success, negative errno on failure. + * NOTE: The function must be callable in interrupt context. + */ + int (*transmit_packet) (struct hpsb_host *host, + struct hpsb_packet *packet); + + /* This function requests miscellanous services from the driver, see + * above for command codes and expected actions. Return -1 for unknown + * command, though that should never happen. + */ + int (*devctl) (struct hpsb_host *host, enum devctl_cmd command, int arg); /* ISO transmission/reception functions. Return 0 on success, -1 * (or -EXXX errno code) on failure. If the low-level driver does not @@ -182,15 +182,15 @@ struct hpsb_host_driver { */ int (*isoctl) (struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg); - /* This function is mainly to redirect local CSR reads/locks to the iso - * management registers (bus manager id, bandwidth available, channels - * available) to the hardware registers in OHCI. reg is 0,1,2,3 for bus - * mgr, bwdth avail, ch avail hi, ch avail lo respectively (the same ids - * as OHCI uses). data and compare are the new data and expected data - * respectively, return value is the old value. - */ - quadlet_t (*hw_csr_reg) (struct hpsb_host *host, int reg, - quadlet_t data, quadlet_t compare); + /* This function is mainly to redirect local CSR reads/locks to the iso + * management registers (bus manager id, bandwidth available, channels + * available) to the hardware registers in OHCI. reg is 0,1,2,3 for bus + * mgr, bwdth avail, ch avail hi, ch avail lo respectively (the same ids + * as OHCI uses). data and compare are the new data and expected data + * respectively, return value is the old value. 
+ */ + quadlet_t (*hw_csr_reg) (struct hpsb_host *host, int reg, + quadlet_t data, quadlet_t compare); }; diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c index ff8a40980607..64fbbb01d52a 100644 --- a/drivers/ieee1394/ieee1394_core.c +++ b/drivers/ieee1394/ieee1394_core.c @@ -179,34 +179,34 @@ void hpsb_free_packet(struct hpsb_packet *packet) int hpsb_reset_bus(struct hpsb_host *host, int type) { - if (!host->in_bus_reset) { - host->driver->devctl(host, RESET_BUS, type); - return 0; - } else { - return 1; - } + if (!host->in_bus_reset) { + host->driver->devctl(host, RESET_BUS, type); + return 0; + } else { + return 1; + } } int hpsb_bus_reset(struct hpsb_host *host) { - if (host->in_bus_reset) { - HPSB_NOTICE("%s called while bus reset already in progress", + if (host->in_bus_reset) { + HPSB_NOTICE("%s called while bus reset already in progress", __FUNCTION__); - return 1; - } + return 1; + } - abort_requests(host); - host->in_bus_reset = 1; - host->irm_id = -1; + abort_requests(host); + host->in_bus_reset = 1; + host->irm_id = -1; host->is_irm = 0; - host->busmgr_id = -1; + host->busmgr_id = -1; host->is_busmgr = 0; host->is_cycmst = 0; - host->node_count = 0; - host->selfid_count = 0; + host->node_count = 0; + host->selfid_count = 0; - return 0; + return 0; } @@ -216,47 +216,47 @@ int hpsb_bus_reset(struct hpsb_host *host) */ static int check_selfids(struct hpsb_host *host) { - int nodeid = -1; - int rest_of_selfids = host->selfid_count; - struct selfid *sid = (struct selfid *)host->topology_map; - struct ext_selfid *esid; - int esid_seq = 23; + int nodeid = -1; + int rest_of_selfids = host->selfid_count; + struct selfid *sid = (struct selfid *)host->topology_map; + struct ext_selfid *esid; + int esid_seq = 23; host->nodes_active = 0; - while (rest_of_selfids--) { - if (!sid->extended) { - nodeid++; - esid_seq = 0; + while (rest_of_selfids--) { + if (!sid->extended) { + nodeid++; + esid_seq = 0; - if (sid->phy_id != nodeid) { - HPSB_INFO("SelfIDs failed monotony check with " - "%d", sid->phy_id); - return 0; - } + if (sid->phy_id != nodeid) { + HPSB_INFO("SelfIDs failed monotony check with " + "%d", sid->phy_id); + return 0; + } if (sid->link_active) { host->nodes_active++; if (sid->contender) host->irm_id = LOCAL_BUS | sid->phy_id; } - } else { - esid = (struct ext_selfid *)sid; - - if ((esid->phy_id != nodeid) - || (esid->seq_nr != esid_seq)) { - HPSB_INFO("SelfIDs failed monotony check with " - "%d/%d", esid->phy_id, esid->seq_nr); - return 0; - } - esid_seq++; - } - sid++; - } - - esid = (struct ext_selfid *)(sid - 1); - while (esid->extended) { - if ((esid->porta == SELFID_PORT_PARENT) || + } else { + esid = (struct ext_selfid *)sid; + + if ((esid->phy_id != nodeid) + || (esid->seq_nr != esid_seq)) { + HPSB_INFO("SelfIDs failed monotony check with " + "%d/%d", esid->phy_id, esid->seq_nr); + return 0; + } + esid_seq++; + } + sid++; + } + + esid = (struct ext_selfid *)(sid - 1); + while (esid->extended) { + if ((esid->porta == SELFID_PORT_PARENT) || (esid->portb == SELFID_PORT_PARENT) || (esid->portc == SELFID_PORT_PARENT) || (esid->portd == SELFID_PORT_PARENT) || @@ -267,47 +267,47 @@ static int check_selfids(struct hpsb_host *host) HPSB_INFO("SelfIDs failed root check on " "extended SelfID"); return 0; - } - esid--; - } + } + esid--; + } - sid = (struct selfid *)esid; + sid = (struct selfid *)esid; if ((sid->port0 == SELFID_PORT_PARENT) || (sid->port1 == SELFID_PORT_PARENT) || (sid->port2 == SELFID_PORT_PARENT)) { HPSB_INFO("SelfIDs failed root 
check"); return 0; - } + } host->node_count = nodeid + 1; - return 1; + return 1; } static void build_speed_map(struct hpsb_host *host, int nodecount) { u8 speedcap[nodecount]; u8 cldcnt[nodecount]; - u8 *map = host->speed_map; - struct selfid *sid; - struct ext_selfid *esid; - int i, j, n; - - for (i = 0; i < (nodecount * 64); i += 64) { - for (j = 0; j < nodecount; j++) { - map[i+j] = IEEE1394_SPEED_MAX; - } - } - - for (i = 0; i < nodecount; i++) { - cldcnt[i] = 0; - } - - /* find direct children count and speed */ - for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1], - n = nodecount - 1; - (void *)sid >= (void *)host->topology_map; sid--) { - if (sid->extended) { - esid = (struct ext_selfid *)sid; + u8 *map = host->speed_map; + struct selfid *sid; + struct ext_selfid *esid; + int i, j, n; + + for (i = 0; i < (nodecount * 64); i += 64) { + for (j = 0; j < nodecount; j++) { + map[i+j] = IEEE1394_SPEED_MAX; + } + } + + for (i = 0; i < nodecount; i++) { + cldcnt[i] = 0; + } + + /* find direct children count and speed */ + for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1], + n = nodecount - 1; + (void *)sid >= (void *)host->topology_map; sid--) { + if (sid->extended) { + esid = (struct ext_selfid *)sid; if (esid->porta == SELFID_PORT_CHILD) cldcnt[n]++; if (esid->portb == SELFID_PORT_CHILD) cldcnt[n]++; @@ -322,50 +322,50 @@ static void build_speed_map(struct hpsb_host *host, int nodecount) if (sid->port1 == SELFID_PORT_CHILD) cldcnt[n]++; if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++; - speedcap[n] = sid->speed; - n--; - } - } - - /* set self mapping */ - for (i = 0; i < nodecount; i++) { - map[64*i + i] = speedcap[i]; - } - - /* fix up direct children count to total children count; - * also fix up speedcaps for sibling and parent communication */ - for (i = 1; i < nodecount; i++) { - for (j = cldcnt[i], n = i - 1; j > 0; j--) { - cldcnt[i] += cldcnt[n]; - speedcap[n] = min(speedcap[n], speedcap[i]); - n -= cldcnt[n] + 1; - } - } - - for (n = 0; n < nodecount; n++) { - for (i = n - cldcnt[n]; i <= n; i++) { - for (j = 0; j < (n - cldcnt[n]); j++) { - map[j*64 + i] = map[i*64 + j] = - min(map[i*64 + j], speedcap[n]); - } - for (j = n + 1; j < nodecount; j++) { - map[j*64 + i] = map[i*64 + j] = - min(map[i*64 + j], speedcap[n]); - } - } - } + speedcap[n] = sid->speed; + n--; + } + } + + /* set self mapping */ + for (i = 0; i < nodecount; i++) { + map[64*i + i] = speedcap[i]; + } + + /* fix up direct children count to total children count; + * also fix up speedcaps for sibling and parent communication */ + for (i = 1; i < nodecount; i++) { + for (j = cldcnt[i], n = i - 1; j > 0; j--) { + cldcnt[i] += cldcnt[n]; + speedcap[n] = min(speedcap[n], speedcap[i]); + n -= cldcnt[n] + 1; + } + } + + for (n = 0; n < nodecount; n++) { + for (i = n - cldcnt[n]; i <= n; i++) { + for (j = 0; j < (n - cldcnt[n]); j++) { + map[j*64 + i] = map[i*64 + j] = + min(map[i*64 + j], speedcap[n]); + } + for (j = n + 1; j < nodecount; j++) { + map[j*64 + i] = map[i*64 + j] = + min(map[i*64 + j], speedcap[n]); + } + } + } } void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid) { - if (host->in_bus_reset) { - HPSB_VERBOSE("Including SelfID 0x%x", sid); - host->topology_map[host->selfid_count++] = sid; - } else { - HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from bus %d", + if (host->in_bus_reset) { + HPSB_VERBOSE("Including SelfID 0x%x", sid); + host->topology_map[host->selfid_count++] = sid; + } else { + HPSB_NOTICE("Spurious SelfID packet (0x%08x) received 
from bus %d", sid, NODEID_TO_BUS(host->node_id)); - } + } } void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot) @@ -373,50 +373,50 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot) if (!host->in_bus_reset) HPSB_NOTICE("SelfID completion called outside of bus reset!"); - host->node_id = LOCAL_BUS | phyid; - host->is_root = isroot; + host->node_id = LOCAL_BUS | phyid; + host->is_root = isroot; - if (!check_selfids(host)) { - if (host->reset_retries++ < 20) { - /* selfid stage did not complete without error */ - HPSB_NOTICE("Error in SelfID stage, resetting"); + if (!check_selfids(host)) { + if (host->reset_retries++ < 20) { + /* selfid stage did not complete without error */ + HPSB_NOTICE("Error in SelfID stage, resetting"); host->in_bus_reset = 0; /* this should work from ohci1394 now... */ - hpsb_reset_bus(host, LONG_RESET); - return; - } else { - HPSB_NOTICE("Stopping out-of-control reset loop"); - HPSB_NOTICE("Warning - topology map and speed map will not be valid"); + hpsb_reset_bus(host, LONG_RESET); + return; + } else { + HPSB_NOTICE("Stopping out-of-control reset loop"); + HPSB_NOTICE("Warning - topology map and speed map will not be valid"); host->reset_retries = 0; - } - } else { + } + } else { host->reset_retries = 0; - build_speed_map(host, host->node_count); - } + build_speed_map(host, host->node_count); + } HPSB_VERBOSE("selfid_complete called with successful SelfID stage " "... irm_id: 0x%X node_id: 0x%X",host->irm_id,host->node_id); - /* irm_id is kept up to date by check_selfids() */ - if (host->irm_id == host->node_id) { - host->is_irm = 1; - } else { - host->is_busmgr = 0; - host->is_irm = 0; - } + /* irm_id is kept up to date by check_selfids() */ + if (host->irm_id == host->node_id) { + host->is_irm = 1; + } else { + host->is_busmgr = 0; + host->is_irm = 0; + } - if (isroot) { + if (isroot) { host->driver->devctl(host, ACT_CYCLE_MASTER, 1); host->is_cycmst = 1; } atomic_inc(&host->generation); host->in_bus_reset = 0; - highlevel_host_reset(host); + highlevel_host_reset(host); } void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet, - int ackcode) + int ackcode) { unsigned long flags; @@ -507,13 +507,13 @@ int hpsb_send_packet(struct hpsb_packet *packet) { struct hpsb_host *host = packet->host; - if (host->is_shutdown) + if (host->is_shutdown) return -EINVAL; if (host->in_bus_reset || (packet->generation != get_hpsb_generation(host))) - return -EAGAIN; + return -EAGAIN; - packet->state = hpsb_queued; + packet->state = hpsb_queued; /* This just seems silly to me */ WARN_ON(packet->no_waiter && packet->expect_response); @@ -527,42 +527,42 @@ int hpsb_send_packet(struct hpsb_packet *packet) skb_queue_tail(&host->pending_packet_queue, packet->skb); } - if (packet->node_id == host->node_id) { + if (packet->node_id == host->node_id) { /* it is a local request, so handle it locally */ - quadlet_t *data; - size_t size = packet->data_size + packet->header_size; + quadlet_t *data; + size_t size = packet->data_size + packet->header_size; - data = kmalloc(size, GFP_ATOMIC); - if (!data) { - HPSB_ERR("unable to allocate memory for concatenating header and data"); - return -ENOMEM; - } + data = kmalloc(size, GFP_ATOMIC); + if (!data) { + HPSB_ERR("unable to allocate memory for concatenating header and data"); + return -ENOMEM; + } - memcpy(data, packet->header, packet->header_size); + memcpy(data, packet->header, packet->header_size); - if (packet->data_size) + if (packet->data_size) memcpy(((u8*)data) + 
packet->header_size, packet->data, packet->data_size); - dump_packet("send packet local", packet->header, packet->header_size, -1); + dump_packet("send packet local", packet->header, packet->header_size, -1); - hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE); - hpsb_packet_received(host, data, size, 0); + hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE); + hpsb_packet_received(host, data, size, 0); - kfree(data); + kfree(data); - return 0; - } + return 0; + } - if (packet->type == hpsb_async && packet->node_id != ALL_NODES) { - packet->speed_code = - host->speed_map[NODEID_TO_NODE(host->node_id) * 64 - + NODEID_TO_NODE(packet->node_id)]; - } + if (packet->type == hpsb_async && packet->node_id != ALL_NODES) { + packet->speed_code = + host->speed_map[NODEID_TO_NODE(host->node_id) * 64 + + NODEID_TO_NODE(packet->node_id)]; + } - dump_packet("send packet", packet->header, packet->header_size, packet->speed_code); + dump_packet("send packet", packet->header, packet->header_size, packet->speed_code); - return host->driver->transmit_packet(host, packet); + return host->driver->transmit_packet(host, packet); } /* We could just use complete() directly as the packet complete @@ -590,81 +590,81 @@ int hpsb_send_packet_and_wait(struct hpsb_packet *packet) static void send_packet_nocare(struct hpsb_packet *packet) { - if (hpsb_send_packet(packet) < 0) { - hpsb_free_packet(packet); - } + if (hpsb_send_packet(packet) < 0) { + hpsb_free_packet(packet); + } } static void handle_packet_response(struct hpsb_host *host, int tcode, quadlet_t *data, size_t size) { - struct hpsb_packet *packet = NULL; + struct hpsb_packet *packet = NULL; struct sk_buff *skb; - int tcode_match = 0; - int tlabel; - unsigned long flags; + int tcode_match = 0; + int tlabel; + unsigned long flags; - tlabel = (data[0] >> 10) & 0x3f; + tlabel = (data[0] >> 10) & 0x3f; spin_lock_irqsave(&host->pending_packet_queue.lock, flags); skb_queue_walk(&host->pending_packet_queue, skb) { packet = (struct hpsb_packet *)skb->data; - if ((packet->tlabel == tlabel) - && (packet->node_id == (data[1] >> 16))){ - break; - } + if ((packet->tlabel == tlabel) + && (packet->node_id == (data[1] >> 16))){ + break; + } packet = NULL; - } + } if (packet == NULL) { - HPSB_DEBUG("unsolicited response packet received - no tlabel match"); - dump_packet("contents", data, 16, -1); + HPSB_DEBUG("unsolicited response packet received - no tlabel match"); + dump_packet("contents", data, 16, -1); spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); - return; - } + return; + } - switch (packet->tcode) { - case TCODE_WRITEQ: - case TCODE_WRITEB: - if (tcode != TCODE_WRITE_RESPONSE) + switch (packet->tcode) { + case TCODE_WRITEQ: + case TCODE_WRITEB: + if (tcode != TCODE_WRITE_RESPONSE) break; tcode_match = 1; memcpy(packet->header, data, 12); - break; - case TCODE_READQ: - if (tcode != TCODE_READQ_RESPONSE) + break; + case TCODE_READQ: + if (tcode != TCODE_READQ_RESPONSE) break; tcode_match = 1; memcpy(packet->header, data, 16); - break; - case TCODE_READB: - if (tcode != TCODE_READB_RESPONSE) + break; + case TCODE_READB: + if (tcode != TCODE_READB_RESPONSE) break; tcode_match = 1; BUG_ON(packet->skb->len - sizeof(*packet) < size - 16); memcpy(packet->header, data, 16); memcpy(packet->data, data + 4, size - 16); - break; - case TCODE_LOCK_REQUEST: - if (tcode != TCODE_LOCK_RESPONSE) + break; + case TCODE_LOCK_REQUEST: + if (tcode != TCODE_LOCK_RESPONSE) break; tcode_match = 1; size = 
min((size - 16), (size_t)8); BUG_ON(packet->skb->len - sizeof(*packet) < size); memcpy(packet->header, data, 16); memcpy(packet->data, data + 4, size); - break; - } + break; + } - if (!tcode_match) { + if (!tcode_match) { spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); - HPSB_INFO("unsolicited response packet received - tcode mismatch"); - dump_packet("contents", data, 16, -1); - return; - } + HPSB_INFO("unsolicited response packet received - tcode mismatch"); + dump_packet("contents", data, 16, -1); + return; + } __skb_unlink(skb, &host->pending_packet_queue); @@ -683,27 +683,27 @@ static void handle_packet_response(struct hpsb_host *host, int tcode, static struct hpsb_packet *create_reply_packet(struct hpsb_host *host, quadlet_t *data, size_t dsize) { - struct hpsb_packet *p; + struct hpsb_packet *p; - p = hpsb_alloc_packet(dsize); - if (unlikely(p == NULL)) { - /* FIXME - send data_error response */ - return NULL; - } + p = hpsb_alloc_packet(dsize); + if (unlikely(p == NULL)) { + /* FIXME - send data_error response */ + return NULL; + } - p->type = hpsb_async; - p->state = hpsb_unused; - p->host = host; - p->node_id = data[1] >> 16; - p->tlabel = (data[0] >> 10) & 0x3f; - p->no_waiter = 1; + p->type = hpsb_async; + p->state = hpsb_unused; + p->host = host; + p->node_id = data[1] >> 16; + p->tlabel = (data[0] >> 10) & 0x3f; + p->no_waiter = 1; p->generation = get_hpsb_generation(host); if (dsize % 4) p->data[dsize / 4] = 0; - return p; + return p; } #define PREP_ASYNC_HEAD_RCODE(tc) \ @@ -714,7 +714,7 @@ static struct hpsb_packet *create_reply_packet(struct hpsb_host *host, packet->header[2] = 0 static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode, - quadlet_t data) + quadlet_t data) { PREP_ASYNC_HEAD_RCODE(TCODE_READQ_RESPONSE); packet->header[3] = data; @@ -723,7 +723,7 @@ static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode, } static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode, - int length) + int length) { if (rcode != RCODE_COMPLETE) length = 0; @@ -743,7 +743,7 @@ static void fill_async_write_resp(struct hpsb_packet *packet, int rcode) } static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode, - int length) + int length) { if (rcode != RCODE_COMPLETE) length = 0; @@ -755,184 +755,184 @@ static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extc } #define PREP_REPLY_PACKET(length) \ - packet = create_reply_packet(host, data, length); \ - if (packet == NULL) break + packet = create_reply_packet(host, data, length); \ + if (packet == NULL) break static void handle_incoming_packet(struct hpsb_host *host, int tcode, quadlet_t *data, size_t size, int write_acked) { - struct hpsb_packet *packet; - int length, rcode, extcode; - quadlet_t buffer; - nodeid_t source = data[1] >> 16; - nodeid_t dest = data[0] >> 16; - u16 flags = (u16) data[0]; - u64 addr; - - /* big FIXME - no error checking is done for an out of bounds length */ - - switch (tcode) { - case TCODE_WRITEQ: - addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; - rcode = highlevel_write(host, source, dest, data+3, + struct hpsb_packet *packet; + int length, rcode, extcode; + quadlet_t buffer; + nodeid_t source = data[1] >> 16; + nodeid_t dest = data[0] >> 16; + u16 flags = (u16) data[0]; + u64 addr; + + /* big FIXME - no error checking is done for an out of bounds length */ + + switch (tcode) { + case TCODE_WRITEQ: + addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; + rcode = 
highlevel_write(host, source, dest, data+3, addr, 4, flags); - if (!write_acked - && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK) - && (rcode >= 0)) { - /* not a broadcast write, reply */ - PREP_REPLY_PACKET(0); - fill_async_write_resp(packet, rcode); - send_packet_nocare(packet); - } - break; - - case TCODE_WRITEB: - addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; - rcode = highlevel_write(host, source, dest, data+4, + if (!write_acked + && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK) + && (rcode >= 0)) { + /* not a broadcast write, reply */ + PREP_REPLY_PACKET(0); + fill_async_write_resp(packet, rcode); + send_packet_nocare(packet); + } + break; + + case TCODE_WRITEB: + addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; + rcode = highlevel_write(host, source, dest, data+4, addr, data[3]>>16, flags); - if (!write_acked - && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK) - && (rcode >= 0)) { - /* not a broadcast write, reply */ - PREP_REPLY_PACKET(0); - fill_async_write_resp(packet, rcode); - send_packet_nocare(packet); - } - break; - - case TCODE_READQ: - addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; - rcode = highlevel_read(host, source, &buffer, addr, 4, flags); - - if (rcode >= 0) { - PREP_REPLY_PACKET(0); - fill_async_readquad_resp(packet, rcode, buffer); - send_packet_nocare(packet); - } - break; - - case TCODE_READB: - length = data[3] >> 16; - PREP_REPLY_PACKET(length); - - addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; - rcode = highlevel_read(host, source, packet->data, addr, - length, flags); - - if (rcode >= 0) { - fill_async_readblock_resp(packet, rcode, length); - send_packet_nocare(packet); - } else { - hpsb_free_packet(packet); - } - break; - - case TCODE_LOCK_REQUEST: - length = data[3] >> 16; - extcode = data[3] & 0xffff; - addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; - - PREP_REPLY_PACKET(8); - - if ((extcode == 0) || (extcode >= 7)) { - /* let switch default handle error */ - length = 0; - } - - switch (length) { - case 4: - rcode = highlevel_lock(host, source, packet->data, addr, - data[4], 0, extcode,flags); - fill_async_lock_resp(packet, rcode, extcode, 4); - break; - case 8: - if ((extcode != EXTCODE_FETCH_ADD) - && (extcode != EXTCODE_LITTLE_ADD)) { - rcode = highlevel_lock(host, source, - packet->data, addr, - data[5], data[4], - extcode, flags); - fill_async_lock_resp(packet, rcode, extcode, 4); - } else { - rcode = highlevel_lock64(host, source, - (octlet_t *)packet->data, addr, - *(octlet_t *)(data + 4), 0ULL, - extcode, flags); - fill_async_lock_resp(packet, rcode, extcode, 8); - } - break; - case 16: - rcode = highlevel_lock64(host, source, - (octlet_t *)packet->data, addr, - *(octlet_t *)(data + 6), - *(octlet_t *)(data + 4), - extcode, flags); - fill_async_lock_resp(packet, rcode, extcode, 8); - break; - default: - rcode = RCODE_TYPE_ERROR; - fill_async_lock_resp(packet, rcode, - extcode, 0); - } - - if (rcode >= 0) { - send_packet_nocare(packet); - } else { - hpsb_free_packet(packet); - } - break; - } + if (!write_acked + && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK) + && (rcode >= 0)) { + /* not a broadcast write, reply */ + PREP_REPLY_PACKET(0); + fill_async_write_resp(packet, rcode); + send_packet_nocare(packet); + } + break; + + case TCODE_READQ: + addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; + rcode = highlevel_read(host, source, &buffer, addr, 4, flags); + + if (rcode >= 0) { + PREP_REPLY_PACKET(0); + fill_async_readquad_resp(packet, rcode, buffer); + send_packet_nocare(packet); + } + break; + + case TCODE_READB: + 
length = data[3] >> 16; + PREP_REPLY_PACKET(length); + + addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; + rcode = highlevel_read(host, source, packet->data, addr, + length, flags); + + if (rcode >= 0) { + fill_async_readblock_resp(packet, rcode, length); + send_packet_nocare(packet); + } else { + hpsb_free_packet(packet); + } + break; + + case TCODE_LOCK_REQUEST: + length = data[3] >> 16; + extcode = data[3] & 0xffff; + addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; + + PREP_REPLY_PACKET(8); + + if ((extcode == 0) || (extcode >= 7)) { + /* let switch default handle error */ + length = 0; + } + + switch (length) { + case 4: + rcode = highlevel_lock(host, source, packet->data, addr, + data[4], 0, extcode,flags); + fill_async_lock_resp(packet, rcode, extcode, 4); + break; + case 8: + if ((extcode != EXTCODE_FETCH_ADD) + && (extcode != EXTCODE_LITTLE_ADD)) { + rcode = highlevel_lock(host, source, + packet->data, addr, + data[5], data[4], + extcode, flags); + fill_async_lock_resp(packet, rcode, extcode, 4); + } else { + rcode = highlevel_lock64(host, source, + (octlet_t *)packet->data, addr, + *(octlet_t *)(data + 4), 0ULL, + extcode, flags); + fill_async_lock_resp(packet, rcode, extcode, 8); + } + break; + case 16: + rcode = highlevel_lock64(host, source, + (octlet_t *)packet->data, addr, + *(octlet_t *)(data + 6), + *(octlet_t *)(data + 4), + extcode, flags); + fill_async_lock_resp(packet, rcode, extcode, 8); + break; + default: + rcode = RCODE_TYPE_ERROR; + fill_async_lock_resp(packet, rcode, + extcode, 0); + } + + if (rcode >= 0) { + send_packet_nocare(packet); + } else { + hpsb_free_packet(packet); + } + break; + } } #undef PREP_REPLY_PACKET void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size, - int write_acked) + int write_acked) { - int tcode; - - if (host->in_bus_reset) { - HPSB_INFO("received packet during reset; ignoring"); - return; - } - - dump_packet("received packet", data, size, -1); - - tcode = (data[0] >> 4) & 0xf; - - switch (tcode) { - case TCODE_WRITE_RESPONSE: - case TCODE_READQ_RESPONSE: - case TCODE_READB_RESPONSE: - case TCODE_LOCK_RESPONSE: - handle_packet_response(host, tcode, data, size); - break; - - case TCODE_WRITEQ: - case TCODE_WRITEB: - case TCODE_READQ: - case TCODE_READB: - case TCODE_LOCK_REQUEST: - handle_incoming_packet(host, tcode, data, size, write_acked); - break; - - - case TCODE_ISO_DATA: - highlevel_iso_receive(host, data, size); - break; - - case TCODE_CYCLE_START: - /* simply ignore this packet if it is passed on */ - break; - - default: - HPSB_NOTICE("received packet with bogus transaction code %d", - tcode); - break; - } + int tcode; + + if (host->in_bus_reset) { + HPSB_INFO("received packet during reset; ignoring"); + return; + } + + dump_packet("received packet", data, size, -1); + + tcode = (data[0] >> 4) & 0xf; + + switch (tcode) { + case TCODE_WRITE_RESPONSE: + case TCODE_READQ_RESPONSE: + case TCODE_READB_RESPONSE: + case TCODE_LOCK_RESPONSE: + handle_packet_response(host, tcode, data, size); + break; + + case TCODE_WRITEQ: + case TCODE_WRITEB: + case TCODE_READQ: + case TCODE_READB: + case TCODE_LOCK_REQUEST: + handle_incoming_packet(host, tcode, data, size, write_acked); + break; + + + case TCODE_ISO_DATA: + highlevel_iso_receive(host, data, size); + break; + + case TCODE_CYCLE_START: + /* simply ignore this packet if it is passed on */ + break; + + default: + HPSB_NOTICE("received packet with bogus transaction code %d", + tcode); + break; + } } @@ -1126,7 +1126,7 @@ static int __init 
ieee1394_init(void) nodemgr implements functionality required of ieee1394a-2000 IRMs */ hpsb_disable_irm = 1; - + return 0; } diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h index 0b31429d0a68..b35466023f00 100644 --- a/drivers/ieee1394/ieee1394_core.h +++ b/drivers/ieee1394/ieee1394_core.h @@ -10,8 +10,8 @@ struct hpsb_packet { - /* This struct is basically read-only for hosts with the exception of - * the data buffer contents and xnext - see below. */ + /* This struct is basically read-only for hosts with the exception of + * the data buffer contents and xnext - see below. */ /* This can be used for host driver internal linking. * @@ -21,47 +21,47 @@ struct hpsb_packet { * driver_list when free'ing it. */ struct list_head driver_list; - nodeid_t node_id; + nodeid_t node_id; - /* Async and Iso types should be clear, raw means send-as-is, do not - * CRC! Byte swapping shall still be done in this case. */ - enum { hpsb_async, hpsb_iso, hpsb_raw } __attribute__((packed)) type; + /* Async and Iso types should be clear, raw means send-as-is, do not + * CRC! Byte swapping shall still be done in this case. */ + enum { hpsb_async, hpsb_iso, hpsb_raw } __attribute__((packed)) type; - /* Okay, this is core internal and a no care for hosts. - * queued = queued for sending - * pending = sent, waiting for response - * complete = processing completed, successful or not - */ - enum { - hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete - } __attribute__((packed)) state; + /* Okay, this is core internal and a no care for hosts. + * queued = queued for sending + * pending = sent, waiting for response + * complete = processing completed, successful or not + */ + enum { + hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete + } __attribute__((packed)) state; - /* These are core internal. */ - signed char tlabel; + /* These are core internal. */ + signed char tlabel; signed char ack_code; unsigned char tcode; - unsigned expect_response:1; - unsigned no_waiter:1; + unsigned expect_response:1; + unsigned no_waiter:1; - /* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */ - unsigned speed_code:2; + /* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */ + unsigned speed_code:2; - /* - * *header and *data are guaranteed to be 32-bit DMAable and may be - * overwritten to allow in-place byte swapping. Neither of these is - * CRCed (the sizes also don't include CRC), but contain space for at - * least one additional quadlet to allow in-place CRCing. The memory is - * also guaranteed to be DMA mappable. - */ - quadlet_t *header; - quadlet_t *data; - size_t header_size; - size_t data_size; + /* + * *header and *data are guaranteed to be 32-bit DMAable and may be + * overwritten to allow in-place byte swapping. Neither of these is + * CRCed (the sizes also don't include CRC), but contain space for at + * least one additional quadlet to allow in-place CRCing. The memory is + * also guaranteed to be DMA mappable. + */ + quadlet_t *header; + quadlet_t *data; + size_t header_size; + size_t data_size; - struct hpsb_host *host; - unsigned int generation; + struct hpsb_host *host; + unsigned int generation; atomic_t refcnt; @@ -73,10 +73,10 @@ struct hpsb_packet { /* XXX This is just a hack at the moment */ struct sk_buff *skb; - /* Store jiffies for implementing bus timeouts. */ - unsigned long sendtime; + /* Store jiffies for implementing bus timeouts. 
*/ + unsigned long sendtime; - quadlet_t embedded_header[5]; + quadlet_t embedded_header[5]; }; /* Set a task for when a packet completes */ @@ -102,7 +102,7 @@ void hpsb_free_packet(struct hpsb_packet *packet); */ static inline unsigned int get_hpsb_generation(struct hpsb_host *host) { - return atomic_read(&host->generation); + return atomic_read(&host->generation); } /* @@ -157,7 +157,7 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot); * from within a transmit packet routine. */ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet, - int ackcode); + int ackcode); /* * Hand over received packet to the core. The contents of data are expected to @@ -171,7 +171,7 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet, * packet type. */ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size, - int write_acked); + int write_acked); /* @@ -197,20 +197,20 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size, * Block 15 (240-255) reserved for drivers under development, etc. */ -#define IEEE1394_MAJOR 171 +#define IEEE1394_MAJOR 171 -#define IEEE1394_MINOR_BLOCK_RAW1394 0 -#define IEEE1394_MINOR_BLOCK_VIDEO1394 1 -#define IEEE1394_MINOR_BLOCK_DV1394 2 -#define IEEE1394_MINOR_BLOCK_AMDTP 3 +#define IEEE1394_MINOR_BLOCK_RAW1394 0 +#define IEEE1394_MINOR_BLOCK_VIDEO1394 1 +#define IEEE1394_MINOR_BLOCK_DV1394 2 +#define IEEE1394_MINOR_BLOCK_AMDTP 3 #define IEEE1394_MINOR_BLOCK_EXPERIMENTAL 15 -#define IEEE1394_CORE_DEV MKDEV(IEEE1394_MAJOR, 0) -#define IEEE1394_RAW1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16) -#define IEEE1394_VIDEO1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16) -#define IEEE1394_DV1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16) -#define IEEE1394_AMDTP_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16) -#define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16) +#define IEEE1394_CORE_DEV MKDEV(IEEE1394_MAJOR, 0) +#define IEEE1394_RAW1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16) +#define IEEE1394_VIDEO1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16) +#define IEEE1394_DV1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16) +#define IEEE1394_AMDTP_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16) +#define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16) /* return the index (within a minor number block) of a file */ static inline unsigned char ieee1394_file_to_instance(struct file *file) -- cgit v1.2.3 From 61c7f775ca25ccfc0e51486103a724fb1a3a08f2 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Mon, 5 Dec 2005 16:28:59 -0500 Subject: ieee1394: write broadcast_channel only to select nodes (fixes device recognition) Some old 1394-1995 SBP-2 bridges would hang if they received a broadcast write request to BROADCAST_CHANNEL before the config ROM was read. Affected devices include Datafab MD2-FW2 2.5" HDD and SmartDisk VST FWCDRW-V8 portable CD writer. The write request is now directed to specific nodes instead of being broadcast to all nodes at once, and it is only performed if a previous read request at this register succeeded. 
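In outline, the gating added below (nodemgr_irm_write_bc() in the nodemgr.c hunk) probes the register with a directed read and only writes it back when the read succeeded, the register advertises 1394a validity, and the value actually differs. A condensed restatement of that logic, using the same names as the patch:

	quadlet_t bc_remote, bc_local;

	bc_local = cpu_to_be32(ne->host->csr.broadcast_channel);

	/* Directed read first: skip nodes that do not implement the register. */
	if (hpsb_read(ne->host, ne->nodeid, generation, bc_addr,
		      &bc_remote, sizeof(bc_remote)))
		return;

	/* Write only if the register is 1394a compliant and out of date. */
	if ((bc_remote & cpu_to_be32(0x80000000)) && bc_remote != bc_local)
		hpsb_node_write(ne, bc_addr, &bc_local, sizeof(bc_local));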
Fixes an old interoperability problem which was perceived as a 2.6.14-specific regression: http://marc.theaimsgroup.com/?t=113190586800003 Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/nodemgr.c | 42 ++++++++++++++++++++++++++++++++---------- 1 file changed, 32 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index f4b6025fde61..01ab2bfa8d9d 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c @@ -1346,6 +1346,33 @@ static void nodemgr_update_pdrv(struct node_entry *ne) } +/* Write the BROADCAST_CHANNEL as per IEEE1394a 8.3.2.3.11 and 8.4.2.3. This + * seems like an optional service but in the end it is practically mandatory + * as a consequence of these clauses. + * + * Note that we cannot do a broadcast write to all nodes at once because some + * pre-1394a devices would hang. */ +static void nodemgr_irm_write_bc(struct node_entry *ne, int generation) +{ + const u64 bc_addr = (CSR_REGISTER_BASE | CSR_BROADCAST_CHANNEL); + quadlet_t bc_remote, bc_local; + int ret; + + if (!ne->host->is_irm || ne->generation != generation || + ne->nodeid == ne->host->node_id) + return; + + bc_local = cpu_to_be32(ne->host->csr.broadcast_channel); + + /* Check if the register is implemented and 1394a compliant. */ + ret = hpsb_read(ne->host, ne->nodeid, generation, bc_addr, &bc_remote, + sizeof(bc_remote)); + if (!ret && bc_remote & cpu_to_be32(0x80000000) && + bc_remote != bc_local) + hpsb_node_write(ne, bc_addr, &bc_local, sizeof(bc_local)); +} + + static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int generation) { struct device *dev; @@ -1357,6 +1384,8 @@ static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int ge if (!dev) return; + nodemgr_irm_write_bc(ne, generation); + /* If "needs_probe", then this is either a new or changed node we * rescan totally. If the generation matches for an existing node * (one that existed prior to the bus reset) we send update calls @@ -1429,9 +1458,7 @@ static int nodemgr_send_resume_packet(struct hpsb_host *host) return ret; } -/* Because we are a 1394a-2000 compliant IRM, we need to inform all the other - * nodes of the broadcast channel. (Really we're only setting the validity - * bit). Other IRM responsibilities go in here as well. */ +/* Perform a few high-level IRM responsibilities. */ static int nodemgr_do_irm_duties(struct hpsb_host *host, int cycles) { quadlet_t bc; @@ -1440,13 +1467,8 @@ static int nodemgr_do_irm_duties(struct hpsb_host *host, int cycles) if (!host->is_irm || host->irm_id == (nodeid_t)-1) return 1; - host->csr.broadcast_channel |= 0x40000000; /* set validity bit */ - - bc = cpu_to_be32(host->csr.broadcast_channel); - - hpsb_write(host, LOCAL_BUS | ALL_NODES, get_hpsb_generation(host), - (CSR_REGISTER_BASE | CSR_BROADCAST_CHANNEL), - &bc, sizeof(quadlet_t)); + /* We are a 1394a-2000 compliant IRM. Set the validity bit. 
*/ + host->csr.broadcast_channel |= 0x40000000; /* If there is no bus manager then we should set the root node's * force_root bit to promote bus stability per the 1394 -- cgit v1.2.3 From e38dc0ae24635a2a8a68d87cd0f4a13e74a52d98 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Mon, 5 Dec 2005 16:29:02 -0500 Subject: ieee1394: remove nonexistent functions from nodemgr.h Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/nodemgr.h | 18 ------------------ 1 file changed, 18 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h index 3a2f0c02fd08..0b26616e16c3 100644 --- a/drivers/ieee1394/nodemgr.h +++ b/drivers/ieee1394/nodemgr.h @@ -150,24 +150,6 @@ static inline int hpsb_node_entry_valid(struct node_entry *ne) return ne->generation == get_hpsb_generation(ne->host); } -/* - * Returns a node entry (which has its reference count incremented) or NULL if - * the GUID in question is not known. Getting a valid entry does not mean that - * the node with this GUID is currently accessible (might be powered down). - */ -struct node_entry *hpsb_guid_get_entry(u64 guid); - -/* Same as above, but use the nodeid to get an node entry. This is not - * fool-proof by itself, since the nodeid can change. */ -struct node_entry *hpsb_nodeid_get_entry(struct hpsb_host *host, nodeid_t nodeid); - -/* - * If the entry refers to a local host, this function will return the pointer - * to the hpsb_host structure. It will return NULL otherwise. Once you have - * established it is a local host, you can use that knowledge from then on (the - * GUID won't wander to an external node). */ -struct hpsb_host *hpsb_get_host_by_ne(struct node_entry *ne); - /* * This will fill in the given, pre-initialised hpsb_packet with the current * information from the node entry (host, node ID, generation number). It will -- cgit v1.2.3 From 51c1d80e929bace26d2d795bd77fcc14b02ba3bb Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Mon, 12 Dec 2005 23:03:19 -0500 Subject: ieee1394: run high-level updates before high-level probes After a bus reset, let nodemgr call high-level update hooks first for nodes which do not need to be probed. The main benefit is for a bus with more than one SBP-2 device: SBP-2 reconnects will be performed before SBP-2 logins, thus have a much higher chance to succeed, and their SCSI devices will not be blocked much longer than necessary. This was demonstrated for Linux 2.4 by Dave Cinege a while ago. A better approach would be to perform time-consuming probes in parallel by a subthread. I actually plan to implement this for sbp2 but it may take a while to get that done and tested. Until then, this tweak is a huge improvement for users with multiple SBP-2 devices. Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/nodemgr.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index 01ab2bfa8d9d..0ec298768621 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c @@ -1407,14 +1407,28 @@ static void nodemgr_node_probe(struct host_info *hi, int generation) struct hpsb_host *host = hi->host; struct class *class = &nodemgr_ne_class; struct class_device *cdev; + struct node_entry *ne; /* Do some processing of the nodes we've probed. 
This pulls them * into the sysfs layer if needed, and can result in processing of * unit-directories, or just updating the node and it's - * unit-directories. + * unit-directories. + * + * Run updates before probes. Usually, updates are time-critical + * while probes are time-consuming. (Well, those probes need some + * improvement...) */ + down_read(&class->subsys.rwsem); - list_for_each_entry(cdev, &class->children, node) - nodemgr_probe_ne(hi, container_of(cdev, struct node_entry, class_dev), generation); + list_for_each_entry(cdev, &class->children, node) { + ne = container_of(cdev, struct node_entry, class_dev); + if (!ne->needs_probe) + nodemgr_probe_ne(hi, ne, generation); + } + list_for_each_entry(cdev, &class->children, node) { + ne = container_of(cdev, struct node_entry, class_dev); + if (ne->needs_probe) + nodemgr_probe_ne(hi, ne, generation); + } up_read(&class->subsys.rwsem); -- cgit v1.2.3 From 43863eba763e0c91e33e342ce5b7650fea594a53 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Mon, 12 Dec 2005 23:03:24 -0500 Subject: sbp2: delete sbp2scsi_direction_table DMA_BIDIRECTIONAL data direction may be handled properly by Linux in the future. For now, reject it instead of converting it to another direction. Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/sbp2.c | 45 ++++++++++++++++----------------------------- drivers/ieee1394/sbp2.h | 40 +--------------------------------------- 2 files changed, 17 insertions(+), 68 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index f0763b797238..372a7726063c 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -1740,28 +1740,15 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id, command_orb->misc |= ORB_SET_SPEED(scsi_id->speed_code); command_orb->misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */ - /* - * Get the direction of the transfer. If the direction is unknown, then use our - * goofy table as a back-up. - */ - switch (dma_dir) { - case DMA_NONE: + if (dma_dir == DMA_NONE) orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER; - break; - case DMA_TO_DEVICE: + else if (dma_dir == DMA_TO_DEVICE && scsi_request_bufflen) orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA; - break; - case DMA_FROM_DEVICE: + else if (dma_dir == DMA_FROM_DEVICE && scsi_request_bufflen) orb_direction = ORB_DIRECTION_READ_FROM_MEDIA; - break; - case DMA_BIDIRECTIONAL: - default: - SBP2_ERR("SCSI data transfer direction not specified. " - "Update the SBP2 direction table in sbp2.h if " - "necessary for your application"); - __scsi_print_command(scsi_cmd); - orb_direction = sbp2scsi_direction_table[*scsi_cmd]; - break; + else { + SBP2_WARN("Falling back to DMA_NONE"); + orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER; } /* @@ -1880,16 +1867,6 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id, command_orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen); command_orb->misc |= ORB_SET_DIRECTION(orb_direction); - /* - * Sanity, in case our direction table is not - * up-to-date - */ - if (!scsi_request_bufflen) { - command_orb->data_descriptor_hi = 0x0; - command_orb->data_descriptor_lo = 0x0; - command_orb->misc |= ORB_SET_DIRECTION(1); - } - } else { /* * Need to turn this into page tables, since the * buffer is too large. */ @@ -2370,6 +2347,16 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt, goto done; } + /* + * Bidirectional commands are not yet implemented, + * and unknown transfer direction not handled.
+ */ + if (SCpnt->sc_data_direction == DMA_BIDIRECTIONAL) { + SBP2_ERR("Cannot handle DMA_BIDIRECTIONAL - rejecting command"); + result = DID_ERROR << 16; + goto done; + } + /* * Try and send our SCSI command */ diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index abc647bae5b1..8e227c52b72f 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h @@ -260,45 +260,7 @@ struct sbp2_status_block { #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 #define SBP2_MAX_UDS_PER_NODE 16 /* Maximum scsi devices per node */ #define SBP2_MAX_SECTORS 255 /* Max sectors supported */ - -/* - * SCSI direction table... - * (now used as a back-up in case the direction passed down from above is "unknown") - * - * DIN = IN data direction - * DOU = OUT data direction - * DNO = No data transfer - * DUN = Unknown data direction - * - * Opcode 0xec (Teac specific "opc execute") possibly should be DNO, - * but we'll change it when somebody reports a problem with this. - */ -#define DIN ORB_DIRECTION_READ_FROM_MEDIA -#define DOU ORB_DIRECTION_WRITE_TO_MEDIA -#define DNO ORB_DIRECTION_NO_DATA_TRANSFER -#define DUN DIN - -static unchar sbp2scsi_direction_table[0x100] = { - DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN, - DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN, - DIN,DUN,DIN,DIN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU, - DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU, - DOU,DOU,DIN,DIN,DIN,DNO,DIN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DNO,DUN, - DUN,DIN,DIN,DNO,DNO,DOU,DUN,DUN,DNO,DIN,DIN,DNO,DIN,DOU,DUN,DUN, - DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, - DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, - DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, - DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, - DUN,DNO,DOU,DOU,DIN,DNO,DNO,DNO,DIN,DNO,DOU,DUN,DNO,DIN,DOU,DOU, - DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DIN,DNO,DNO,DNO,DIN,DIN,DUN, - DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, - DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, - DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN, - DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN -}; - -/* This should be safe */ -#define SBP2_MAX_CMDS 8 +#define SBP2_MAX_CMDS 8 /* This should be safe */ /* This is the two dma types we use for cmd_dma below */ enum cmd_dma_types { -- cgit v1.2.3 From dc3edd5412341b02d84144ddfd5bf6ccaaeeb1ac Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Mon, 12 Dec 2005 23:03:30 -0500 Subject: sbp2: did not clean up after scsi_add_device() failed If scsi_add_device() at the end of sbp2_start_device() fails, e.g. due to transport errors during SCSI inquiry, sbp2 needs to log out of the device and release all associated resources. 
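The resulting error path in sbp2_start_device() is the two-line hunk below; it simply mirrors the normal teardown order, logging out of the target before releasing the per-device state (comments added here for context only):

	error = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0);
	if (error) {
		SBP2_ERR("scsi_add_device failed");
		sbp2_logout_device(scsi_id);	/* give up the SBP-2 login held on the target */
		sbp2_remove_device(scsi_id);	/* free ORB/response DMA and the scsi_id itself */
		return error;
	}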
Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/sbp2.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 372a7726063c..5b9d03e93514 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -960,6 +960,8 @@ alloc_fail: error = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0); if (error) { SBP2_ERR("scsi_add_device failed"); + sbp2_logout_device(scsi_id); + sbp2_remove_device(scsi_id); return error; } -- cgit v1.2.3 From 209171a17a908605e516d11436371337a5d87f06 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Tue, 13 Dec 2005 11:05:00 -0500 Subject: ohci1394: log number of implemented isochronous contexts Print the number of IR and IT contexts which the hardware implements as an informational log message when ohci1394 initializes. Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/ohci1394.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c index 97b6f48033c4..b6b96fa04d62 100644 --- a/drivers/ieee1394/ohci1394.c +++ b/drivers/ieee1394/ohci1394.c @@ -584,12 +584,13 @@ static void ohci_initialize(struct ti_ohci *ohci) sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq)); #endif PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] " - "MMIO=[%lx-%lx] Max Packet=[%d]", + "MMIO=[%lx-%lx] Max Packet=[%d] IR/IT contexts=[%d/%d]", ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10), ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf, pci_resource_start(ohci->dev, 0), pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1, - ohci->max_packet_size); + ohci->max_packet_size, + ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx); /* Check all of our ports to make sure that if anything is * connected, we enable that port. */ @@ -3351,13 +3352,8 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev, /* Determine the number of available IR and IT contexts.
*/ ohci->nb_iso_rcv_ctx = get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet); - DBGMSG("%d iso receive contexts available", - ohci->nb_iso_rcv_ctx); - ohci->nb_iso_xmit_ctx = get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet); - DBGMSG("%d iso transmit contexts available", - ohci->nb_iso_xmit_ctx); /* Set the usage bits for non-existent contexts so they can't * be allocated */ -- cgit v1.2.3 From cf8d2c0965b891a5efce8c3a9a07a522e91ddba2 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Tue, 13 Dec 2005 11:05:03 -0500 Subject: sbp2: split sbp2_create_command_orb() for better readability sbp2_create_command_orb() code cleanup: - add two helper functions to reduce nesting depth - omit the return value which was always ignored - remove unnecessary declaration from sb2.h Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/sbp2.c | 355 ++++++++++++++++++++++++------------------------ drivers/ieee1394/sbp2.h | 7 - 2 files changed, 178 insertions(+), 184 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 5b9d03e93514..14b0c35ee9a4 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -1707,26 +1707,184 @@ static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait) return 0; } +static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb, + struct sbp2scsi_host_info *hi, + struct sbp2_command_info *command, + unsigned int scsi_use_sg, + struct scatterlist *sgpnt, + u32 orb_direction, + enum dma_data_direction dma_dir) +{ + command->dma_dir = dma_dir; + orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id); + orb->misc |= ORB_SET_DIRECTION(orb_direction); + + /* Special case if only one element (and less than 64KB in size) */ + if ((scsi_use_sg == 1) && + (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) { + + SBP2_DEBUG("Only one s/g element"); + command->dma_size = sgpnt[0].length; + command->dma_type = CMD_DMA_PAGE; + command->cmd_dma = pci_map_page(hi->host->pdev, + sgpnt[0].page, + sgpnt[0].offset, + command->dma_size, + command->dma_dir); + SBP2_DMA_ALLOC("single page scatter element"); + + orb->data_descriptor_lo = command->cmd_dma; + orb->misc |= ORB_SET_DATA_SIZE(command->dma_size); + + } else { + struct sbp2_unrestricted_page_table *sg_element = + &command->scatter_gather_element[0]; + u32 sg_count, sg_len; + dma_addr_t sg_addr; + int i, count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg, + dma_dir); + + SBP2_DMA_ALLOC("scatter list"); + + command->dma_size = scsi_use_sg; + command->sge_buffer = sgpnt; + + /* use page tables (s/g) */ + orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1); + orb->data_descriptor_lo = command->sge_dma; + + /* + * Loop through and fill out our sbp-2 page tables + * (and split up anything too large) + */ + for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) { + sg_len = sg_dma_len(sgpnt); + sg_addr = sg_dma_address(sgpnt); + while (sg_len) { + sg_element[sg_count].segment_base_lo = sg_addr; + if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) { + sg_element[sg_count].length_segment_base_hi = + PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH); + sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH; + sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH; + } else { + sg_element[sg_count].length_segment_base_hi = + PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len); + sg_len = 0; + } + sg_count++; + } + } + + /* Number of page table (s/g) elements */ + orb->misc |= ORB_SET_DATA_SIZE(sg_count); + + sbp2util_packet_dump(sg_element, + (sizeof(struct sbp2_unrestricted_page_table)) * sg_count, + 
"sbp2 s/g list", command->sge_dma); + + /* Byte swap page tables if necessary */ + sbp2util_cpu_to_be32_buffer(sg_element, + (sizeof(struct sbp2_unrestricted_page_table)) * + sg_count); + } +} + +static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb, + struct sbp2scsi_host_info *hi, + struct sbp2_command_info *command, + struct scatterlist *sgpnt, + u32 orb_direction, + unsigned int scsi_request_bufflen, + void *scsi_request_buffer, + enum dma_data_direction dma_dir) +{ + command->dma_dir = dma_dir; + command->dma_size = scsi_request_bufflen; + command->dma_type = CMD_DMA_SINGLE; + command->cmd_dma = pci_map_single(hi->host->pdev, scsi_request_buffer, + command->dma_size, command->dma_dir); + orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id); + orb->misc |= ORB_SET_DIRECTION(orb_direction); + + SBP2_DMA_ALLOC("single bulk"); + + /* + * Handle case where we get a command w/o s/g enabled (but + * check for transfers larger than 64K) + */ + if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) { + + orb->data_descriptor_lo = command->cmd_dma; + orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen); + + } else { + struct sbp2_unrestricted_page_table *sg_element = + &command->scatter_gather_element[0]; + u32 sg_count, sg_len; + dma_addr_t sg_addr; + + /* + * Need to turn this into page tables, since the + * buffer is too large. + */ + orb->data_descriptor_lo = command->sge_dma; + + /* Use page tables (s/g) */ + orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1); + + /* + * fill out our sbp-2 page tables (and split up + * the large buffer) + */ + sg_count = 0; + sg_len = scsi_request_bufflen; + sg_addr = command->cmd_dma; + while (sg_len) { + sg_element[sg_count].segment_base_lo = sg_addr; + if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) { + sg_element[sg_count].length_segment_base_hi = + PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH); + sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH; + sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH; + } else { + sg_element[sg_count].length_segment_base_hi = + PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len); + sg_len = 0; + } + sg_count++; + } + + /* Number of page table (s/g) elements */ + orb->misc |= ORB_SET_DATA_SIZE(sg_count); + + sbp2util_packet_dump(sg_element, + (sizeof(struct sbp2_unrestricted_page_table)) * sg_count, + "sbp2 s/g list", command->sge_dma); + + /* Byte swap page tables if necessary */ + sbp2util_cpu_to_be32_buffer(sg_element, + (sizeof(struct sbp2_unrestricted_page_table)) * + sg_count); + } +} + /* * This function is called to create the actual command orb and s/g list * out of the scsi command itself. */ -static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id, - struct sbp2_command_info *command, - unchar *scsi_cmd, - unsigned int scsi_use_sg, - unsigned int scsi_request_bufflen, - void *scsi_request_buffer, - enum dma_data_direction dma_dir) +static void sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id, + struct sbp2_command_info *command, + unchar *scsi_cmd, + unsigned int scsi_use_sg, + unsigned int scsi_request_bufflen, + void *scsi_request_buffer, + enum dma_data_direction dma_dir) { struct sbp2scsi_host_info *hi = scsi_id->hi; struct scatterlist *sgpnt = (struct scatterlist *)scsi_request_buffer; struct sbp2_command_orb *command_orb = &command->command_orb; - struct sbp2_unrestricted_page_table *scatter_gather_element = - &command->scatter_gather_element[0]; - u32 sg_count, sg_len, orb_direction; - dma_addr_t sg_addr; - int i; + u32 orb_direction; /* * Set-up our command ORB.. 
@@ -1753,186 +1911,29 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id, orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER; } - /* - * Set-up our pagetable stuff... unfortunately, this has become - * messier than I'd like. Need to clean this up a bit. ;-) - */ + /* Set-up our pagetable stuff */ if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) { - SBP2_DEBUG("No data transfer"); - - /* - * Handle no data transfer - */ command_orb->data_descriptor_hi = 0x0; command_orb->data_descriptor_lo = 0x0; command_orb->misc |= ORB_SET_DIRECTION(1); - } else if (scsi_use_sg) { - SBP2_DEBUG("Use scatter/gather"); - - /* - * Special case if only one element (and less than 64KB in size) - */ - if ((scsi_use_sg == 1) && (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) { - - SBP2_DEBUG("Only one s/g element"); - command->dma_dir = dma_dir; - command->dma_size = sgpnt[0].length; - command->dma_type = CMD_DMA_PAGE; - command->cmd_dma = pci_map_page(hi->host->pdev, - sgpnt[0].page, - sgpnt[0].offset, - command->dma_size, - command->dma_dir); - SBP2_DMA_ALLOC("single page scatter element"); - - command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id); - command_orb->data_descriptor_lo = command->cmd_dma; - command_orb->misc |= ORB_SET_DATA_SIZE(command->dma_size); - command_orb->misc |= ORB_SET_DIRECTION(orb_direction); - - } else { - int count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg, dma_dir); - SBP2_DMA_ALLOC("scatter list"); - - command->dma_size = scsi_use_sg; - command->dma_dir = dma_dir; - command->sge_buffer = sgpnt; - - /* use page tables (s/g) */ - command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1); - command_orb->misc |= ORB_SET_DIRECTION(orb_direction); - command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id); - command_orb->data_descriptor_lo = command->sge_dma; - - /* - * Loop through and fill out our sbp-2 page tables - * (and split up anything too large) - */ - for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) { - sg_len = sg_dma_len(sgpnt); - sg_addr = sg_dma_address(sgpnt); - while (sg_len) { - scatter_gather_element[sg_count].segment_base_lo = sg_addr; - if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) { - scatter_gather_element[sg_count].length_segment_base_hi = - PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH); - sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH; - sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH; - } else { - scatter_gather_element[sg_count].length_segment_base_hi = - PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len); - sg_len = 0; - } - sg_count++; - } - } - - /* Number of page table (s/g) elements */ - command_orb->misc |= ORB_SET_DATA_SIZE(sg_count); - - sbp2util_packet_dump(scatter_gather_element, - (sizeof(struct sbp2_unrestricted_page_table)) * sg_count, - "sbp2 s/g list", command->sge_dma); - - /* - * Byte swap page tables if necessary - */ - sbp2util_cpu_to_be32_buffer(scatter_gather_element, - (sizeof(struct sbp2_unrestricted_page_table)) * - sg_count); - - } - + sbp2_prep_command_orb_sg(command_orb, hi, command, scsi_use_sg, + sgpnt, orb_direction, dma_dir); } else { - SBP2_DEBUG("No scatter/gather"); - - command->dma_dir = dma_dir; - command->dma_size = scsi_request_bufflen; - command->dma_type = CMD_DMA_SINGLE; - command->cmd_dma = - pci_map_single(hi->host->pdev, scsi_request_buffer, - command->dma_size, command->dma_dir); - SBP2_DMA_ALLOC("single bulk"); - - /* - * Handle case where we get a command w/o s/g enabled (but - * check for transfers larger than 64K) - */ - if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) { - - 
command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id); - command_orb->data_descriptor_lo = command->cmd_dma; - command_orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen); - command_orb->misc |= ORB_SET_DIRECTION(orb_direction); - - } else { - /* - * Need to turn this into page tables, since the - * buffer is too large. - */ - command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id); - command_orb->data_descriptor_lo = command->sge_dma; - - /* Use page tables (s/g) */ - command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1); - command_orb->misc |= ORB_SET_DIRECTION(orb_direction); - - /* - * fill out our sbp-2 page tables (and split up - * the large buffer) - */ - sg_count = 0; - sg_len = scsi_request_bufflen; - sg_addr = command->cmd_dma; - while (sg_len) { - scatter_gather_element[sg_count].segment_base_lo = sg_addr; - if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) { - scatter_gather_element[sg_count].length_segment_base_hi = - PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH); - sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH; - sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH; - } else { - scatter_gather_element[sg_count].length_segment_base_hi = - PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len); - sg_len = 0; - } - sg_count++; - } - - /* Number of page table (s/g) elements */ - command_orb->misc |= ORB_SET_DATA_SIZE(sg_count); - - sbp2util_packet_dump(scatter_gather_element, - (sizeof(struct sbp2_unrestricted_page_table)) * sg_count, - "sbp2 s/g list", command->sge_dma); - - /* - * Byte swap page tables if necessary - */ - sbp2util_cpu_to_be32_buffer(scatter_gather_element, - (sizeof(struct sbp2_unrestricted_page_table)) * - sg_count); - - } - + sbp2_prep_command_orb_no_sg(command_orb, hi, command, sgpnt, + orb_direction, scsi_request_bufflen, + scsi_request_buffer, dma_dir); } - /* - * Byte swap command ORB if necessary - */ + /* Byte swap command ORB if necessary */ sbp2util_cpu_to_be32_buffer(command_orb, sizeof(struct sbp2_command_orb)); - /* - * Put our scsi command in the command ORB - */ + /* Put our scsi command in the command ORB */ memset(command_orb->cdb, 0, 12); memcpy(command_orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd)); - - return 0; } /* diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index 8e227c52b72f..900ea1d25e71 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h @@ -410,13 +410,6 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id); static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t *data, u64 addr, size_t length, u16 flags); static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait); -static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id, - struct sbp2_command_info *command, - unchar *scsi_cmd, - unsigned int scsi_use_sg, - unsigned int scsi_request_bufflen, - void *scsi_request_buffer, - enum dma_data_direction dma_dir); static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id, struct sbp2_command_info *command); static int sbp2_send_command(struct scsi_id_instance_data *scsi_id, -- cgit v1.2.3 From eaceec7f6cc5223d0f146086884d67746b8aa81d Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Tue, 13 Dec 2005 11:05:05 -0500 Subject: sbp2: remove duplicate code from sbp2_start_device() Use sbp2_remove_device() to free FIFO and ORB DMAs in a failure case. 
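With the cleanup below, every allocation in sbp2_start_device() can fail through a single label instead of repeating the free calls inline; schematically (condensed from the hunk that follows):

	scsi_id->login_orb = pci_alloc_consistent(hi->host->pdev,
						  sizeof(struct sbp2_login_orb),
						  &scsi_id->login_orb_dma);
	if (!scsi_id->login_orb)
		goto alloc_fail;

	/* ... the remaining ORB and response buffers are allocated the same way ... */

	return 0;

alloc_fail:
	SBP2_ERR("Could not allocate memory for scsi_id");
	sbp2_remove_device(scsi_id);
	return -ENOMEM;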
Signed-off-by: Stefan Richter Signed-off-by: Jody McIntyre --- drivers/ieee1394/sbp2.c | 57 ++++++------------------------------------------- 1 file changed, 7 insertions(+), 50 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 14b0c35ee9a4..18d7eda38851 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -856,56 +856,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id) pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_orb), &scsi_id->login_orb_dma); - if (!scsi_id->login_orb) { -alloc_fail: - if (scsi_id->query_logins_response) { - pci_free_consistent(hi->host->pdev, - sizeof(struct sbp2_query_logins_response), - scsi_id->query_logins_response, - scsi_id->query_logins_response_dma); - SBP2_DMA_FREE("query logins response DMA"); - } - - if (scsi_id->query_logins_orb) { - pci_free_consistent(hi->host->pdev, - sizeof(struct sbp2_query_logins_orb), - scsi_id->query_logins_orb, - scsi_id->query_logins_orb_dma); - SBP2_DMA_FREE("query logins ORB DMA"); - } - - if (scsi_id->logout_orb) { - pci_free_consistent(hi->host->pdev, - sizeof(struct sbp2_logout_orb), - scsi_id->logout_orb, - scsi_id->logout_orb_dma); - SBP2_DMA_FREE("logout ORB DMA"); - } - - if (scsi_id->reconnect_orb) { - pci_free_consistent(hi->host->pdev, - sizeof(struct sbp2_reconnect_orb), - scsi_id->reconnect_orb, - scsi_id->reconnect_orb_dma); - SBP2_DMA_FREE("reconnect ORB DMA"); - } - - if (scsi_id->login_response) { - pci_free_consistent(hi->host->pdev, - sizeof(struct sbp2_login_response), - scsi_id->login_response, - scsi_id->login_response_dma); - SBP2_DMA_FREE("login FIFO DMA"); - } - - list_del(&scsi_id->scsi_list); - - kfree(scsi_id); - - SBP2_ERR("Could not allocate memory for scsi_id"); - - return -ENOMEM; - } + if (!scsi_id->login_orb) + goto alloc_fail; SBP2_DMA_ALLOC("consistent DMA region for login ORB"); SBP2_DEBUG("New SBP-2 device inserted, SCSI ID = %x", scsi_id->ud->id); @@ -966,6 +918,11 @@ alloc_fail: } return 0; + +alloc_fail: + SBP2_ERR("Could not allocate memory for scsi_id"); + sbp2_remove_device(scsi_id); + return -ENOMEM; } /* -- cgit v1.2.3 From 994fc28c7b1e697ac56befe4aecabf23f0689f46 Mon Sep 17 00:00:00 2001 From: Zach Brown Date: Thu, 15 Dec 2005 14:28:17 -0800 Subject: [PATCH] add AOP_TRUNCATED_PAGE, prepend AOP_ to WRITEPAGE_ACTIVATE readpage(), prepare_write(), and commit_write() callers are updated to understand the special return code AOP_TRUNCATED_PAGE in the style of writepage() and WRITEPAGE_ACTIVATE. AOP_TRUNCATED_PAGE tells the caller that the callee has unlocked the page and that the operation should be tried again with a new page. OCFS2 uses this to detect and work around a lock inversion in its aop methods. There should be no change in behaviour for methods that don't return AOP_TRUNCATED_PAGE. WRITEPAGE_ACTIVATE is also prepended with AOP_ for consistency and they are made enums so that kerneldoc can be used to document their semantics. 
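The caller-side contract is easiest to see in the loop.c hunks below: each aop call is wrapped in a retry of roughly this shape:

	ret = aops->prepare_write(file, page, offset, offset + size);
	if (unlikely(ret)) {
		if (ret == AOP_TRUNCATED_PAGE) {
			/* callee already unlocked the page; drop our reference
			 * and retry the operation with a freshly grabbed page */
			page_cache_release(page);
			continue;
		}
		goto unlock;
	}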
Signed-off-by: Zach Brown --- drivers/block/loop.c | 23 ++++++++++++++++++----- drivers/block/rd.c | 4 ++-- 2 files changed, 20 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 96c664af8d06..a452b13620a2 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -213,7 +213,7 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec, struct address_space_operations *aops = mapping->a_ops; pgoff_t index; unsigned offset, bv_offs; - int len, ret = 0; + int len, ret; down(&mapping->host->i_sem); index = pos >> PAGE_CACHE_SHIFT; @@ -232,9 +232,15 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec, page = grab_cache_page(mapping, index); if (unlikely(!page)) goto fail; - if (unlikely(aops->prepare_write(file, page, offset, - offset + size))) + ret = aops->prepare_write(file, page, offset, + offset + size); + if (unlikely(ret)) { + if (ret == AOP_TRUNCATED_PAGE) { + page_cache_release(page); + continue; + } goto unlock; + } transfer_result = lo_do_transfer(lo, WRITE, page, offset, bvec->bv_page, bv_offs, size, IV); if (unlikely(transfer_result)) { @@ -251,9 +257,15 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec, kunmap_atomic(kaddr, KM_USER0); } flush_dcache_page(page); - if (unlikely(aops->commit_write(file, page, offset, - offset + size))) + ret = aops->commit_write(file, page, offset, + offset + size); + if (unlikely(ret)) { + if (ret == AOP_TRUNCATED_PAGE) { + page_cache_release(page); + continue; + } goto unlock; + } if (unlikely(transfer_result)) goto unlock; bv_offs += size; @@ -264,6 +276,7 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec, unlock_page(page); page_cache_release(page); } + ret = 0; out: up(&mapping->host->i_sem); return ret; diff --git a/drivers/block/rd.c b/drivers/block/rd.c index 68c60a5bcdab..ffd6abd6d5a0 100644 --- a/drivers/block/rd.c +++ b/drivers/block/rd.c @@ -154,7 +154,7 @@ static int ramdisk_commit_write(struct file *file, struct page *page, /* * ->writepage to the the blockdev's mapping has to redirty the page so that the - * VM doesn't go and steal it. We return WRITEPAGE_ACTIVATE so that the VM + * VM doesn't go and steal it. We return AOP_WRITEPAGE_ACTIVATE so that the VM * won't try to (pointlessly) write the page again for a while. * * Really, these pages should not be on the LRU at all. @@ -165,7 +165,7 @@ static int ramdisk_writepage(struct page *page, struct writeback_control *wbc) make_page_uptodate(page); SetPageDirty(page); if (wbc->for_reclaim) - return WRITEPAGE_ACTIVATE; + return AOP_WRITEPAGE_ACTIVATE; unlock_page(page); return 0; } -- cgit v1.2.3 From 8ffdc6550c47f75ca4e6c9f30a2a89063e035cf2 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 6 Jan 2006 09:49:03 +0100 Subject: [BLOCK] add @uptodate to end_that_request_last() and @error to rq_end_io_fn() add @uptodate argument to end_that_request_last() and @error to rq_end_io_fn(). there's no generic way to pass error code to request completion function, making generic error handling of non-fs request difficult (rq->errors is driver-specific and each driver uses it differently). this patch adds @uptodate to end_that_request_last() and @error to rq_end_io_fn(). for fs requests, this doesn't really matter, so just using the same uptodate argument used in the last call to end_that_request_first() should suffice. imho, this can also help the generic command-carrying request jens is working on. 
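For fs requests the conversion at the call sites below is mechanical: the driver forwards the same uptodate value it passed to the last end_that_request_first() call (or a negative error such as -EIO where it has one), roughly:

	if (!end_that_request_first(req, uptodate, req->hard_nr_sectors)) {
		add_disk_randomness(req->rq_disk);
		blkdev_dequeue_request(req);
		/* new second argument: uptodate for fs requests, or an error code */
		end_that_request_last(req, uptodate);
	}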
Signed-off-by: tejun heo Signed-Off-By: Jens Axboe --- drivers/block/DAC960.c | 2 +- drivers/block/cciss.c | 2 +- drivers/block/cpqarray.c | 2 +- drivers/block/floppy.c | 2 +- drivers/block/nbd.c | 2 +- drivers/block/sx8.c | 2 +- drivers/block/ub.c | 2 +- drivers/block/viodasd.c | 2 +- drivers/cdrom/cdu31a.c | 2 +- drivers/ide/ide-cd.c | 4 ++-- drivers/ide/ide-io.c | 6 +++--- drivers/message/i2o/i2o_block.c | 2 +- drivers/mmc/mmc_block.c | 4 ++-- drivers/s390/block/dasd.c | 2 +- drivers/s390/char/tape_block.c | 2 +- drivers/scsi/ide-scsi.c | 4 ++-- drivers/scsi/scsi_lib.c | 2 +- drivers/scsi/sd.c | 2 +- 18 files changed, 23 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 70eaa5c7ac08..21097a39a057 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -3471,7 +3471,7 @@ static inline boolean DAC960_ProcessCompletedRequest(DAC960_Command_T *Command, if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) { - end_that_request_last(Request); + end_that_request_last(Request, UpToDate); if (Command->Completion) { complete(Command->Completion); diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index c3441b3f086e..d2815b7a9150 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -2310,7 +2310,7 @@ static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd, printk("Done with %p\n", cmd->rq); #endif /* CCISS_DEBUG */ - end_that_request_last(cmd->rq); + end_that_request_last(cmd->rq, status ? 1 : -EIO); cmd_free(h,cmd,1); } diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index cf1822a6361c..9bddb6874873 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c @@ -1036,7 +1036,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout) complete_buffers(cmd->rq->bio, ok); DBGPX(printk("Done with %p\n", cmd->rq);); - end_that_request_last(cmd->rq); + end_that_request_last(cmd->rq, ok ? 
1 : -EIO); } /* diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index f7e765a1d313..a5b857c5c4b8 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -2301,7 +2301,7 @@ static void floppy_end_request(struct request *req, int uptodate) add_disk_randomness(req->rq_disk); floppy_off((long)req->rq_disk->private_data); blkdev_dequeue_request(req); - end_that_request_last(req); + end_that_request_last(req, uptodate); /* We're done with the request */ current_req = NULL; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 9e268ddedfbd..485345c8e632 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -136,7 +136,7 @@ static void nbd_end_request(struct request *req) spin_lock_irqsave(q->queue_lock, flags); if (!end_that_request_first(req, uptodate, req->nr_sectors)) { - end_that_request_last(req); + end_that_request_last(req, uptodate); } spin_unlock_irqrestore(q->queue_lock, flags); } diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index 1ded3b433459..9251f4131b53 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c @@ -770,7 +770,7 @@ static inline void carm_end_request_queued(struct carm_host *host, rc = end_that_request_first(req, uptodate, req->hard_nr_sectors); assert(rc == 0); - end_that_request_last(req); + end_that_request_last(req, uptodate); rc = carm_put_request(host, crq); assert(rc == 0); diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 10740a065088..a05fe5843e6c 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -951,7 +951,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) static void ub_end_rq(struct request *rq, int uptodate) { end_that_request_first(rq, uptodate, rq->hard_nr_sectors); - end_that_request_last(rq); + end_that_request_last(rq, uptodate); } static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c index 2d518aa2720a..063f0304a163 100644 --- a/drivers/block/viodasd.c +++ b/drivers/block/viodasd.c @@ -305,7 +305,7 @@ static void viodasd_end_request(struct request *req, int uptodate, if (end_that_request_first(req, uptodate, num_sectors)) return; add_disk_randomness(req->rq_disk); - end_that_request_last(req); + end_that_request_last(req, uptodate); } /* diff --git a/drivers/cdrom/cdu31a.c b/drivers/cdrom/cdu31a.c index ac96de15d833..378e88d20757 100644 --- a/drivers/cdrom/cdu31a.c +++ b/drivers/cdrom/cdu31a.c @@ -1402,7 +1402,7 @@ static void do_cdu31a_request(request_queue_t * q) if (!end_that_request_first(req, 1, nblock)) { spin_lock_irq(q->queue_lock); blkdev_dequeue_request(req); - end_that_request_last(req); + end_that_request_last(req, 1); spin_unlock_irq(q->queue_lock); } continue; diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 70aeb3a60120..d31117eb95aa 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -614,7 +614,7 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate) */ spin_lock_irqsave(&ide_lock, flags); end_that_request_chunk(failed, 0, failed->data_len); - end_that_request_last(failed); + end_that_request_last(failed, 0); spin_unlock_irqrestore(&ide_lock, flags); } @@ -1735,7 +1735,7 @@ end_request: spin_lock_irqsave(&ide_lock, flags); blkdev_dequeue_request(rq); - end_that_request_last(rq); + end_that_request_last(rq, 1); HWGROUP(drive)->rq = NULL; spin_unlock_irqrestore(&ide_lock, flags); return ide_stopped; diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index ecfafcdafea4..8435b44a700b 100644 --- a/drivers/ide/ide-io.c +++ 
b/drivers/ide/ide-io.c @@ -89,7 +89,7 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate, blkdev_dequeue_request(rq); HWGROUP(drive)->rq = NULL; - end_that_request_last(rq); + end_that_request_last(rq, uptodate); ret = 0; } return ret; @@ -247,7 +247,7 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq) } blkdev_dequeue_request(rq); HWGROUP(drive)->rq = NULL; - end_that_request_last(rq); + end_that_request_last(rq, 1); spin_unlock_irqrestore(&ide_lock, flags); } @@ -379,7 +379,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) blkdev_dequeue_request(rq); HWGROUP(drive)->rq = NULL; rq->errors = err; - end_that_request_last(rq); + end_that_request_last(rq, !rq->errors); spin_unlock_irqrestore(&ide_lock, flags); } diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index f283b5bafdd3..4f522527b7ed 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -466,7 +466,7 @@ static void i2o_block_end_request(struct request *req, int uptodate, spin_lock_irqsave(q->queue_lock, flags); - end_that_request_last(req); + end_that_request_last(req, uptodate); if (likely(dev)) { dev->open_queue_depth--; diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c index abcf19116d70..8e380c14bf65 100644 --- a/drivers/mmc/mmc_block.c +++ b/drivers/mmc/mmc_block.c @@ -263,7 +263,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) */ add_disk_randomness(req->rq_disk); blkdev_dequeue_request(req); - end_that_request_last(req); + end_that_request_last(req, 1); } spin_unlock_irq(&md->lock); } while (ret); @@ -289,7 +289,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) add_disk_randomness(req->rq_disk); blkdev_dequeue_request(req); - end_that_request_last(req); + end_that_request_last(req, 0); spin_unlock_irq(&md->lock); return 0; diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 7008d32433bf..fdb61380c523 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1035,7 +1035,7 @@ dasd_end_request(struct request *req, int uptodate) if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) BUG(); add_disk_randomness(req->rq_disk); - end_that_request_last(req); + end_that_request_last(req, uptodate); } /* diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 1efc9f21229e..559d51490e2f 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c @@ -78,7 +78,7 @@ tapeblock_end_request(struct request *req, int uptodate) { if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) BUG(); - end_that_request_last(req); + end_that_request_last(req, uptodate); } static void diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c index 4cb1f3ed9100..3c688ef54660 100644 --- a/drivers/scsi/ide-scsi.c +++ b/drivers/scsi/ide-scsi.c @@ -1046,7 +1046,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd) /* kill current request */ blkdev_dequeue_request(req); - end_that_request_last(req); + end_that_request_last(req, 0); if (req->flags & REQ_SENSE) kfree(scsi->pc->buffer); kfree(scsi->pc); @@ -1056,7 +1056,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd) /* now nuke the drive queue */ while ((req = elv_next_request(drive->queue))) { blkdev_dequeue_request(req); - end_that_request_last(req); + end_that_request_last(req, 0); } HWGROUP(drive)->rq = NULL; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 
a7f3f0c84db7..53551f1dfe21 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -791,7 +791,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate, spin_lock_irqsave(q->queue_lock, flags); if (blk_rq_tagged(req)) blk_queue_end_tag(q, req); - end_that_request_last(req); + end_that_request_last(req, uptodate); spin_unlock_irqrestore(q->queue_lock, flags); /* diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 3d3ad7d1b779..d651150ee76d 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -748,7 +748,7 @@ static void sd_end_flush(request_queue_t *q, struct request *flush_rq) * force journal abort of barriers */ end_that_request_first(rq, -EOPNOTSUPP, rq->hard_nr_sectors); - end_that_request_last(rq); + end_that_request_last(rq, -EOPNOTSUPP); } } -- cgit v1.2.3 From 461d4e90c8cd049718884cd17c955e231140d3be Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 6 Jan 2006 09:52:55 +0100 Subject: [BLOCK] update SCSI to use new blk_ordered for barriers All ordered request related stuff delegated to HLD. Midlayer now doesn't deal with ordered setting or prepare_flush callback. sd.c updated to deal with blk_queue_ordered setting. Currently, ordered tag isn't used as SCSI midlayer cannot guarantee request ordering. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- drivers/scsi/hosts.c | 9 --------- drivers/scsi/scsi_lib.c | 46 ---------------------------------------------- drivers/scsi/sd.c | 58 ++++++++++++++++-------------------------------------- 3 files changed, 20 insertions(+), 93 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 5b9c2c5a7f0e..66783c860a19 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -347,17 +347,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) shost->cmd_per_lun = sht->cmd_per_lun; shost->unchecked_isa_dma = sht->unchecked_isa_dma; shost->use_clustering = sht->use_clustering; - shost->ordered_flush = sht->ordered_flush; shost->ordered_tag = sht->ordered_tag; - /* - * hosts/devices that do queueing must support ordered tags - */ - if (shost->can_queue > 1 && shost->ordered_flush) { - printk(KERN_ERR "scsi: ordered flushes don't support queueing\n"); - shost->ordered_flush = 0; - } - if (sht->max_host_blocked) shost->max_host_blocked = sht->max_host_blocked; else diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 53551f1dfe21..7a38b101976c 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -932,9 +932,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes, int sense_valid = 0; int sense_deferred = 0; - if (blk_complete_barrier_rq(q, req, good_bytes >> 9)) - return; - /* * Free up any indirection buffers we allocated for DMA purposes.
* For the case of a READ, we need to copy the data out of the @@ -1199,38 +1196,6 @@ static int scsi_init_io(struct scsi_cmnd *cmd) return BLKPREP_KILL; } -static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq) -{ - struct scsi_device *sdev = q->queuedata; - struct scsi_driver *drv; - - if (sdev->sdev_state == SDEV_RUNNING) { - drv = *(struct scsi_driver **) rq->rq_disk->private_data; - - if (drv->prepare_flush) - return drv->prepare_flush(q, rq); - } - - return 0; -} - -static void scsi_end_flush_fn(request_queue_t *q, struct request *rq) -{ - struct scsi_device *sdev = q->queuedata; - struct request *flush_rq = rq->end_io_data; - struct scsi_driver *drv; - - if (flush_rq->errors) { - printk("scsi: barrier error, disabling flush support\n"); - blk_queue_ordered(q, QUEUE_ORDERED_NONE); - } - - if (sdev->sdev_state == SDEV_RUNNING) { - drv = *(struct scsi_driver **) rq->rq_disk->private_data; - drv->end_flush(q, rq); - } -} - static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk, sector_t *error_sector) { @@ -1703,17 +1668,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) blk_queue_segment_boundary(q, shost->dma_boundary); blk_queue_issue_flush_fn(q, scsi_issue_flush_fn); - /* - * ordered tags are superior to flush ordering - */ - if (shost->ordered_tag) - blk_queue_ordered(q, QUEUE_ORDERED_TAG); - else if (shost->ordered_flush) { - blk_queue_ordered(q, QUEUE_ORDERED_FLUSH); - q->prepare_flush_fn = scsi_prepare_flush_fn; - q->end_flush_fn = scsi_end_flush_fn; - } - if (!shost->use_clustering) clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); return q; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index d651150ee76d..2eefc9eb5da6 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -121,8 +121,7 @@ static void sd_shutdown(struct device *dev); static void sd_rescan(struct device *); static int sd_init_command(struct scsi_cmnd *); static int sd_issue_flush(struct device *, sector_t *); -static void sd_end_flush(request_queue_t *, struct request *); -static int sd_prepare_flush(request_queue_t *, struct request *); +static void sd_prepare_flush(request_queue_t *, struct request *); static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname, unsigned char *buffer); @@ -137,8 +136,6 @@ static struct scsi_driver sd_template = { .rescan = sd_rescan, .init_command = sd_init_command, .issue_flush = sd_issue_flush, - .prepare_flush = sd_prepare_flush, - .end_flush = sd_end_flush, }; /* @@ -729,42 +726,13 @@ static int sd_issue_flush(struct device *dev, sector_t *error_sector) return ret; } -static void sd_end_flush(request_queue_t *q, struct request *flush_rq) +static void sd_prepare_flush(request_queue_t *q, struct request *rq) { - struct request *rq = flush_rq->end_io_data; - struct scsi_cmnd *cmd = rq->special; - unsigned int bytes = rq->hard_nr_sectors << 9; - - if (!flush_rq->errors) { - spin_unlock(q->queue_lock); - scsi_io_completion(cmd, bytes, 0); - spin_lock(q->queue_lock); - } else if (blk_barrier_postflush(rq)) { - spin_unlock(q->queue_lock); - scsi_io_completion(cmd, 0, bytes); - spin_lock(q->queue_lock); - } else { - /* - * force journal abort of barriers - */ - end_that_request_first(rq, -EOPNOTSUPP, rq->hard_nr_sectors); - end_that_request_last(rq, -EOPNOTSUPP); - } -} - -static int sd_prepare_flush(request_queue_t *q, struct request *rq) -{ - struct scsi_device *sdev = q->queuedata; - struct scsi_disk *sdkp = dev_get_drvdata(&sdev->sdev_gendev); - - if (!sdkp || !sdkp->WCE) - return 0; - memset(rq->cmd, 0, 
sizeof(rq->cmd)); - rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER; + rq->flags |= REQ_BLOCK_PC; rq->timeout = SD_TIMEOUT; rq->cmd[0] = SYNCHRONIZE_CACHE; - return 1; + rq->cmd_len = 10; } static void sd_rescan(struct device *dev) @@ -1462,6 +1430,7 @@ static int sd_revalidate_disk(struct gendisk *disk) struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; unsigned char *buffer; + unsigned ordered; SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name)); @@ -1498,7 +1467,20 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_write_protect_flag(sdkp, disk->disk_name, buffer); sd_read_cache_type(sdkp, disk->disk_name, buffer); } - + + /* + * We now have all cache related info, determine how we deal + * with ordered requests. Note that as the current SCSI + * dispatch function can alter request order, we cannot use + * QUEUE_ORDERED_TAG_* even when ordered tag is supported. + */ + if (sdkp->WCE) + ordered = QUEUE_ORDERED_DRAIN_FLUSH; + else + ordered = QUEUE_ORDERED_DRAIN; + + blk_queue_ordered(sdkp->disk->queue, ordered, sd_prepare_flush); + set_capacity(disk, sdkp->capacity); kfree(buffer); @@ -1598,6 +1580,7 @@ static int sd_probe(struct device *dev) strcpy(gd->devfs_name, sdp->devfs_name); gd->private_data = &sdkp->driver; + gd->queue = sdkp->device->request_queue; sd_revalidate_disk(gd); @@ -1605,7 +1588,6 @@ static int sd_probe(struct device *dev) gd->flags = GENHD_FL_DRIVERFS; if (sdp->removable) gd->flags |= GENHD_FL_REMOVABLE; - gd->queue = sdkp->device->request_queue; dev_set_drvdata(dev, sdkp); add_disk(gd); -- cgit v1.2.3 From 007365ad60387df30f02f01fdc2b6e6432f6c265 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 6 Jan 2006 09:53:52 +0100 Subject: [BLOCK] scsi: add FUA support to sd Add FUA support for barriers to SCSI disk. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- drivers/scsi/sd.c | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 2eefc9eb5da6..32d4d8d7b9f3 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -102,6 +102,7 @@ struct scsi_disk { u8 write_prot; unsigned WCE : 1; /* state of disk WCE bit */ unsigned RCD : 1; /* state of disk RCD bit, unused */ + unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ }; static DEFINE_IDR(sd_index_idr); @@ -343,6 +344,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt) if (block > 0xffffffff) { SCpnt->cmnd[0] += READ_16 - READ_6; + SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0; SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0; SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0; @@ -362,6 +364,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt) this_count = 0xffff; SCpnt->cmnd[0] += READ_10 - READ_6; + SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0; SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff; SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff; @@ -370,6 +373,17 @@ static int sd_init_command(struct scsi_cmnd * SCpnt) SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff; SCpnt->cmnd[8] = (unsigned char) this_count & 0xff; } else { + if (unlikely(blk_fua_rq(rq))) { + /* + * This happens only if this drive failed + * 10byte rw command with ILLEGAL_REQUEST + * during operation and thus turned off + * use_10_for_rw. 
+ */ + printk(KERN_ERR "sd: FUA write on READ/WRITE(6) drive\n"); + return 0; + } + SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f); SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff); SCpnt->cmnd[3] = (unsigned char) block & 0xff; @@ -1395,10 +1409,18 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname, sdkp->RCD = 0; } + sdkp->DPOFUA = (data.device_specific & 0x10) != 0; + if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { + printk(KERN_NOTICE "SCSI device %s: uses " + "READ/WRITE(6), disabling FUA\n", diskname); + sdkp->DPOFUA = 0; + } + ct = sdkp->RCD + 2*sdkp->WCE; - printk(KERN_NOTICE "SCSI device %s: drive cache: %s\n", - diskname, types[ct]); + printk(KERN_NOTICE "SCSI device %s: drive cache: %s%s\n", + diskname, types[ct], + sdkp->DPOFUA ? " w/ FUA" : ""); return; } @@ -1475,7 +1497,8 @@ static int sd_revalidate_disk(struct gendisk *disk) * QUEUE_ORDERED_TAG_* even when ordered tag is supported. */ if (sdkp->WCE) - ordered = QUEUE_ORDERED_DRAIN_FLUSH; + ordered = sdkp->DPOFUA + ? QUEUE_ORDERED_DRAIN_FUA : QUEUE_ORDERED_DRAIN_FLUSH; else ordered = QUEUE_ORDERED_DRAIN; -- cgit v1.2.3 From 93c9338713d4e11102cd09b4670ad42a336b06a3 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 6 Jan 2006 09:55:00 +0100 Subject: [BLOCK] update libata to use new blk_ordered for barriers Reflect changes in SCSI midlayer and updated to use new ordered request implementation Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- drivers/scsi/ahci.c | 1 - drivers/scsi/ata_piix.c | 1 - drivers/scsi/sata_mv.c | 1 - drivers/scsi/sata_nv.c | 1 - drivers/scsi/sata_promise.c | 1 - drivers/scsi/sata_sil.c | 1 - drivers/scsi/sata_sil24.c | 1 - drivers/scsi/sata_sis.c | 1 - drivers/scsi/sata_svw.c | 1 - drivers/scsi/sata_sx4.c | 1 - drivers/scsi/sata_uli.c | 1 - drivers/scsi/sata_via.c | 1 - drivers/scsi/sata_vsc.c | 1 - 13 files changed, 13 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c index 887eaa2a3ebf..d113290b5fc0 100644 --- a/drivers/scsi/ahci.c +++ b/drivers/scsi/ahci.c @@ -214,7 +214,6 @@ static struct scsi_host_template ahci_sht = { .dma_boundary = AHCI_DMA_BOUNDARY, .slave_configure = ata_scsi_slave_config, .bios_param = ata_std_bios_param, - .ordered_flush = 1, }; static const struct ata_port_operations ahci_ops = { diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c index 0ea27873b9ff..4b647eefc9a9 100644 --- a/drivers/scsi/ata_piix.c +++ b/drivers/scsi/ata_piix.c @@ -185,7 +185,6 @@ static struct scsi_host_template piix_sht = { .dma_boundary = ATA_DMA_BOUNDARY, .slave_configure = ata_scsi_slave_config, .bios_param = ata_std_bios_param, - .ordered_flush = 1, }; static const struct ata_port_operations piix_pata_ops = { diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c index b2bf16a9bf4b..cd54244058b5 100644 --- a/drivers/scsi/sata_mv.c +++ b/drivers/scsi/sata_mv.c @@ -374,7 +374,6 @@ static struct scsi_host_template mv_sht = { .dma_boundary = MV_DMA_BOUNDARY, .slave_configure = ata_scsi_slave_config, .bios_param = ata_std_bios_param, - .ordered_flush = 1, }; static const struct ata_port_operations mv5_ops = { diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c index 4954896dfdb9..c0cf52cb975a 100644 --- a/drivers/scsi/sata_nv.c +++ b/drivers/scsi/sata_nv.c @@ -235,7 +235,6 @@ static struct scsi_host_template nv_sht = { .dma_boundary = ATA_DMA_BOUNDARY, .slave_configure = ata_scsi_slave_config, .bios_param = ata_std_bios_param, - .ordered_flush = 1, }; static const struct ata_port_operations nv_ops = { 
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c index da7fa04b8a73..3d1ea09a06a1 100644 --- a/drivers/scsi/sata_promise.c +++ b/drivers/scsi/sata_promise.c @@ -114,7 +114,6 @@ static struct scsi_host_template pdc_ata_sht = { .dma_boundary = ATA_DMA_BOUNDARY, .slave_configure = ata_scsi_slave_config, .bios_param = ata_std_bios_param, - .ordered_flush = 1, }; static const struct ata_port_operations pdc_sata_ops = { diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c index d2053487c73b..b017f85e6d6a 100644 --- a/drivers/scsi/sata_sil.c +++ b/drivers/scsi/sata_sil.c @@ -147,7 +147,6 @@ static struct scsi_host_template sil_sht = { .dma_boundary = ATA_DMA_BOUNDARY, .slave_configure = ata_scsi_slave_config, .bios_param = ata_std_bios_param, - .ordered_flush = 1, }; static const struct ata_port_operations sil_ops = { diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c index a0ad3ed2200a..923130185a9e 100644 --- a/drivers/scsi/sata_sil24.c +++ b/drivers/scsi/sata_sil24.c @@ -292,7 +292,6 @@ static struct scsi_host_template sil24_sht = { .dma_boundary = ATA_DMA_BOUNDARY, .slave_configure = ata_scsi_slave_config, .bios_param = ata_std_bios_param, - .ordered_flush = 1, /* NCQ not supported yet */ }; static const struct ata_port_operations sil24_ops = { diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c index 32e12620b162..2df8c5632ac3 100644 --- a/drivers/scsi/sata_sis.c +++ b/drivers/scsi/sata_sis.c @@ -99,7 +99,6 @@ static struct scsi_host_template sis_sht = { .dma_boundary = ATA_DMA_BOUNDARY, .slave_configure = ata_scsi_slave_config, .bios_param = ata_std_bios_param, - .ordered_flush = 1, }; static const struct ata_port_operations sis_ops = { diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c index 6e7f7c83a75a..668373590aa4 100644 --- a/drivers/scsi/sata_svw.c +++ b/drivers/scsi/sata_svw.c @@ -303,7 +303,6 @@ static struct scsi_host_template k2_sata_sht = { .proc_info = k2_sata_proc_info, #endif .bios_param = ata_std_bios_param, - .ordered_flush = 1, }; diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c index 94b253b80da8..bc87c16c80d2 100644 --- a/drivers/scsi/sata_sx4.c +++ b/drivers/scsi/sata_sx4.c @@ -194,7 +194,6 @@ static struct scsi_host_template pdc_sata_sht = { .dma_boundary = ATA_DMA_BOUNDARY, .slave_configure = ata_scsi_slave_config, .bios_param = ata_std_bios_param, - .ordered_flush = 1, }; static const struct ata_port_operations pdc_20621_ops = { diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c index b2422a0f25c8..9635ca700977 100644 --- a/drivers/scsi/sata_uli.c +++ b/drivers/scsi/sata_uli.c @@ -87,7 +87,6 @@ static struct scsi_host_template uli_sht = { .dma_boundary = ATA_DMA_BOUNDARY, .slave_configure = ata_scsi_slave_config, .bios_param = ata_std_bios_param, - .ordered_flush = 1, }; static const struct ata_port_operations uli_ops = { diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c index c76215692da2..6d5b0a794cfd 100644 --- a/drivers/scsi/sata_via.c +++ b/drivers/scsi/sata_via.c @@ -106,7 +106,6 @@ static struct scsi_host_template svia_sht = { .dma_boundary = ATA_DMA_BOUNDARY, .slave_configure = ata_scsi_slave_config, .bios_param = ata_std_bios_param, - .ordered_flush = 1, }; static const struct ata_port_operations svia_sata_ops = { diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c index fcfa486965b4..2e2c3b7acb0c 100644 --- a/drivers/scsi/sata_vsc.c +++ b/drivers/scsi/sata_vsc.c @@ -235,7 +235,6 @@ static struct scsi_host_template vsc_sata_sht = { 
.dma_boundary = ATA_DMA_BOUNDARY, .slave_configure = ata_scsi_slave_config, .bios_param = ata_std_bios_param, - .ordered_flush = 1, }; -- cgit v1.2.3 From 9a3dccc42556537a48f39ee9a9e7ab90a933f766 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 6 Jan 2006 09:56:18 +0100 Subject: [BLOCK] add FUA support to libata Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- drivers/scsi/libata-core.c | 31 +++++++++++++++++++++++++------ drivers/scsi/libata-scsi.c | 32 ++++++++++++++++++++++++++------ drivers/scsi/libata.h | 4 +++- 3 files changed, 54 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 9ea102587914..bdfb0a88cd6f 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c @@ -562,16 +562,28 @@ static const u8 ata_rw_cmds[] = { ATA_CMD_WRITE_MULTI, ATA_CMD_READ_MULTI_EXT, ATA_CMD_WRITE_MULTI_EXT, + 0, + 0, + 0, + ATA_CMD_WRITE_MULTI_FUA_EXT, /* pio */ ATA_CMD_PIO_READ, ATA_CMD_PIO_WRITE, ATA_CMD_PIO_READ_EXT, ATA_CMD_PIO_WRITE_EXT, + 0, + 0, + 0, + 0, /* dma */ ATA_CMD_READ, ATA_CMD_WRITE, ATA_CMD_READ_EXT, - ATA_CMD_WRITE_EXT + ATA_CMD_WRITE_EXT, + 0, + 0, + 0, + ATA_CMD_WRITE_FUA_EXT }; /** @@ -584,25 +596,32 @@ static const u8 ata_rw_cmds[] = { * LOCKING: * caller. */ -void ata_rwcmd_protocol(struct ata_queued_cmd *qc) +int ata_rwcmd_protocol(struct ata_queued_cmd *qc) { struct ata_taskfile *tf = &qc->tf; struct ata_device *dev = qc->dev; + u8 cmd; - int index, lba48, write; + int index, fua, lba48, write; + fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0; lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; if (dev->flags & ATA_DFLAG_PIO) { tf->protocol = ATA_PROT_PIO; - index = dev->multi_count ? 0 : 4; + index = dev->multi_count ? 0 : 8; } else { tf->protocol = ATA_PROT_DMA; - index = 8; + index = 16; } - tf->command = ata_rw_cmds[index + lba48 + write]; + cmd = ata_rw_cmds[index + fua + lba48 + write]; + if (cmd) { + tf->command = cmd; + return 0; + } + return -1; } static const char * const xfer_mode_str[] = { diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c index e0439be4b573..2c644cbb6e9c 100644 --- a/drivers/scsi/libata-scsi.c +++ b/drivers/scsi/libata-scsi.c @@ -1080,11 +1080,13 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm scsicmd[0] == WRITE_16) tf->flags |= ATA_TFLAG_WRITE; - /* Calculate the SCSI LBA and transfer length. */ + /* Calculate the SCSI LBA, transfer length and FUA. 
*/ switch (scsicmd[0]) { case READ_10: case WRITE_10: scsi_10_lba_len(scsicmd, &block, &n_block); + if (unlikely(scsicmd[1] & (1 << 3))) + tf->flags |= ATA_TFLAG_FUA; break; case READ_6: case WRITE_6: @@ -1099,6 +1101,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm case READ_16: case WRITE_16: scsi_16_lba_len(scsicmd, &block, &n_block); + if (unlikely(scsicmd[1] & (1 << 3))) + tf->flags |= ATA_TFLAG_FUA; break; default: DPRINTK("no-byte command\n"); @@ -1142,7 +1146,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm tf->device |= (block >> 24) & 0xf; } - ata_rwcmd_protocol(qc); + if (unlikely(ata_rwcmd_protocol(qc) < 0)) + goto invalid_fld; qc->nsect = n_block; tf->nsect = n_block & 0xff; @@ -1160,7 +1165,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm if ((block >> 28) || (n_block > 256)) goto out_of_range; - ata_rwcmd_protocol(qc); + if (unlikely(ata_rwcmd_protocol(qc) < 0)) + goto invalid_fld; /* Convert LBA to CHS */ track = (u32)block / dev->sectors; @@ -1695,6 +1701,7 @@ static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last) unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, unsigned int buflen) { + struct ata_device *dev = args->dev; u8 *scsicmd = args->cmd->cmnd, *p, *last; const u8 sat_blk_desc[] = { 0, 0, 0, 0, /* number of blocks: sat unspecified */ @@ -1703,6 +1710,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, }; u8 pg, spg; unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen; + u8 dpofua; VPRINTK("ENTER\n"); @@ -1771,9 +1779,17 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, if (minlen < 1) return 0; + + dpofua = 0; + if (ata_id_has_fua(args->id) && dev->flags & ATA_DFLAG_LBA48 && + (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count)) + dpofua = 1 << 4; + if (six_byte) { output_len--; rbuf[0] = output_len; + if (minlen > 2) + rbuf[2] |= dpofua; if (ebd) { if (minlen > 3) rbuf[3] = sizeof(sat_blk_desc); @@ -1786,6 +1802,8 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, rbuf[0] = output_len >> 8; if (minlen > 1) rbuf[1] = output_len; + if (minlen > 3) + rbuf[3] |= dpofua; if (ebd) { if (minlen > 7) rbuf[7] = sizeof(sat_blk_desc); @@ -2446,7 +2464,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) if (xlat_func) ata_scsi_translate(ap, dev, cmd, done, xlat_func); else - ata_scsi_simulate(dev->id, cmd, done); + ata_scsi_simulate(ap, dev, cmd, done); } else ata_scsi_translate(ap, dev, cmd, done, atapi_xlat); @@ -2469,14 +2487,16 @@ out_unlock: * spin_lock_irqsave(host_set lock) */ -void ata_scsi_simulate(u16 *id, +void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct ata_scsi_args args; const u8 *scsicmd = cmd->cmnd; - args.id = id; + args.ap = ap; + args.dev = dev; + args.id = dev->id; args.cmd = cmd; args.done = done; diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h index 251e53bdc6e0..e03ce48b7b4b 100644 --- a/drivers/scsi/libata.h +++ b/drivers/scsi/libata.h @@ -32,6 +32,8 @@ #define DRV_VERSION "1.20" /* must be exactly four chars */ struct ata_scsi_args { + struct ata_port *ap; + struct ata_device *dev; u16 *id; struct scsi_cmnd *cmd; void (*done)(struct scsi_cmnd *); @@ -41,7 +43,7 @@ struct ata_scsi_args { extern int atapi_enabled; extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 
struct ata_device *dev); -extern void ata_rwcmd_protocol(struct ata_queued_cmd *qc); +extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); extern void ata_qc_free(struct ata_queued_cmd *qc); extern int ata_qc_issue(struct ata_queued_cmd *qc); extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); -- cgit v1.2.3 From 3e087b575496b8aa445192f58e7d996b1cdfa121 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 6 Jan 2006 09:57:31 +0100 Subject: [BLOCK] update IDE to use new blk_ordered for barriers Update IDE to use new blk_ordered. This change makes the following behavior changes. * Partial completion of the barrier request is handled as failure of the whole ordered sequence. No more partial completion for barrier requests. * Any failure of pre or post flush request results in failure of the whole ordered sequence. So, successfully completed ordered sequence guarantees that all requests prior to the barrier made to physical medium and, then, the while barrier request made to the physical medium. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- drivers/ide/ide-disk.c | 137 +++++++++++++++++++------------------------------ drivers/ide/ide-io.c | 5 +- 2 files changed, 54 insertions(+), 88 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 4e5767968d7f..4b441720b6ba 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -681,50 +681,9 @@ static ide_proc_entry_t idedisk_proc[] = { #endif /* CONFIG_PROC_FS */ -static void idedisk_end_flush(request_queue_t *q, struct request *flush_rq) +static void idedisk_prepare_flush(request_queue_t *q, struct request *rq) { ide_drive_t *drive = q->queuedata; - struct request *rq = flush_rq->end_io_data; - int good_sectors = rq->hard_nr_sectors; - int bad_sectors; - sector_t sector; - - if (flush_rq->errors & ABRT_ERR) { - printk(KERN_ERR "%s: barrier support doesn't work\n", drive->name); - blk_queue_ordered(drive->queue, QUEUE_ORDERED_NONE); - blk_queue_issue_flush_fn(drive->queue, NULL); - good_sectors = 0; - } else if (flush_rq->errors) { - good_sectors = 0; - if (blk_barrier_preflush(rq)) { - sector = ide_get_error_location(drive,flush_rq->buffer); - if ((sector >= rq->hard_sector) && - (sector < rq->hard_sector + rq->hard_nr_sectors)) - good_sectors = sector - rq->hard_sector; - } - } - - if (flush_rq->errors) - printk(KERN_ERR "%s: failed barrier write: " - "sector=%Lx(good=%d/bad=%d)\n", - drive->name, (unsigned long long)rq->sector, - good_sectors, - (int) (rq->hard_nr_sectors-good_sectors)); - - bad_sectors = rq->hard_nr_sectors - good_sectors; - - if (good_sectors) - __ide_end_request(drive, rq, 1, good_sectors); - if (bad_sectors) - __ide_end_request(drive, rq, 0, bad_sectors); -} - -static int idedisk_prepare_flush(request_queue_t *q, struct request *rq) -{ - ide_drive_t *drive = q->queuedata; - - if (!drive->wcache) - return 0; memset(rq->cmd, 0, sizeof(rq->cmd)); @@ -735,9 +694,8 @@ static int idedisk_prepare_flush(request_queue_t *q, struct request *rq) rq->cmd[0] = WIN_FLUSH_CACHE; - rq->flags |= REQ_DRIVE_TASK | REQ_SOFTBARRIER; + rq->flags |= REQ_DRIVE_TASK; rq->buffer = rq->cmd; - return 1; } static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk, @@ -794,27 +752,64 @@ static int set_nowerr(ide_drive_t *drive, int arg) return 0; } +static void update_ordered(ide_drive_t *drive) +{ + struct hd_driveid *id = drive->id; + unsigned ordered = QUEUE_ORDERED_NONE; + prepare_flush_fn *prep_fn = NULL; + issue_flush_fn *issue_fn = NULL; + + if 
(drive->wcache) { + unsigned long long capacity; + int barrier; + /* + * We must avoid issuing commands a drive does not + * understand or we may crash it. We check flush cache + * is supported. We also check we have the LBA48 flush + * cache if the drive capacity is too large. By this + * time we have trimmed the drive capacity if LBA48 is + * not available so we don't need to recheck that. + */ + capacity = idedisk_capacity(drive); + barrier = ide_id_has_flush_cache(id) && + (drive->addressing == 0 || capacity <= (1ULL << 28) || + ide_id_has_flush_cache_ext(id)); + + printk(KERN_INFO "%s: cache flushes %ssupported\n", + drive->name, barrier ? "" : "not"); + + if (barrier) { + ordered = QUEUE_ORDERED_DRAIN_FLUSH; + prep_fn = idedisk_prepare_flush; + issue_fn = idedisk_issue_flush; + } + } else + ordered = QUEUE_ORDERED_DRAIN; + + blk_queue_ordered(drive->queue, ordered, prep_fn); + blk_queue_issue_flush_fn(drive->queue, issue_fn); +} + static int write_cache(ide_drive_t *drive, int arg) { ide_task_t args; - int err; - - if (!ide_id_has_flush_cache(drive->id)) - return 1; + int err = 1; - memset(&args, 0, sizeof(ide_task_t)); - args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ? + if (ide_id_has_flush_cache(drive->id)) { + memset(&args, 0, sizeof(ide_task_t)); + args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ? SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE; - args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES; - args.command_type = IDE_DRIVE_TASK_NO_DATA; - args.handler = &task_no_data_intr; + args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES; + args.command_type = IDE_DRIVE_TASK_NO_DATA; + args.handler = &task_no_data_intr; + err = ide_raw_taskfile(drive, &args, NULL); + if (err == 0) + drive->wcache = arg; + } - err = ide_raw_taskfile(drive, &args, NULL); - if (err) - return err; + update_ordered(drive); - drive->wcache = arg; - return 0; + return err; } static int do_idedisk_flushcache (ide_drive_t *drive) @@ -888,7 +883,6 @@ static void idedisk_setup (ide_drive_t *drive) { struct hd_driveid *id = drive->id; unsigned long long capacity; - int barrier; idedisk_add_settings(drive); @@ -992,31 +986,6 @@ static void idedisk_setup (ide_drive_t *drive) drive->wcache = 1; write_cache(drive, 1); - - /* - * We must avoid issuing commands a drive does not understand - * or we may crash it. We check flush cache is supported. We also - * check we have the LBA48 flush cache if the drive capacity is - * too large. By this time we have trimmed the drive capacity if - * LBA48 is not available so we don't need to recheck that. - */ - barrier = 0; - if (ide_id_has_flush_cache(id)) - barrier = 1; - if (drive->addressing == 1) { - /* Can't issue the correct flush ? */ - if (capacity > (1ULL << 28) && !ide_id_has_flush_cache_ext(id)) - barrier = 0; - } - - printk(KERN_INFO "%s: cache flushes %ssupported\n", - drive->name, barrier ? 
"" : "not "); - if (barrier) { - blk_queue_ordered(drive->queue, QUEUE_ORDERED_FLUSH); - drive->queue->prepare_flush_fn = idedisk_prepare_flush; - drive->queue->end_flush_fn = idedisk_end_flush; - blk_queue_issue_flush_fn(drive->queue, idedisk_issue_flush); - } } static void ide_cacheflush_p(ide_drive_t *drive) diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 8435b44a700b..b5dc6df8e67d 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -119,10 +119,7 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors) if (!nr_sectors) nr_sectors = rq->hard_cur_sectors; - if (blk_complete_barrier_rq_locked(drive->queue, rq, nr_sectors)) - ret = rq->nr_sectors != 0; - else - ret = __ide_end_request(drive, rq, uptodate, nr_sectors); + ret = __ide_end_request(drive, rq, uptodate, nr_sectors); spin_unlock_irqrestore(&ide_lock, flags); return ret; -- cgit v1.2.3 From e650c305ec3178818b317dad37a6d9c7fa8ba28d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 6 Jan 2006 12:38:30 +0100 Subject: [SCSI] scsi_end_async() needs to take an uptodate parameter Signed-off-by: Jens Axboe --- drivers/scsi/scsi_lib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 7a38b101976c..ba93d6e66d48 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -308,7 +308,7 @@ struct scsi_io_context { static kmem_cache_t *scsi_io_context_cache; -static void scsi_end_async(struct request *req) +static void scsi_end_async(struct request *req, int uptodate) { struct scsi_io_context *sioc = req->end_io_data; -- cgit v1.2.3 From 4b2f0260c74324abca76ccaa42d426af163125e7 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 6 Jan 2006 00:09:47 -0800 Subject: [PATCH] nbd: fix TX/RX race condition Janos Haar of First NetCenter Bt. reported numerous crashes involving the NBD driver. With his help, this was tracked down to bogus bio vectors which in turn was the result of a race condition between the receive/transmit routines in the NBD driver. The bug manifests itself like this: CPU0 CPU1 do_nbd_request add req to queuelist nbd_send_request send req head for each bio kmap send nbd_read_stat nbd_find_request nbd_end_request kunmap When CPU1 finishes nbd_end_request, the request and all its associated bio's are freed. So when CPU0 calls kunmap whose argument is derived from the last bio, it may crash. Under normal circumstances, the race occurs only on the last bio. However, if an error is encountered on the remote NBD server (such as an incorrect magic number in the request), or if there were a bug in the server, it is possible for the nbd_end_request to occur any time after the request's addition to the queuelist. The following patch fixes this problem by making sure that requests are not added to the queuelist until after they have been completed transmission. In order for the receiving side to be ready for responses involving requests still being transmitted, the patch introduces the concept of the active request. When a response matches the current active request, its processing is delayed until after the tranmission has come to a stop. This has been tested by Janos and it has been successful in curing this race condition. From: Herbert Xu Here is an updated patch which removes the active_req wait in nbd_clear_queue and the associated memory barrier. I've also clarified this in the comment. 
Signed-off-by: Herbert Xu Cc: Cc: Paul Clements Signed-off-by: Herbert Xu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/nbd.c | 122 ++++++++++++++++++++++++++-------------------------- 1 file changed, 60 insertions(+), 62 deletions(-) (limited to 'drivers') diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 9e268ddedfbd..d5c8ee7d9815 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -54,11 +54,15 @@ #include #include #include +#include +#include +#include #include #include #include +#include #include #include @@ -230,14 +234,6 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req) request.len = htonl(size); memcpy(request.handle, &req, sizeof(req)); - down(&lo->tx_lock); - - if (!sock || !lo->sock) { - printk(KERN_ERR "%s: Attempted send on closed socket\n", - lo->disk->disk_name); - goto error_out; - } - dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n", lo->disk->disk_name, req, nbdcmd_to_ascii(nbd_cmd(req)), @@ -276,11 +272,9 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req) } } } - up(&lo->tx_lock); return 0; error_out: - up(&lo->tx_lock); return 1; } @@ -289,9 +283,14 @@ static struct request *nbd_find_request(struct nbd_device *lo, char *handle) struct request *req; struct list_head *tmp; struct request *xreq; + int err; memcpy(&xreq, handle, sizeof(xreq)); + err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq); + if (unlikely(err)) + goto out; + spin_lock(&lo->queue_lock); list_for_each(tmp, &lo->queue_head) { req = list_entry(tmp, struct request, queuelist); @@ -302,7 +301,11 @@ static struct request *nbd_find_request(struct nbd_device *lo, char *handle) return req; } spin_unlock(&lo->queue_lock); - return NULL; + + err = -ENOENT; + +out: + return ERR_PTR(err); } static inline int sock_recv_bvec(struct socket *sock, struct bio_vec *bvec) @@ -331,7 +334,11 @@ static struct request *nbd_read_stat(struct nbd_device *lo) goto harderror; } req = nbd_find_request(lo, reply.handle); - if (req == NULL) { + if (unlikely(IS_ERR(req))) { + result = PTR_ERR(req); + if (result != -ENOENT) + goto harderror; + printk(KERN_ERR "%s: Unexpected reply (%p)\n", lo->disk->disk_name, reply.handle); result = -EBADR; @@ -395,19 +402,24 @@ static void nbd_clear_que(struct nbd_device *lo) BUG_ON(lo->magic != LO_MAGIC); - do { - req = NULL; - spin_lock(&lo->queue_lock); - if (!list_empty(&lo->queue_head)) { - req = list_entry(lo->queue_head.next, struct request, queuelist); - list_del_init(&req->queuelist); - } - spin_unlock(&lo->queue_lock); - if (req) { - req->errors++; - nbd_end_request(req); - } - } while (req); + /* + * Because we have set lo->sock to NULL under the tx_lock, all + * modifications to the list must have completed by now. For + * the same reason, the active_req must be NULL. + * + * As a consequence, we don't need to take the spin lock while + * purging the list here. 
+ */ + BUG_ON(lo->sock); + BUG_ON(lo->active_req); + + while (!list_empty(&lo->queue_head)) { + req = list_entry(lo->queue_head.next, struct request, + queuelist); + list_del_init(&req->queuelist); + req->errors++; + nbd_end_request(req); + } } /* @@ -435,11 +447,6 @@ static void do_nbd_request(request_queue_t * q) BUG_ON(lo->magic != LO_MAGIC); - if (!lo->file) { - printk(KERN_ERR "%s: Request when not-ready\n", - lo->disk->disk_name); - goto error_out; - } nbd_cmd(req) = NBD_CMD_READ; if (rq_data_dir(req) == WRITE) { nbd_cmd(req) = NBD_CMD_WRITE; @@ -453,32 +460,34 @@ static void do_nbd_request(request_queue_t * q) req->errors = 0; spin_unlock_irq(q->queue_lock); - spin_lock(&lo->queue_lock); - - if (!lo->file) { - spin_unlock(&lo->queue_lock); - printk(KERN_ERR "%s: failed between accept and semaphore, file lost\n", - lo->disk->disk_name); + down(&lo->tx_lock); + if (unlikely(!lo->sock)) { + up(&lo->tx_lock); + printk(KERN_ERR "%s: Attempted send on closed socket\n", + lo->disk->disk_name); req->errors++; nbd_end_request(req); spin_lock_irq(q->queue_lock); continue; } - list_add(&req->queuelist, &lo->queue_head); - spin_unlock(&lo->queue_lock); + lo->active_req = req; if (nbd_send_req(lo, req) != 0) { printk(KERN_ERR "%s: Request send failed\n", lo->disk->disk_name); - if (nbd_find_request(lo, (char *)&req) != NULL) { - /* we still own req */ - req->errors++; - nbd_end_request(req); - } else /* we're racing with nbd_clear_que */ - printk(KERN_DEBUG "nbd: can't find req\n"); + req->errors++; + nbd_end_request(req); + } else { + spin_lock(&lo->queue_lock); + list_add(&req->queuelist, &lo->queue_head); + spin_unlock(&lo->queue_lock); } + lo->active_req = NULL; + up(&lo->tx_lock); + wake_up_all(&lo->active_wq); + spin_lock_irq(q->queue_lock); continue; @@ -529,17 +538,10 @@ static int nbd_ioctl(struct inode *inode, struct file *file, down(&lo->tx_lock); lo->sock = NULL; up(&lo->tx_lock); - spin_lock(&lo->queue_lock); file = lo->file; lo->file = NULL; - spin_unlock(&lo->queue_lock); nbd_clear_que(lo); - spin_lock(&lo->queue_lock); - if (!list_empty(&lo->queue_head)) { - printk(KERN_ERR "nbd: disconnect: some requests are in progress -> please try again.\n"); - error = -EBUSY; - } - spin_unlock(&lo->queue_lock); + BUG_ON(!list_empty(&lo->queue_head)); if (file) fput(file); return error; @@ -598,24 +600,19 @@ static int nbd_ioctl(struct inode *inode, struct file *file, lo->sock = NULL; } up(&lo->tx_lock); - spin_lock(&lo->queue_lock); file = lo->file; lo->file = NULL; - spin_unlock(&lo->queue_lock); nbd_clear_que(lo); printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name); if (file) fput(file); return lo->harderror; case NBD_CLEAR_QUE: - down(&lo->tx_lock); - if (lo->sock) { - up(&lo->tx_lock); - return 0; /* probably should be error, but that would - * break "nbd-client -d", so just return 0 */ - } - up(&lo->tx_lock); - nbd_clear_que(lo); + /* + * This is for compatibility only. The queue is always cleared + * by NBD_DO_IT or NBD_CLEAR_SOCK. 
+ */ + BUG_ON(!lo->sock && !list_empty(&lo->queue_head)); return 0; case NBD_PRINT_DEBUG: printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n", @@ -688,6 +685,7 @@ static int __init nbd_init(void) spin_lock_init(&nbd_dev[i].queue_lock); INIT_LIST_HEAD(&nbd_dev[i].queue_head); init_MUTEX(&nbd_dev[i].tx_lock); + init_waitqueue_head(&nbd_dev[i].active_wq); nbd_dev[i].blksize = 1024; nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */ disk->major = NBD_MAJOR; -- cgit v1.2.3 From 98a38ebdda69f1498be4f618d8d919695c8d6352 Mon Sep 17 00:00:00 2001 From: Andy Whitcroft Date: Fri, 6 Jan 2006 00:10:35 -0800 Subject: [PATCH] memhotplug: register_ and unregister_memory_notifier should be global Both register_memory_notifer and unregister_memory_notifier are global and declared so in linux/memory.h. Update the HOTPLUG specific definitions to match. This fixes a compile warning when HOTPLUG is enabled. Signed-off-by: Andy Whitcroft Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/memory.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 7e1d077874df..19fe93155fe6 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -49,12 +49,12 @@ static struct kset_uevent_ops memory_uevent_ops = { static struct notifier_block *memory_chain; -static int register_memory_notifier(struct notifier_block *nb) +int register_memory_notifier(struct notifier_block *nb) { return notifier_chain_register(&memory_chain, nb); } -static void unregister_memory_notifier(struct notifier_block *nb) +void unregister_memory_notifier(struct notifier_block *nb) { notifier_chain_unregister(&memory_chain, nb); } -- cgit v1.2.3 From 900b2b463dc6e65ec474d6880412c63c25b3aea9 Mon Sep 17 00:00:00 2001 From: Andy Whitcroft Date: Fri, 6 Jan 2006 00:10:35 -0800 Subject: [PATCH] memhotplug: register_memory should be global register_memory is global and declared so in linux/memory.h. Update the HOTPLUG specific definition to match. This fixes a compile warning when HOTPLUG is enabled. Signed-off-by: Andy Whitcroft Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/memory.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 19fe93155fe6..58801d718cc2 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -62,8 +62,7 @@ void unregister_memory_notifier(struct notifier_block *nb) /* * register_memory - Setup a sysfs device for a memory block */ -static int -register_memory(struct memory_block *memory, struct mem_section *section, +int register_memory(struct memory_block *memory, struct mem_section *section, struct node *root) { int error; -- cgit v1.2.3 From c9662b4b37f8f00a212eb4131d1d177b6ed8ddbd Mon Sep 17 00:00:00 2001 From: Arthur Othieno Date: Fri, 6 Jan 2006 00:11:29 -0800 Subject: [PATCH] macintosh: don't store i2c_add_driver() return if no further processing done therm_pm72.c and windfarm_lm75_sensor.c both store the return from i2c_add_driver() but do no further processing on the result. Simply return what i2c_add_driver() did, instead. 
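The shape of the cleanup, as a minimal sketch with invented names (foo_driver, foo_attach_init; this is not code from either driver): when the only work is a single registration call whose result needs no post-processing, return the call directly.

static struct i2c_driver foo_driver;    /* stand-in for the real driver struct */

/* before: the temporary and the branch add nothing */
static int __init foo_attach_init_old(void)
{
        int rc;

        rc = i2c_add_driver(&foo_driver);
        if (rc < 0)
                return rc;
        return 0;
}

/* after: propagate whatever i2c_add_driver() returned */
static int __init foo_attach_init(void)
{
        return i2c_add_driver(&foo_driver);
}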
Signed-off-by: Arthur Othieno Acked-by: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/macintosh/therm_pm72.c | 7 +------ drivers/macintosh/windfarm_lm75_sensor.c | 7 +------ 2 files changed, 2 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c index 190878eef990..435427daed75 100644 --- a/drivers/macintosh/therm_pm72.c +++ b/drivers/macintosh/therm_pm72.c @@ -1988,18 +1988,13 @@ static void fcu_lookup_fans(struct device_node *fcu_node) static int fcu_of_probe(struct of_device* dev, const struct of_device_id *match) { - int rc; - state = state_detached; /* Lookup the fans in the device tree */ fcu_lookup_fans(dev->node); /* Add the driver */ - rc = i2c_add_driver(&therm_pm72_driver); - if (rc < 0) - return rc; - return 0; + return i2c_add_driver(&therm_pm72_driver); } static int fcu_of_remove(struct of_device* dev) diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c index a0a41ad0f2b5..c62ed68a3138 100644 --- a/drivers/macintosh/windfarm_lm75_sensor.c +++ b/drivers/macintosh/windfarm_lm75_sensor.c @@ -240,12 +240,7 @@ static int wf_lm75_detach(struct i2c_client *client) static int __init wf_lm75_sensor_init(void) { - int rc; - - rc = i2c_add_driver(&wf_lm75_driver); - if (rc < 0) - return rc; - return 0; + return i2c_add_driver(&wf_lm75_driver); } static void __exit wf_lm75_sensor_exit(void) -- cgit v1.2.3 From 2d8179c0b77b54e27321944e16f65defeda81e27 Mon Sep 17 00:00:00 2001 From: Sylvain Munaut Date: Fri, 6 Jan 2006 00:11:31 -0800 Subject: [PATCH] ppc32/serial: Fix compiler errors with GCC 4.x in mpc52xx_uart.c Signed-off-by: Wolfgang Denk Signed-off-by: Sylvain Munaut Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/serial/mpc52xx_uart.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c index b8727d9bf690..4dcf0310bef5 100644 --- a/drivers/serial/mpc52xx_uart.c +++ b/drivers/serial/mpc52xx_uart.c @@ -668,7 +668,7 @@ mpc52xx_console_setup(struct console *co, char *options) } -extern struct uart_driver mpc52xx_uart_driver; +static struct uart_driver mpc52xx_uart_driver; static struct console mpc52xx_console = { .name = "ttyS", -- cgit v1.2.3 From d62de3aa8ac762c09845aa38634a845da55f31dc Mon Sep 17 00:00:00 2001 From: Sylvain Munaut Date: Fri, 6 Jan 2006 00:11:32 -0800 Subject: [PATCH] ppc32/serial: Change mpc52xx_uart.c to use the Low Density Serial port major Before this patch we were just using the "classic" /dev/ttySx devices. However when another on the system is loaded that uses those (like drivers for serial PCMCIA), that creates a conflict for the minors. Therefore, we now use /dev/ttyPSC[0:5] (note the 0-based numbering !) with some minors we've been assigned in the "Low Density Serial port major" Signed-off-by: Sylvain Munaut Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/serial/mpc52xx_uart.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c index 4dcf0310bef5..1288d6203e94 100644 --- a/drivers/serial/mpc52xx_uart.c +++ b/drivers/serial/mpc52xx_uart.c @@ -37,11 +37,11 @@ * by the bootloader or in the platform init code. * * The idx field must be equal to the PSC index ( e.g. 0 for PSC1, 1 for PSC2, - * and so on). 
So the PSC1 is mapped to /dev/ttyS0, PSC2 to /dev/ttyS1 and so - * on. But be warned, it's an ABSOLUTE REQUIREMENT ! This is needed mainly for - * the console code : without this 1:1 mapping, at early boot time, when we are - * parsing the kernel args console=ttyS?, we wouldn't know wich PSC it will be - * mapped to. + * and so on). So the PSC1 is mapped to /dev/ttyPSC0, PSC2 to /dev/ttyPSC1 and + * so on. But be warned, it's an ABSOLUTE REQUIREMENT ! This is needed mainly + * fpr the console code : without this 1:1 mapping, at early boot time, when we + * are parsing the kernel args console=ttyPSC?, we wouldn't know wich PSC it + * will be mapped to. */ #include @@ -65,6 +65,10 @@ #include +/* We've been assigned a range on the "Low-density serial ports" major */ +#define SERIAL_PSC_MAJOR 204 +#define SERIAL_PSC_MINOR 148 + #define ISR_PASS_LIMIT 256 /* Max number of iteration in the interrupt */ @@ -671,12 +675,12 @@ mpc52xx_console_setup(struct console *co, char *options) static struct uart_driver mpc52xx_uart_driver; static struct console mpc52xx_console = { - .name = "ttyS", + .name = "ttyPSC", .write = mpc52xx_console_write, .device = uart_console_device, .setup = mpc52xx_console_setup, .flags = CON_PRINTBUFFER, - .index = -1, /* Specified on the cmdline (e.g. console=ttyS0 ) */ + .index = -1, /* Specified on the cmdline (e.g. console=ttyPSC0 ) */ .data = &mpc52xx_uart_driver, }; @@ -703,10 +707,10 @@ console_initcall(mpc52xx_console_init); static struct uart_driver mpc52xx_uart_driver = { .owner = THIS_MODULE, .driver_name = "mpc52xx_psc_uart", - .dev_name = "ttyS", - .devfs_name = "ttyS", - .major = TTY_MAJOR, - .minor = 64, + .dev_name = "ttyPSC", + .devfs_name = "ttyPSC", + .major = SERIAL_PSC_MAJOR, + .minor = SERIAL_PSC_MINOR, .nr = MPC52xx_PSC_MAXNUM, .cons = MPC52xx_PSC_CONSOLE, }; -- cgit v1.2.3 From 9f6d4b0c21a6894dad7665d3dda4174c7c120784 Mon Sep 17 00:00:00 2001 From: Ben Collins Date: Fri, 6 Jan 2006 00:11:40 -0800 Subject: [PATCH] therm_adt746x: Quiet fan speed change messages Only output the messages about fan speed changes with a verbose=1 module param. Signed-off-by: Fabio M. 
Di Nitto Signed-off-by: Ben Collins Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/macintosh/therm_adt746x.c | 39 ++++++++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c index f38696622eb4..5e1f5e9653cb 100644 --- a/drivers/macintosh/therm_adt746x.c +++ b/drivers/macintosh/therm_adt746x.c @@ -52,6 +52,7 @@ static char *sensor_location[3] = {NULL, NULL, NULL}; static int limit_adjust = 0; static int fan_speed = -1; +static int verbose = 0; MODULE_AUTHOR("Colin Leroy "); MODULE_DESCRIPTION("Driver for ADT746x thermostat in iBook G4 and " @@ -66,6 +67,10 @@ module_param(fan_speed, int, 0644); MODULE_PARM_DESC(fan_speed,"Specify starting fan speed (0-255) " "(default 64)"); +module_param(verbose, bool, 0); +MODULE_PARM_DESC(verbose,"Verbose log operations " + "(default 0)"); + struct thermostat { struct i2c_client clt; u8 temps[3]; @@ -149,13 +154,13 @@ detach_thermostat(struct i2c_adapter *adapter) if (thread_therm != NULL) { kthread_stop(thread_therm); } - + printk(KERN_INFO "adt746x: Putting max temperatures back from " "%d, %d, %d to %d, %d, %d\n", th->limits[0], th->limits[1], th->limits[2], th->initial_limits[0], th->initial_limits[1], th->initial_limits[2]); - + for (i = 0; i < 3; i++) write_reg(th, LIMIT_REG[i], th->initial_limits[i]); @@ -212,12 +217,14 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan) return; if (th->last_speed[fan] != speed) { - if (speed == -1) - printk(KERN_DEBUG "adt746x: Setting speed to automatic " - "for %s fan.\n", sensor_location[fan+1]); - else - printk(KERN_DEBUG "adt746x: Setting speed to %d " - "for %s fan.\n", speed, sensor_location[fan+1]); + if (verbose) { + if (speed == -1) + printk(KERN_DEBUG "adt746x: Setting speed to automatic " + "for %s fan.\n", sensor_location[fan+1]); + else + printk(KERN_DEBUG "adt746x: Setting speed to %d " + "for %s fan.\n", speed, sensor_location[fan+1]); + } } else return; @@ -298,10 +305,11 @@ static void update_fans_speed (struct thermostat *th) if (new_speed > 255) new_speed = 255; - printk(KERN_DEBUG "adt746x: setting fans speed to %d " - "(limit exceeded by %d on %s) \n", - new_speed, var, - sensor_location[fan_number+1]); + if (verbose) + printk(KERN_DEBUG "adt746x: Setting fans speed to %d " + "(limit exceeded by %d on %s) \n", + new_speed, var, + sensor_location[fan_number+1]); write_both_fan_speed(th, new_speed); th->last_var[fan_number] = var; } else if (var < -2) { @@ -309,8 +317,9 @@ static void update_fans_speed (struct thermostat *th) * so cold (lastvar >= -1) */ if (i == 2 && lastvar < -1) { if (th->last_speed[fan_number] != 0) - printk(KERN_DEBUG "adt746x: Stopping " - "fans.\n"); + if (verbose) + printk(KERN_DEBUG "adt746x: Stopping " + "fans.\n"); write_both_fan_speed(th, 0); } } @@ -406,7 +415,7 @@ static int attach_one_thermostat(struct i2c_adapter *adapter, int addr, th->initial_limits[i] = read_reg(th, LIMIT_REG[i]); set_limit(th, i); } - + printk(KERN_INFO "adt746x: Lowering max temperatures from %d, %d, %d" " to %d, %d, %d\n", th->initial_limits[0], th->initial_limits[1], -- cgit v1.2.3 From 7c4cb60e5b97677424e95baee9c29df54b26e6ba Mon Sep 17 00:00:00 2001 From: Zachary Amsden Date: Fri, 6 Jan 2006 00:11:47 -0800 Subject: [PATCH] x86: GDT alignment fix Make GDT page aligned and page padded to support running inside of a hypervisor. 
This prevents false sharing of the GDT page with other hot data, which is not allowed in Xen, and causes performance problems in VMware. Rather than go back to the old method of statically allocating the GDT (which wastes unneded space for non-present CPUs), the GDT for APs is allocated dynamically. Signed-off-by: Zachary Amsden Cc: "Seth, Rohit" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/pnp/pnpbios/bioscalls.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c index 6b7583f497d0..7cb476ed7f91 100644 --- a/drivers/pnp/pnpbios/bioscalls.c +++ b/drivers/pnp/pnpbios/bioscalls.c @@ -69,14 +69,16 @@ __asm__( #define Q_SET_SEL(cpu, selname, address, size) \ do { \ -set_base(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], __va((u32)(address))); \ -set_limit(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], size); \ +struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \ +set_base(gdt[(selname) >> 3], __va((u32)(address))); \ +set_limit(gdt[(selname) >> 3], size); \ } while(0) #define Q2_SET_SEL(cpu, selname, address, size) \ do { \ -set_base(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], (u32)(address)); \ -set_limit(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], size); \ +struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \ +set_base(gdt[(selname) >> 3], (u32)(address)); \ +set_limit(gdt[(selname) >> 3], size); \ } while(0) static struct desc_struct bad_bios_desc = { 0, 0x00409200 }; @@ -115,8 +117,8 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, return PNP_FUNCTION_NOT_SUPPORTED; cpu = get_cpu(); - save_desc_40 = per_cpu(cpu_gdt_table,cpu)[0x40 / 8]; - per_cpu(cpu_gdt_table,cpu)[0x40 / 8] = bad_bios_desc; + save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8]; + get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc; /* On some boxes IRQ's during PnP BIOS calls are deadly. */ spin_lock_irqsave(&pnp_bios_lock, flags); @@ -158,7 +160,7 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, ); spin_unlock_irqrestore(&pnp_bios_lock, flags); - per_cpu(cpu_gdt_table,cpu)[0x40 / 8] = save_desc_40; + get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40; put_cpu(); /* If we get here and this is set then the PnP BIOS faulted on us. */ @@ -535,8 +537,10 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); - for(i=0; i < NR_CPUS; i++) - { + for (i = 0; i < NR_CPUS; i++) { + struct desc_struct *gdt = get_cpu_gdt_table(i); + if (!gdt) + continue; Q2_SET_SEL(i, PNP_CS32, &pnp_bios_callfunc, 64 * 1024); Q_SET_SEL(i, PNP_CS16, header->fields.pm16cseg, 64 * 1024); Q_SET_SEL(i, PNP_DS, header->fields.pm16dseg, 64 * 1024); -- cgit v1.2.3 From 5702d0f742b2f462267bca147334f77a255bcc74 Mon Sep 17 00:00:00 2001 From: Zachary Amsden Date: Fri, 6 Jan 2006 00:11:51 -0800 Subject: [PATCH] x86: Pnp segments in segment h Move PnP BIOS segment definitions into segment.h; the segments are reserved here, so they might as well be defined here as well. Note I didn't do this for APM BIOS, as Macintosh and other systems use those values to emulate APM in some scary way I don't want to understand. 
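For reference, a sketch of how the relocated definitions read once they sit next to the reserved GDT slots. It mirrors the block removed from bioscalls.c in the hunk below; the GDT_ENTRY_PNPBIOS_CS32/CS16/DS names appear in the follow-on patches, while the TS1/TS2 entry names are assumed here by analogy, and the exact layout in asm-i386/segment.h may differ.

/* five consecutive GDT entries, reserved starting at GDT_ENTRY_PNPBIOS_BASE */
#define GDT_ENTRY_PNPBIOS_CS32  (GDT_ENTRY_PNPBIOS_BASE + 0)
#define GDT_ENTRY_PNPBIOS_CS16  (GDT_ENTRY_PNPBIOS_BASE + 1)
#define GDT_ENTRY_PNPBIOS_DS    (GDT_ENTRY_PNPBIOS_BASE + 2)
#define GDT_ENTRY_PNPBIOS_TS1   (GDT_ENTRY_PNPBIOS_BASE + 3)
#define GDT_ENTRY_PNPBIOS_TS2   (GDT_ENTRY_PNPBIOS_BASE + 4)

/* selectors, same values as the old PNP_GDT + offset scheme */
#define PNP_CS32        (GDT_ENTRY_PNPBIOS_CS32 * 8)    /* segment for calling fn */
#define PNP_CS16        (GDT_ENTRY_PNPBIOS_CS16 * 8)    /* code segment for BIOS */
#define PNP_DS          (GDT_ENTRY_PNPBIOS_DS * 8)      /* data segment for BIOS */
#define PNP_TS1         (GDT_ENTRY_PNPBIOS_TS1 * 8)     /* transfer data segment */
#define PNP_TS2         (GDT_ENTRY_PNPBIOS_TS2 * 8)     /* another data segment */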
Signed-off-by: Zachary Amsden Acked-by: "Seth, Rohit" Cc: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/pnp/pnpbios/bioscalls.c | 9 --------- 1 file changed, 9 deletions(-) (limited to 'drivers') diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c index 7cb476ed7f91..37bacfcdbc5d 100644 --- a/drivers/pnp/pnpbios/bioscalls.c +++ b/drivers/pnp/pnpbios/bioscalls.c @@ -31,15 +31,6 @@ static struct { } pnp_bios_callpoint; -/* The PnP BIOS entries in the GDT */ -#define PNP_GDT (GDT_ENTRY_PNPBIOS_BASE * 8) - -#define PNP_CS32 (PNP_GDT+0x00) /* segment for calling fn */ -#define PNP_CS16 (PNP_GDT+0x08) /* code segment for BIOS */ -#define PNP_DS (PNP_GDT+0x10) /* data segment for BIOS */ -#define PNP_TS1 (PNP_GDT+0x18) /* transfer data segment */ -#define PNP_TS2 (PNP_GDT+0x20) /* another data segment */ - /* * These are some opcodes for a "static asmlinkage" * As this code is *not* executed inside the linux kernel segment, but in a -- cgit v1.2.3 From 5fe9fe3c6f9a1ae7aa224bb7a66eb9aad9e4abef Mon Sep 17 00:00:00 2001 From: Zachary Amsden Date: Fri, 6 Jan 2006 00:11:55 -0800 Subject: [PATCH] x86: Pnp byte granularity The one remaining caller of set_limit, the PnP BIOS code, calls into the PnP BIOS, passing kernel parameters in and out. These parameteres may be passed from arbitrary kernel virtual memory, so they deserve strict protection to stop a bad BIOS from smashing beyond the object size. Unfortunately, the use of set_limit was badly botching this by setting the limit in terms of pages, when it really should have byte granularity. When doing this, I discovered my BIOS had the buggy code during the "get system device node" call: mov ax, es:[bx] Which is harmless, but has a trivial workaround. Signed-off-by: Zachary Amsden Cc: "Seth, Rohit" Cc: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/pnp/pnpbios/bioscalls.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c index 37bacfcdbc5d..a72126180e97 100644 --- a/drivers/pnp/pnpbios/bioscalls.c +++ b/drivers/pnp/pnpbios/bioscalls.c @@ -283,12 +283,15 @@ int pnp_bios_dev_node_info(struct pnp_dev_node_info *data) static int __pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data) { u16 status; + u16 tmp_nodenum; if (!pnp_bios_present()) return PNP_FUNCTION_NOT_SUPPORTED; if ( !boot && pnpbios_dont_use_current_config ) return PNP_FUNCTION_NOT_SUPPORTED; + tmp_nodenum = *nodenum; status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2, boot ? 2 : 1, PNP_DS, 0, - nodenum, sizeof(char), data, 65536); + &tmp_nodenum, sizeof(tmp_nodenum), data, 65536); + *nodenum = tmp_nodenum; return status; } -- cgit v1.2.3 From e6a9918c9617ed21f71f2f20b45efe06822c8f00 Mon Sep 17 00:00:00 2001 From: Zachary Amsden Date: Fri, 6 Jan 2006 00:11:56 -0800 Subject: [PATCH] x86: Fixed pnp bios limits PnP BIOS data, code, and 32-bit entry segments all have fixed limits as well; set them in the GDT rather than adding more code. It would be nice to add these fixups to the boot GDT rather than setting the GDT for each CPU; perhaps I can wiggle this in later, but getting it in before the subsys init looks tricky. Also, make some progress on deprecating the ugly Q_SET_SEL macros. 
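To make the "fixed limits" point concrete: an i386 GDT descriptor packs base and limit into two 32-bit words, so a 64 KiB, byte-granular, present, DPL-0 segment with base 0 can live in the boot GDT permanently, leaving only set_base() work per CPU, which is what the hunk below reduces pnpbios_calls_init() to. The initializers here are an illustrative encoding in the same two-word form as bad_bios_desc above; the array name is invented and this is not taken from the patch.

/* word 0: limit[15:0] | base[15:0];  word 1: base[23:16] | access | limit[19:16] | flags | base[31:24] */
static struct desc_struct pnpbios_fixed_limit_descs[] = {
        { 0x0000ffff, 0x00409a00 },     /* 32-bit code, limit 0xffff, D bit set */
        { 0x0000ffff, 0x00009a00 },     /* 16-bit code, limit 0xffff */
        { 0x0000ffff, 0x00009200 },     /* 16-bit data, limit 0xffff */
};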
Signed-off-by: Zachary Amsden Cc: "Seth, Rohit" Cc: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/pnp/pnpbios/bioscalls.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c index a72126180e97..a1f0b0ba2bfe 100644 --- a/drivers/pnp/pnpbios/bioscalls.c +++ b/drivers/pnp/pnpbios/bioscalls.c @@ -58,13 +58,6 @@ __asm__( ".previous \n" ); -#define Q_SET_SEL(cpu, selname, address, size) \ -do { \ -struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \ -set_base(gdt[(selname) >> 3], __va((u32)(address))); \ -set_limit(gdt[(selname) >> 3], size); \ -} while(0) - #define Q2_SET_SEL(cpu, selname, address, size) \ do { \ struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \ @@ -535,8 +528,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) struct desc_struct *gdt = get_cpu_gdt_table(i); if (!gdt) continue; - Q2_SET_SEL(i, PNP_CS32, &pnp_bios_callfunc, 64 * 1024); - Q_SET_SEL(i, PNP_CS16, header->fields.pm16cseg, 64 * 1024); - Q_SET_SEL(i, PNP_DS, header->fields.pm16dseg, 64 * 1024); - } + set_base(gdt[GDT_ENTRY_PNPBIOS_CS32], &pnp_bios_callfunc); + set_base(gdt[GDT_ENTRY_PNPBIOS_CS16], __va(header->fields.pm16cseg)); + set_base(gdt[GDT_ENTRY_PNPBIOS_DS], __va(header->fields.pm16dseg)); + } } -- cgit v1.2.3 From a7a4ad0998dcd682f4968e8ec5fc1259914a1c4a Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Fri, 6 Jan 2006 00:12:15 -0800 Subject: [PATCH] Geode LX HW RNG Support Add support to hw_random for the Geode LX HRNG device. Signed-off-by: Jordan Crouse Cc: Alan Cox Cc: Jeff Garzik Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/hw_random.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 69 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/hw_random.c b/drivers/char/hw_random.c index 6f673d2de0b1..49769f59ea1b 100644 --- a/drivers/char/hw_random.c +++ b/drivers/char/hw_random.c @@ -1,4 +1,9 @@ /* + Added support for the AMD Geode LX RNG + (c) Copyright 2004-2005 Advanced Micro Devices, Inc. 
+ + derived from + Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) (c) Copyright 2003 Red Hat Inc @@ -95,6 +100,11 @@ static unsigned int via_data_present (void); static u32 via_data_read (void); #endif +static int __init geode_init(struct pci_dev *dev); +static void geode_cleanup(void); +static unsigned int geode_data_present (void); +static u32 geode_data_read (void); + struct rng_operations { int (*init) (struct pci_dev *dev); void (*cleanup) (void); @@ -122,6 +132,7 @@ enum { rng_hw_intel, rng_hw_amd, rng_hw_via, + rng_hw_geode, }; static struct rng_operations rng_vendor_ops[] = { @@ -139,6 +150,9 @@ static struct rng_operations rng_vendor_ops[] = { /* rng_hw_via */ { via_init, via_cleanup, via_data_present, via_data_read, 1 }, #endif + + /* rng_hw_geode */ + { geode_init, geode_cleanup, geode_data_present, geode_data_read, 4 } }; /* @@ -159,6 +173,9 @@ static struct pci_device_id rng_pci_tbl[] = { { 0x8086, 0x244e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel }, { 0x8086, 0x245e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel }, + { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_geode }, + { 0, }, /* terminate list */ }; MODULE_DEVICE_TABLE (pci, rng_pci_tbl); @@ -460,6 +477,57 @@ static void via_cleanup(void) } #endif +/*********************************************************************** + * + * AMD Geode RNG operations + * + */ + +static void __iomem *geode_rng_base = NULL; + +#define GEODE_RNG_DATA_REG 0x50 +#define GEODE_RNG_STATUS_REG 0x54 + +static u32 geode_data_read(void) +{ + u32 val; + + assert(geode_rng_base != NULL); + val = readl(geode_rng_base + GEODE_RNG_DATA_REG); + return val; +} + +static unsigned int geode_data_present(void) +{ + u32 val; + + assert(geode_rng_base != NULL); + val = readl(geode_rng_base + GEODE_RNG_STATUS_REG); + return val; +} + +static void geode_cleanup(void) +{ + iounmap(geode_rng_base); + geode_rng_base = NULL; +} + +static int geode_init(struct pci_dev *dev) +{ + unsigned long rng_base = pci_resource_start(dev, 0); + + if (rng_base == 0) + return 1; + + geode_rng_base = ioremap(rng_base, 0x58); + + if (geode_rng_base == NULL) { + printk(KERN_ERR PFX "Cannot ioremap RNG memory\n"); + return -EBUSY; + } + + return 0; +} /*********************************************************************** * @@ -574,7 +642,7 @@ static int __init rng_init (void) DPRINTK ("ENTER\n"); - /* Probe for Intel, AMD RNGs */ + /* Probe for Intel, AMD, Geode RNGs */ for_each_pci_dev(pdev) { ent = pci_match_id(rng_pci_tbl, pdev); if (ent) { -- cgit v1.2.3 From 8c1d286e6aa5581e9d214cbaec2bee0394bb8de8 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Fri, 6 Jan 2006 00:18:38 -0800 Subject: [PATCH] don't freeze firewire on suspend. We had a report from one loony user who tried out suspend to disk using a swap partition on a firewire drive. As the firewire thread was put to sleep it didn't work out too well. 
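The fix itself is the small hunk below. As a self-contained sketch of the idiom (invented names example_io_thread and example_work_sem, using the 2.6-era daemonize()/semaphore API this driver uses): a kernel thread that has to keep moving I/O during suspend-to-disk marks itself PF_NOFREEZE so the freezer skips it, rather than answering the freezer with try_to_freeze() in its loop.

#include <linux/sched.h>
#include <asm/semaphore.h>

static DECLARE_MUTEX_LOCKED(example_work_sem);  /* up()'d by a producer elsewhere */

static int example_io_thread(void *unused)
{
        daemonize("example_io");
        current->flags |= PF_NOFREEZE;  /* the suspend freezer must not stop this thread */

        while (1) {
                if (down_interruptible(&example_work_sem))
                        break;          /* interrupted: exit rather than freeze */
                /* service one queued unit of I/O here */
        }
        return 0;
}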
Signed-off-by: Dave Jones Cc: Pavel Machek Cc: Ben Collins Cc: Jody McIntyre Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/ieee1394/ieee1394_core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c index 64fbbb01d52a..25ef5a86f5f0 100644 --- a/drivers/ieee1394/ieee1394_core.c +++ b/drivers/ieee1394/ieee1394_core.c @@ -1027,10 +1027,10 @@ static int hpsbpkt_thread(void *__hi) daemonize("khpsbpkt"); + current->flags |= PF_NOFREEZE; + while (1) { if (down_interruptible(&khpsbpkt_sig)) { - if (try_to_freeze()) - continue; printk("khpsbpkt: received unexpected signal?!\n" ); break; } -- cgit v1.2.3 From 973bd9937569146de0917f54f05b2942f8257912 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 6 Jan 2006 00:19:07 -0800 Subject: [PATCH] s390: atomic primitives Hugh Dickins Fix the broken atomic_cmpxchg primitive. Add atomic_sub_and_test, atomic64_sub_return, atomic64_sub_and_test, atomic64_cmpxchg, atomic64_add_unless and atomic64_inc_not_zero. Replace old style atomic_compare_and_swap by atomic_cmpxchg. Shorten the whole header by defining most primitives with the two inline functions atomic_add_return and atomic_sub_return. In addition this patch contains the s390 related fixes of Hugh's "mm: fill arch atomic64 gaps" patch. Signed-off-by: Martin Schwidefsky Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/block/dasd.c | 4 ++-- drivers/s390/char/sclp_quiesce.c | 2 +- drivers/s390/char/tape_block.c | 2 +- drivers/s390/cio/ccwgroup.c | 6 +++--- drivers/s390/cio/device.c | 4 ++-- drivers/s390/net/iucv.c | 8 ++++---- drivers/s390/net/qeth_main.c | 20 +++++++++----------- 7 files changed, 22 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 7008d32433bf..62787393a209 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -7,7 +7,7 @@ * Bugreports.to..: * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 * - * $Revision: 1.167 $ + * $Revision: 1.169 $ */ #include @@ -1323,7 +1323,7 @@ void dasd_schedule_bh(struct dasd_device * device) { /* Protect against rescheduling. */ - if (atomic_compare_and_swap (0, 1, &device->tasklet_scheduled)) + if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) return; dasd_get_device(device); tasklet_hi_schedule(&device->tasklet); diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c index 83f75774df60..56fa69168898 100644 --- a/drivers/s390/char/sclp_quiesce.c +++ b/drivers/s390/char/sclp_quiesce.c @@ -32,7 +32,7 @@ do_load_quiesce_psw(void * __unused) psw_t quiesce_psw; int cpu; - if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid)) + if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1) signal_processor(smp_processor_id(), sigp_stop); /* Wait for all other cpus to enter stopped state */ for_each_online_cpu(cpu) { diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 1efc9f21229e..482e07e388c8 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c @@ -65,7 +65,7 @@ static void tapeblock_trigger_requeue(struct tape_device *device) { /* Protect against rescheduling. 
*/ - if (atomic_compare_and_swap(0, 1, &device->blk_data.requeue_scheduled)) + if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0) return; schedule_work(&device->blk_data.requeue_task); } diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index be9d2d65c22f..e849289d4f3c 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -1,7 +1,7 @@ /* * drivers/s390/cio/ccwgroup.c * bus driver for ccwgroup - * $Revision: 1.32 $ + * $Revision: 1.33 $ * * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, * IBM Corporation @@ -263,7 +263,7 @@ ccwgroup_set_online(struct ccwgroup_device *gdev) struct ccwgroup_driver *gdrv; int ret; - if (atomic_compare_and_swap(0, 1, &gdev->onoff)) + if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) return -EAGAIN; if (gdev->state == CCWGROUP_ONLINE) { ret = 0; @@ -289,7 +289,7 @@ ccwgroup_set_offline(struct ccwgroup_device *gdev) struct ccwgroup_driver *gdrv; int ret; - if (atomic_compare_and_swap(0, 1, &gdev->onoff)) + if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) return -EAGAIN; if (gdev->state == CCWGROUP_OFFLINE) { ret = 0; diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 85908cacc3b8..0590cffe62aa 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1,7 +1,7 @@ /* * drivers/s390/cio/device.c * bus driver for ccw devices - * $Revision: 1.131 $ + * $Revision: 1.137 $ * * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, * IBM Corporation @@ -374,7 +374,7 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf int i, force, ret; char *tmp; - if (atomic_compare_and_swap(0, 1, &cdev->private->onoff)) + if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) return -EAGAIN; if (cdev->drv && !try_module_get(cdev->drv->owner)) { diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c index df7647c3c100..ecb2f8fd7873 100644 --- a/drivers/s390/net/iucv.c +++ b/drivers/s390/net/iucv.c @@ -1,5 +1,5 @@ /* - * $Id: iucv.c,v 1.45 2005/04/26 22:59:06 braunu Exp $ + * $Id: iucv.c,v 1.47 2005/11/21 11:35:22 mschwide Exp $ * * IUCV network driver * @@ -29,7 +29,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* - * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.45 $ + * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.47 $ * */ @@ -355,7 +355,7 @@ do { \ static void iucv_banner(void) { - char vbuf[] = "$Revision: 1.45 $"; + char vbuf[] = "$Revision: 1.47 $"; char *version = vbuf; if ((version = strchr(version, ':'))) { @@ -477,7 +477,7 @@ grab_param(void) ptr++; if (ptr >= iucv_param_pool + PARAM_POOL_SIZE) ptr = iucv_param_pool; - } while (atomic_compare_and_swap(0, 1, &ptr->in_use)); + } while (atomic_cmpxchg(&ptr->in_use, 0, 1) != 0); hint = ptr - iucv_param_pool; memset(&ptr->param, 0, sizeof(ptr->param)); diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index f8f55cc468ba..7b2663f27817 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -1396,7 +1396,7 @@ qeth_idx_activate_get_answer(struct qeth_channel *channel, channel->ccw.cda = (__u32) __pa(iob->data); wait_event(card->wait_q, - atomic_compare_and_swap(0,1,&channel->irq_pending) == 0); + atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(setup, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); rc = ccw_device_start(channel->ccwdev, @@ -1463,7 +1463,7 @@ qeth_idx_activate_channel(struct qeth_channel *channel, memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2); wait_event(card->wait_q, - atomic_compare_and_swap(0,1,&channel->irq_pending) == 0); + atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(setup, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); rc = ccw_device_start(channel->ccwdev, @@ -1616,7 +1616,7 @@ qeth_issue_next_read(struct qeth_card *card) } qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); wait_event(card->wait_q, - atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0); + atomic_cmpxchg(&card->read.irq_pending, 0, 1) == 0); QETH_DBF_TEXT(trace, 6, "noirqpnd"); rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, (addr_t) iob, 0, 0); @@ -1882,7 +1882,7 @@ qeth_send_control_data(struct qeth_card *card, int len, spin_unlock_irqrestore(&card->lock, flags); QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); wait_event(card->wait_q, - atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0); + atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0); qeth_prepare_control_data(card, len, iob); if (IS_IPA(iob->data)) timer.expires = jiffies + QETH_IPA_TIMEOUT; @@ -1924,7 +1924,7 @@ qeth_osn_send_control_data(struct qeth_card *card, int len, QETH_DBF_TEXT(trace, 5, "osndctrd"); wait_event(card->wait_q, - atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0); + atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0); qeth_prepare_control_data(card, len, iob); QETH_DBF_TEXT(trace, 6, "osnoirqp"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); @@ -4236,9 +4236,8 @@ qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, QETH_DBF_TEXT(trace, 6, "dosndpfa"); /* spin until we get the queue ... */ - while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED, - QETH_OUT_Q_LOCKED, - &queue->state)); + while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, + QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); /* ... now we've got the queue */ index = queue->next_buf_to_fill; buffer = &queue->bufs[queue->next_buf_to_fill]; @@ -4292,9 +4291,8 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, QETH_DBF_TEXT(trace, 6, "dosndpkt"); /* spin until we get the queue ... 
*/ - while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED, - QETH_OUT_Q_LOCKED, - &queue->state)); + while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, + QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); start_index = queue->next_buf_to_fill; buffer = &queue->bufs[queue->next_buf_to_fill]; /* -- cgit v1.2.3 From 56dc6a88ec76019e0d0729165cb5b98536270e1d Mon Sep 17 00:00:00 2001 From: Peter Oberparleiter Date: Fri, 6 Jan 2006 00:19:09 -0800 Subject: [PATCH] s390: cms volume label definitions Moved definition of CMS volume label to vtoc.h and modify partitions/ibm.c to use this volume label definition instead of anonymous array. Signed-off-by: Peter Oberparleiter Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/block/dasd_diag.c | 7 ++++--- drivers/s390/block/dasd_diag.h | 23 +---------------------- 2 files changed, 5 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index ab8754e566bc..16c4b7d94bd7 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -6,7 +6,7 @@ * Bugreports.to..: * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 * - * $Revision: 1.51 $ + * $Revision: 1.52 $ */ #include @@ -25,6 +25,7 @@ #include #include #include +#include #include "dasd_int.h" #include "dasd_diag.h" @@ -329,7 +330,7 @@ dasd_diag_check_device(struct dasd_device *device) struct dasd_diag_private *private; struct dasd_diag_characteristics *rdc_data; struct dasd_diag_bio bio; - struct dasd_diag_cms_label *label; + struct vtoc_cms_label *label; blocknum_t end_block; unsigned int sb, bsize; int rc; @@ -380,7 +381,7 @@ dasd_diag_check_device(struct dasd_device *device) mdsk_term_io(device); /* figure out blocksize of device */ - label = (struct dasd_diag_cms_label *) get_zeroed_page(GFP_KERNEL); + label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL); if (label == NULL) { DEV_MESSAGE(KERN_WARNING, device, "%s", "No memory to allocate initialization request"); diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h index df31484d73a7..37edf6e91715 100644 --- a/drivers/s390/block/dasd_diag.h +++ b/drivers/s390/block/dasd_diag.h @@ -6,7 +6,7 @@ * Bugreports.to..: * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 * - * $Revision: 1.8 $ + * $Revision: 1.9 $ */ #define MDSK_WRITE_REQ 0x01 @@ -44,27 +44,6 @@ struct dasd_diag_characteristics { u8 rdev_features; } __attribute__ ((packed, aligned(4))); -struct dasd_diag_cms_label { - u8 label_id[4]; - u8 vol_id[6]; - u16 version_id; - u32 block_size; - u32 origin_ptr; - u32 usable_count; - u32 formatted_count; - u32 block_count; - u32 used_count; - u32 fst_size; - u32 fst_count; - u8 format_date[6]; - u8 reserved1[2]; - u32 disk_offset; - u32 map_block; - u32 hblk_disp; - u32 user_disp; - u8 reserved2[4]; - u8 segment_name[8]; -} __attribute__ ((packed)); #ifdef CONFIG_ARCH_S390X #define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT -- cgit v1.2.3 From 6810a2bce3aa6573faa9920487274f166fe95c6e Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Fri, 6 Jan 2006 00:19:13 -0800 Subject: [PATCH] s390: re-activated path detection If we receive path not operational indications (pnom in pmcw nonzero), we switch off those paths. To catch them becoming available again, we have to recalculate the lpm from the pmcw each time we start path verification. 
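In other words, path verification now begins by refreshing the subchannel's schib and AND-ing the installed, available, operational and cio-enabled path masks. A condensed sketch of that recalculation (field and helper names are taken from the cio code in the diff below; the error handling is trimmed to a bare return, whereas the actual patch reports -ENODEV through ccw_device_verify_done):

    /* Refresh the subchannel information block from the hardware. */
    if (stsch(sch->irq, &sch->schib))
            return;                        /* subchannel no longer there */

    /* Rebuild the logical path mask from the current pmcw contents. */
    sch->lpm = sch->schib.pmcw.pim &       /* paths installed */
               sch->schib.pmcw.pam &       /* paths available */
               sch->schib.pmcw.pom &       /* paths operational */
               sch->opm;                   /* paths enabled by cio */

Paths that were masked off after a "path not operational" indication are picked up again as soon as the pmcw reports them operational, because the stale lpm is no longer reused.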
Signed-off-by: Cornelia Huck Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/cio/device_pgid.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers') diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 0adac8a67331..757b2706d5a9 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -22,6 +22,7 @@ #include "cio_debug.h" #include "css.h" #include "device.h" +#include "ioasm.h" /* * Start Sense Path Group ID helper function. Used in ccw_device_recog @@ -364,8 +365,22 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) void ccw_device_verify_start(struct ccw_device *cdev) { + struct subchannel *sch = to_subchannel(cdev->dev.parent); + cdev->private->flags.pgid_single = 0; cdev->private->iretry = 5; + /* + * Update sch->lpm with current values to catch paths becoming + * available again. + */ + if (stsch(sch->irq, &sch->schib)) { + ccw_device_verify_done(cdev, -ENODEV); + return; + } + sch->lpm = sch->schib.pmcw.pim & + sch->schib.pmcw.pam & + sch->schib.pmcw.pom & + sch->opm; __ccw_device_verify_start(cdev); } -- cgit v1.2.3 From cfb1b55595a0dfd87b5849e8d0216c029f34445f Mon Sep 17 00:00:00 2001 From: Carsten Otte Date: Fri, 6 Jan 2006 00:19:14 -0800 Subject: [PATCH] s390: move s390_root_dev_* out of the cio layer Extract the s390_root_dev_* functions from the common I/O layer as they are also used by non-ccw device drivers. Signed-off-by: Carsten Otte Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/Makefile | 2 +- drivers/s390/block/dcssblk.c | 2 +- drivers/s390/cio/css.c | 41 ---------------------------------- drivers/s390/net/cu3088.c | 3 ++- drivers/s390/net/iucv.c | 2 +- drivers/s390/net/qeth_main.c | 1 + drivers/s390/s390_rdev.c | 53 ++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 59 insertions(+), 45 deletions(-) create mode 100644 drivers/s390/s390_rdev.c (limited to 'drivers') diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile index c99a2fe92fb0..9803c9352d78 100644 --- a/drivers/s390/Makefile +++ b/drivers/s390/Makefile @@ -2,7 +2,7 @@ # Makefile for the S/390 specific device drivers # -obj-y += s390mach.o sysinfo.o +obj-y += s390mach.o sysinfo.o s390_rdev.o obj-y += cio/ block/ char/ crypto/ net/ scsi/ drivers-y += drivers/s390/built-in.o diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 4fde41188996..2e727f49ad19 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -15,7 +15,7 @@ #include #include #include -#include // for s390_root_dev_(un)register() +#include //#define DCSSBLK_DEBUG /* Debug messages on/off */ #define DCSSBLK_NAME "dcssblk" diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 555119cacc27..7e4d57b4266f 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -481,45 +481,6 @@ struct bus_type css_bus_type = { subsys_initcall(init_channel_subsystem); -/* - * Register root devices for some drivers. The release function must not be - * in the device drivers, so we do it here. 
- */ -static void -s390_root_dev_release(struct device *dev) -{ - kfree(dev); -} - -struct device * -s390_root_dev_register(const char *name) -{ - struct device *dev; - int ret; - - if (!strlen(name)) - return ERR_PTR(-EINVAL); - dev = kmalloc(sizeof(struct device), GFP_KERNEL); - if (!dev) - return ERR_PTR(-ENOMEM); - memset(dev, 0, sizeof(struct device)); - strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE)); - dev->release = s390_root_dev_release; - ret = device_register(dev); - if (ret) { - kfree(dev); - return ERR_PTR(ret); - } - return dev; -} - -void -s390_root_dev_unregister(struct device *dev) -{ - if (dev) - device_unregister(dev); -} - int css_enqueue_subchannel_slow(unsigned long schid) { @@ -564,6 +525,4 @@ css_slow_subchannels_exist(void) MODULE_LICENSE("GPL"); EXPORT_SYMBOL(css_bus_type); -EXPORT_SYMBOL(s390_root_dev_register); -EXPORT_SYMBOL(s390_root_dev_unregister); EXPORT_SYMBOL_GPL(css_characteristics_avail); diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c index 0075894c71db..77dacb465732 100644 --- a/drivers/s390/net/cu3088.c +++ b/drivers/s390/net/cu3088.c @@ -1,5 +1,5 @@ /* - * $Id: cu3088.c,v 1.35 2005/03/30 19:28:52 richtera Exp $ + * $Id: cu3088.c,v 1.36 2005/10/25 14:37:17 cohuck Exp $ * * CTC / LCS ccw_device driver * @@ -27,6 +27,7 @@ #include #include +#include #include #include diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c index ecb2f8fd7873..ea8177392564 100644 --- a/drivers/s390/net/iucv.c +++ b/drivers/s390/net/iucv.c @@ -54,7 +54,7 @@ #include #include #include -#include //for root device stuff +#include /* FLAGS: * All flags are defined in the field IPFLAGS1 of each function diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 7b2663f27817..97f927c01a82 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -65,6 +65,7 @@ #include #include #include +#include #include "qeth.h" #include "qeth_mpc.h" diff --git a/drivers/s390/s390_rdev.c b/drivers/s390/s390_rdev.c new file mode 100644 index 000000000000..566cc3d185b6 --- /dev/null +++ b/drivers/s390/s390_rdev.c @@ -0,0 +1,53 @@ +/* + * drivers/s390/s390_rdev.c + * s390 root device + * $Revision: 1.2 $ + * + * Copyright (C) 2002, 2005 IBM Deutschland Entwicklung GmbH, + * IBM Corporation + * Author(s): Cornelia Huck (cohuck@de.ibm.com) + * Carsten Otte (cotte@de.ibm.com) + */ + +#include +#include +#include +#include + +static void +s390_root_dev_release(struct device *dev) +{ + kfree(dev); +} + +struct device * +s390_root_dev_register(const char *name) +{ + struct device *dev; + int ret; + + if (!strlen(name)) + return ERR_PTR(-EINVAL); + dev = kmalloc(sizeof(struct device), GFP_KERNEL); + if (!dev) + return ERR_PTR(-ENOMEM); + memset(dev, 0, sizeof(struct device)); + strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE)); + dev->release = s390_root_dev_release; + ret = device_register(dev); + if (ret) { + kfree(dev); + return ERR_PTR(ret); + } + return dev; +} + +void +s390_root_dev_unregister(struct device *dev) +{ + if (dev) + device_unregister(dev); +} + +EXPORT_SYMBOL(s390_root_dev_register); +EXPORT_SYMBOL(s390_root_dev_unregister); -- cgit v1.2.3 From 9a7af289660dc749d7c58234191601046a9bf488 Mon Sep 17 00:00:00 2001 From: Horst Hummel Date: Fri, 6 Jan 2006 00:19:14 -0800 Subject: [PATCH] s390: BIODASDPRRD ioctl return code The IOCTL BIODASDPRRD had no return code for 'profiling is inactive' and therefore tunedasd wrote misleading message for request-counter = 0. 
Introduce return-code EIO for inactive profiling. Signed-off-by: Horst Hummel Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/block/dasd_ioctl.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 789595b3fa09..044b75371990 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -7,7 +7,7 @@ * Bugreports.to..: * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 * - * $Revision: 1.47 $ + * $Revision: 1.50 $ * * i/o controls for the dasd driver. */ @@ -352,6 +352,9 @@ dasd_ioctl_read_profile(struct block_device *bdev, int no, long args) if (device == NULL) return -ENODEV; + if (dasd_profile_level == DASD_PROFILE_OFF) + return -EIO; + if (copy_to_user((long __user *) args, (long *) &device->profile, sizeof (struct dasd_profile_info_t))) return -EFAULT; -- cgit v1.2.3 From 1c01b8a5963aec60488c1c97d67cffd8b5275e3f Mon Sep 17 00:00:00 2001 From: Horst Hummel Date: Fri, 6 Jan 2006 00:19:15 -0800 Subject: [PATCH] s390: dasd failfast support To properly support multipath-failover handling, the linux block layer has introduced a special request flag, 'REQ_FAILFAST'. This flag is now used to return requests immediately in case the device is not operational. Signed-off-by: Horst Hummel Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/block/dasd.c | 28 ++++++++++++++++++++-------- drivers/s390/block/dasd_diag.c | 4 +++- drivers/s390/block/dasd_eckd.c | 7 ++++++- drivers/s390/block/dasd_fba.c | 4 +++- drivers/s390/block/dasd_int.h | 3 ++- 5 files changed, 34 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 62787393a209..1141a5963b67 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -7,7 +7,7 @@ * Bugreports.to..: * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 * - * $Revision: 1.169 $ + * $Revision: 1.172 $ */ #include @@ -1224,6 +1224,12 @@ __dasd_start_head(struct dasd_device * device) if (list_empty(&device->ccw_queue)) return; cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); + /* check FAILFAST */ + if (device->stopped & ~DASD_STOPPED_PENDING && + test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) { + cqr->status = DASD_CQR_FAILED; + dasd_schedule_bh(device); + } if ((cqr->status == DASD_CQR_QUEUED) && (!device->stopped)) { /* try to start the first I/O that can be started */ @@ -1750,8 +1756,10 @@ dasd_exit(void) * SECTION: common functions for ccw_driver use */ -/* initial attempt at a probe function. this can be simplified once - * the other detection code is gone */ +/* + * Initial attempt at a probe function. this can be simplified once + * the other detection code is gone. + */ int dasd_generic_probe (struct ccw_device *cdev, struct dasd_discipline *discipline) @@ -1770,8 +1778,10 @@ dasd_generic_probe (struct ccw_device *cdev, return ret; } -/* this will one day be called from a global not_oper handler. - * It is also used by driver_unregister during module unload */ +/* + * This will one day be called from a global not_oper handler. + * It is also used by driver_unregister during module unload. + */ void dasd_generic_remove (struct ccw_device *cdev) { @@ -1798,9 +1808,11 @@ dasd_generic_remove (struct ccw_device *cdev) dasd_delete_device(device); } -/* activate a device. 
This is called from dasd_{eckd,fba}_probe() when either +/* + * Activate a device. This is called from dasd_{eckd,fba}_probe() when either * the device is detected for the first time and is supposed to be used - * or the user has started activation through sysfs */ + * or the user has started activation through sysfs. + */ int dasd_generic_set_online (struct ccw_device *cdev, struct dasd_discipline *discipline) @@ -1917,7 +1929,6 @@ dasd_generic_notify(struct ccw_device *cdev, int event) if (cqr->status == DASD_CQR_IN_IO) cqr->status = DASD_CQR_FAILED; device->stopped |= DASD_STOPPED_DC_EIO; - dasd_schedule_bh(device); } else { list_for_each_entry(cqr, &device->ccw_queue, list) if (cqr->status == DASD_CQR_IN_IO) { @@ -1927,6 +1938,7 @@ dasd_generic_notify(struct ccw_device *cdev, int event) device->stopped |= DASD_STOPPED_DC_WAIT; dasd_set_timer(device, 0); } + dasd_schedule_bh(device); ret = 1; break; case CIO_OPER: diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 16c4b7d94bd7..a33d4064b537 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -6,7 +6,7 @@ * Bugreports.to..: * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 * - * $Revision: 1.52 $ + * $Revision: 1.53 $ */ #include @@ -549,6 +549,8 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req) } cqr->retries = DIAG_MAX_RETRIES; cqr->buildclk = get_clock(); + if (req->flags & REQ_FAILFAST) + set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->device = device; cqr->expires = DIAG_TIMEOUT; cqr->status = DASD_CQR_FILLED; diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 811060e10c00..efc4cf62496e 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -7,7 +7,7 @@ * Bugreports.to..: * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 * - * $Revision: 1.71 $ + * $Revision: 1.74 $ */ #include @@ -1136,6 +1136,8 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) recid++; } } + if (req->flags & REQ_FAILFAST) + set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->device = device; cqr->expires = 5 * 60 * HZ; /* 5 minutes */ cqr->lpm = private->path_data.ppm; @@ -1252,6 +1254,7 @@ dasd_eckd_release(struct block_device *bdev, int no, long args) cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; cqr->device = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); + set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->retries = 0; cqr->expires = 2 * HZ; cqr->buildclk = get_clock(); @@ -1296,6 +1299,7 @@ dasd_eckd_reserve(struct block_device *bdev, int no, long args) cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; cqr->device = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); + set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->retries = 0; cqr->expires = 2 * HZ; cqr->buildclk = get_clock(); @@ -1339,6 +1343,7 @@ dasd_eckd_steal_lock(struct block_device *bdev, int no, long args) cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; cqr->device = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); + set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->retries = 0; cqr->expires = 2 * HZ; cqr->buildclk = get_clock(); diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 28cb4613b7f5..9bac8d87a9cc 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -4,7 +4,7 @@ * Bugreports.to..: * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 * - * $Revision: 1.40 $ + * $Revision: 1.41 $ */ #include @@ 
-352,6 +352,8 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req) recid++; } } + if (req->flags & REQ_FAILFAST) + set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->device = device; cqr->expires = 5 * 60 * HZ; /* 5 minutes */ cqr->retries = 32; diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 9fab04f3056d..2fb05c4a528c 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -6,7 +6,7 @@ * Bugreports.to..: * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 * - * $Revision: 1.65 $ + * $Revision: 1.68 $ */ #ifndef DASD_INT_H @@ -208,6 +208,7 @@ struct dasd_ccw_req { /* per dasd_ccw_req flags */ #define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ +#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */ /* Signature for error recovery functions. */ typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *); -- cgit v1.2.3 From 8129ee164267dc030b8e1d541ee3643c0b9f2fa1 Mon Sep 17 00:00:00 2001 From: Frank Pavlic Date: Fri, 6 Jan 2006 00:19:20 -0800 Subject: [PATCH] s390: qdio V=V pass-through New feature V=V qdio pass-through. QDIO and HiperSockets processing in z/VM V=V guest environments (as well as V=R with z/VM running in LPAR mode) requires shadowing of all QDIO architecture queue elements. Especially the shadowing of SBALs and SLSBs structures in the hypervisor, and the need to issue SIGA SYNC operations to observe state changes, eventually causes significant CPU processing overhead in the hypervisor. The QDIO pass-through support for V=V guests avoids the shadowing of SBALs and SLSBs. This significantly reduces the hypervisor overhead for QDIO based I/O. Signed-off-by: Frank Pavlic Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/cio/chsc.h | 4 +- drivers/s390/cio/qdio.c | 589 ++++++++++++++++++++++++++++++++++++++---------- drivers/s390/cio/qdio.h | 104 ++++++--- 3 files changed, 547 insertions(+), 150 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index be20da49d147..69450134bec7 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -43,7 +43,9 @@ struct css_general_char { u32 ext_mb : 1; /* bit 48 */ u32 : 7; u32 aif_tdd : 1; /* bit 56 */ - u32 : 10; + u32 : 1; + u32 qebsm : 1; /* bit 58 */ + u32 : 8; u32 aif_osa : 1; /* bit 67 */ u32 : 28; }__attribute__((packed)); diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index eb39218b925e..e8bdfcd1d02a 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c @@ -56,7 +56,7 @@ #include "ioasm.h" #include "chsc.h" -#define VERSION_QDIO_C "$Revision: 1.108 $" +#define VERSION_QDIO_C "$Revision: 1.113 $" /****************** MODULE PARAMETER VARIABLES ********************/ MODULE_AUTHOR("Utz Bacher "); @@ -76,6 +76,7 @@ static struct qdio_perf_stats perf_stats; #endif /* QDIO_PERFORMANCE_STATS */ static int hydra_thinints; +static int is_passthrough = 0; static int omit_svs; static int indicator_used[INDICATORS_PER_CACHELINE]; @@ -136,12 +137,126 @@ qdio_release_q(struct qdio_q *q) atomic_dec(&q->use_count); } -static volatile inline void -qdio_set_slsb(volatile char *slsb, unsigned char value) +/*check ccq */ +static inline int +qdio_check_ccq(struct qdio_q *q, unsigned int ccq) { - xchg((char*)slsb,value); + char dbf_text[15]; + + if (ccq == 0 || ccq == 32 || ccq == 96) + return 0; + if (ccq == 97) + return 1; + /*notify devices immediately*/ + sprintf(dbf_text,"%d", ccq); + 
QDIO_DBF_TEXT2(1,trace,dbf_text); + return -EIO; } +/* EQBS: extract buffer states */ +static inline int +qdio_do_eqbs(struct qdio_q *q, unsigned char *state, + unsigned int *start, unsigned int *cnt) +{ + struct qdio_irq *irq; + unsigned int tmp_cnt, q_no, ccq; + int rc ; + char dbf_text[15]; + ccq = 0; + tmp_cnt = *cnt; + irq = (struct qdio_irq*)q->irq_ptr; + q_no = q->q_no; + if(!q->is_input_q) + q_no += irq->no_input_qs; + ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt); + rc = qdio_check_ccq(q, ccq); + if (rc < 0) { + QDIO_DBF_TEXT2(1,trace,"eqberr"); + sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no); + QDIO_DBF_TEXT2(1,trace,dbf_text); + q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION| + QDIO_STATUS_LOOK_FOR_ERROR, + 0, 0, 0, -1, -1, q->int_parm); + return 0; + } + return (tmp_cnt - *cnt); +} + +/* SQBS: set buffer states */ +static inline int +qdio_do_sqbs(struct qdio_q *q, unsigned char state, + unsigned int *start, unsigned int *cnt) +{ + struct qdio_irq *irq; + unsigned int tmp_cnt, q_no, ccq; + int rc; + char dbf_text[15]; + + ccq = 0; + tmp_cnt = *cnt; + irq = (struct qdio_irq*)q->irq_ptr; + q_no = q->q_no; + if(!q->is_input_q) + q_no += irq->no_input_qs; + ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt); + rc = qdio_check_ccq(q, ccq); + if (rc < 0) { + QDIO_DBF_TEXT3(1,trace,"sqberr"); + sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no); + QDIO_DBF_TEXT3(1,trace,dbf_text); + q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION| + QDIO_STATUS_LOOK_FOR_ERROR, + 0, 0, 0, -1, -1, q->int_parm); + return 0; + } + return (tmp_cnt - *cnt); +} + +static inline int +qdio_set_slsb(struct qdio_q *q, unsigned int *bufno, + unsigned char state, unsigned int *count) +{ + volatile char *slsb; + struct qdio_irq *irq; + + irq = (struct qdio_irq*)q->irq_ptr; + if (!irq->is_qebsm) { + slsb = (char *)&q->slsb.acc.val[(*bufno)]; + xchg(slsb, state); + return 1; + } + return qdio_do_sqbs(q, state, bufno, count); +} + +#ifdef CONFIG_QDIO_DEBUG +static inline void +qdio_trace_slsb(struct qdio_q *q) +{ + if (q->queue_type==QDIO_TRACE_QTYPE) { + if (q->is_input_q) + QDIO_DBF_HEX2(0,slsb_in,&q->slsb, + QDIO_MAX_BUFFERS_PER_Q); + else + QDIO_DBF_HEX2(0,slsb_out,&q->slsb, + QDIO_MAX_BUFFERS_PER_Q); + } +} +#endif + +static inline int +set_slsb(struct qdio_q *q, unsigned int *bufno, + unsigned char state, unsigned int *count) +{ + int rc; +#ifdef CONFIG_QDIO_DEBUG + qdio_trace_slsb(q); +#endif + rc = qdio_set_slsb(q, bufno, state, count); +#ifdef CONFIG_QDIO_DEBUG + qdio_trace_slsb(q); +#endif + return rc; +} static inline int qdio_siga_sync(struct qdio_q *q, unsigned int gpr2, unsigned int gpr3) @@ -155,7 +270,7 @@ qdio_siga_sync(struct qdio_q *q, unsigned int gpr2, perf_stats.siga_syncs++; #endif /* QDIO_PERFORMANCE_STATS */ - cc = do_siga_sync(q->irq, gpr2, gpr3); + cc = do_siga_sync(0x10000|q->irq, gpr2, gpr3); if (cc) QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); @@ -170,6 +285,19 @@ qdio_siga_sync_q(struct qdio_q *q) return qdio_siga_sync(q, q->mask, 0); } +static int +__do_siga_output(struct qdio_q *q, unsigned int *busy_bit) +{ + struct qdio_irq *irq; + unsigned int fc = 0; + + irq = (struct qdio_irq *) q->irq_ptr; + if (!irq->is_qebsm) + return do_siga_output(0x10000|q->irq, q->mask, busy_bit, fc); + fc |= 0x80; + return do_siga_output(irq->sch_token, q->mask, busy_bit, fc); +} + /* * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns * an access exception @@ -189,7 +317,7 @@ qdio_siga_output(struct qdio_q *q) 
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); for (;;) { - cc = do_siga_output(q->irq, q->mask, &busy_bit); + cc = __do_siga_output(q, &busy_bit); //QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit); if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) { if (!start_time) @@ -221,7 +349,7 @@ qdio_siga_input(struct qdio_q *q) perf_stats.siga_ins++; #endif /* QDIO_PERFORMANCE_STATS */ - cc = do_siga_input(q->irq, q->mask); + cc = do_siga_input(0x10000|q->irq, q->mask); if (cc) QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); @@ -230,7 +358,7 @@ qdio_siga_input(struct qdio_q *q) } /* locked by the locks in qdio_activate and qdio_cleanup */ -static __u32 volatile * +static __u32 * qdio_get_indicator(void) { int i; @@ -258,7 +386,7 @@ qdio_put_indicator(__u32 *addr) atomic_dec(&spare_indicator_usecount); } -static inline volatile void +static inline void tiqdio_clear_summary_bit(__u32 *location) { QDIO_DBF_TEXT5(0,trace,"clrsummb"); @@ -267,7 +395,7 @@ tiqdio_clear_summary_bit(__u32 *location) xchg(location,0); } -static inline volatile void +static inline void tiqdio_set_summary_bit(__u32 *location) { QDIO_DBF_TEXT5(0,trace,"setsummb"); @@ -336,7 +464,9 @@ static inline int qdio_stop_polling(struct qdio_q *q) { #ifdef QDIO_USE_PROCESSING_STATE - int gsf; + unsigned int tmp, gsf, count = 1; + unsigned char state = 0; + struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; if (!atomic_swap(&q->polling,0)) return 1; @@ -348,17 +478,22 @@ qdio_stop_polling(struct qdio_q *q) if (!q->is_input_q) return 1; - gsf=GET_SAVED_FRONTIER(q); - set_slsb(&q->slsb.acc.val[(gsf+QDIO_MAX_BUFFERS_PER_Q-1)& - (QDIO_MAX_BUFFERS_PER_Q-1)], - SLSB_P_INPUT_NOT_INIT); + tmp = gsf = GET_SAVED_FRONTIER(q); + tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) ); + set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count); + /* * we don't issue this SYNC_MEMORY, as we trust Rick T and * moreover will not use the PROCESSING state under VM, so * q->polling was 0 anyway */ /*SYNC_MEMORY;*/ - if (q->slsb.acc.val[gsf]!=SLSB_P_INPUT_PRIMED) + if (irq->is_qebsm) { + count = 1; + qdio_do_eqbs(q, &state, &gsf, &count); + } else + state = q->slsb.acc.val[gsf]; + if (state != SLSB_P_INPUT_PRIMED) return 1; /* * set our summary bit again, as otherwise there is a @@ -431,18 +566,136 @@ tiqdio_clear_global_summary(void) /************************* OUTBOUND ROUTINES *******************************/ +static int +qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q) +{ + struct qdio_irq *irq; + unsigned char state; + unsigned int cnt, count, ftc; + + irq = (struct qdio_irq *) q->irq_ptr; + if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) + SYNC_MEMORY; + + ftc = q->first_to_check; + count = qdio_min(atomic_read(&q->number_of_buffers_used), + (QDIO_MAX_BUFFERS_PER_Q-1)); + if (count == 0) + return q->first_to_check; + cnt = qdio_do_eqbs(q, &state, &ftc, &count); + if (cnt == 0) + return q->first_to_check; + switch (state) { + case SLSB_P_OUTPUT_ERROR: + QDIO_DBF_TEXT3(0,trace,"outperr"); + atomic_sub(cnt , &q->number_of_buffers_used); + if (q->qdio_error) + q->error_status_flags |= + QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; + q->qdio_error = SLSB_P_OUTPUT_ERROR; + q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR; + q->first_to_check = ftc; + break; + case SLSB_P_OUTPUT_EMPTY: + QDIO_DBF_TEXT5(0,trace,"outpempt"); + atomic_sub(cnt, &q->number_of_buffers_used); + q->first_to_check = ftc; + break; + case SLSB_CU_OUTPUT_PRIMED: + /* all buffers primed */ + QDIO_DBF_TEXT5(0,trace,"outpprim"); + break; + default: + break; + } + 
QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); + return q->first_to_check; +} + +static int +qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q) +{ + struct qdio_irq *irq; + unsigned char state; + int tmp, ftc, count, cnt; + char dbf_text[15]; + + + irq = (struct qdio_irq *) q->irq_ptr; + ftc = q->first_to_check; + count = qdio_min(atomic_read(&q->number_of_buffers_used), + (QDIO_MAX_BUFFERS_PER_Q-1)); + if (count == 0) + return q->first_to_check; + cnt = qdio_do_eqbs(q, &state, &ftc, &count); + if (cnt == 0) + return q->first_to_check; + switch (state) { + case SLSB_P_INPUT_ERROR : +#ifdef CONFIG_QDIO_DEBUG + QDIO_DBF_TEXT3(1,trace,"inperr"); + sprintf(dbf_text,"%2x,%2x",ftc,count); + QDIO_DBF_TEXT3(1,trace,dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + if (q->qdio_error) + q->error_status_flags |= + QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; + q->qdio_error = SLSB_P_INPUT_ERROR; + q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR; + atomic_sub(cnt, &q->number_of_buffers_used); + q->first_to_check = ftc; + break; + case SLSB_P_INPUT_PRIMED : + QDIO_DBF_TEXT3(0,trace,"inptprim"); + sprintf(dbf_text,"%2x,%2x",ftc,count); + QDIO_DBF_TEXT3(1,trace,dbf_text); + tmp = 0; + ftc = q->first_to_check; +#ifdef QDIO_USE_PROCESSING_STATE + if (cnt > 1) { + cnt -= 1; + tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt); + if (!tmp) + break; + } + cnt = 1; + tmp += set_slsb(q, &ftc, + SLSB_P_INPUT_PROCESSING, &cnt); + atomic_set(&q->polling, 1); +#else + tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt); +#endif + atomic_sub(tmp, &q->number_of_buffers_used); + q->first_to_check = ftc; + break; + case SLSB_CU_INPUT_EMPTY: + case SLSB_P_INPUT_NOT_INIT: + case SLSB_P_INPUT_PROCESSING: + QDIO_DBF_TEXT5(0,trace,"inpnipro"); + break; + default: + break; + } + QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); + return q->first_to_check; +} static inline int qdio_get_outbound_buffer_frontier(struct qdio_q *q) { - int f,f_mod_no; - volatile char *slsb; - int first_not_to_check; + struct qdio_irq *irq; + volatile char *slsb; + unsigned int count = 1; + int first_not_to_check, f, f_mod_no; char dbf_text[15]; QDIO_DBF_TEXT4(0,trace,"getobfro"); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); + irq = (struct qdio_irq *) q->irq_ptr; + if (irq->is_qebsm) + return qdio_qebsm_get_outbound_buffer_frontier(q); + slsb=&q->slsb.acc.val[0]; f_mod_no=f=q->first_to_check; /* @@ -484,7 +737,7 @@ check_next: QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); /* kind of process the buffer */ - set_slsb(&q->slsb.acc.val[f_mod_no], SLSB_P_OUTPUT_NOT_INIT); + set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count); /* * we increment the frontier, as this buffer @@ -597,48 +850,48 @@ qdio_kick_outbound_q(struct qdio_q *q) result=qdio_siga_output(q); - switch (result) { - case 0: - /* went smooth this time, reset timestamp */ + switch (result) { + case 0: + /* went smooth this time, reset timestamp */ #ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT3(0,trace,"cc2reslv"); - sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no, - atomic_read(&q->busy_siga_counter)); - QDIO_DBF_TEXT3(0,trace,dbf_text); + QDIO_DBF_TEXT3(0,trace,"cc2reslv"); + sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no, + atomic_read(&q->busy_siga_counter)); + QDIO_DBF_TEXT3(0,trace,dbf_text); #endif /* CONFIG_QDIO_DEBUG */ - q->timing.busy_start=0; + q->timing.busy_start=0; + break; + case (2|QDIO_SIGA_ERROR_B_BIT_SET): + /* cc=2 and busy bit: */ + atomic_inc(&q->busy_siga_counter); + + /* if the last siga was successful, save + * timestamp here */ + if (!q->timing.busy_start) + 
q->timing.busy_start=NOW; + + /* if we're in time, don't touch error_status_flags + * and siga_error */ + if (NOW-q->timing.busy_startbusy_siga_counter); - - /* if the last siga was successful, save - * timestamp here */ - if (!q->timing.busy_start) - q->timing.busy_start=NOW; - - /* if we're in time, don't touch error_status_flags - * and siga_error */ - if (NOW-q->timing.busy_startirq,q->q_no, - atomic_read(&q->busy_siga_counter)); - QDIO_DBF_TEXT3(0,trace,dbf_text); + sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no, + atomic_read(&q->busy_siga_counter)); + QDIO_DBF_TEXT3(0,trace,dbf_text); #endif /* CONFIG_QDIO_DEBUG */ - /* else fallthrough and report error */ - default: - /* for plain cc=1, 2 or 3: */ - if (q->siga_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR; + /* else fallthrough and report error */ + default: + /* for plain cc=1, 2 or 3: */ + if (q->siga_error) q->error_status_flags|= - QDIO_STATUS_LOOK_FOR_ERROR; - q->siga_error=result; - } + QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR; + q->error_status_flags|= + QDIO_STATUS_LOOK_FOR_ERROR; + q->siga_error=result; + } } static inline void @@ -743,8 +996,10 @@ qdio_outbound_processing(struct qdio_q *q) static inline int qdio_get_inbound_buffer_frontier(struct qdio_q *q) { + struct qdio_irq *irq; int f,f_mod_no; volatile char *slsb; + unsigned int count = 1; int first_not_to_check; #ifdef CONFIG_QDIO_DEBUG char dbf_text[15]; @@ -756,6 +1011,10 @@ qdio_get_inbound_buffer_frontier(struct qdio_q *q) QDIO_DBF_TEXT4(0,trace,"getibfro"); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); + irq = (struct qdio_irq *) q->irq_ptr; + if (irq->is_qebsm) + return qdio_qebsm_get_inbound_buffer_frontier(q); + slsb=&q->slsb.acc.val[0]; f_mod_no=f=q->first_to_check; /* @@ -792,19 +1051,19 @@ check_next: * kill VM in terms of CP overhead */ if (q->siga_sync) { - set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT); + set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count); } else { /* set the previous buffer to NOT_INIT. The current * buffer will be set to PROCESSING at the end of * this function to avoid further interrupts. 
*/ if (last_position>=0) - set_slsb(&slsb[last_position], - SLSB_P_INPUT_NOT_INIT); + set_slsb(q, &last_position, + SLSB_P_INPUT_NOT_INIT, &count); atomic_set(&q->polling,1); last_position=f_mod_no; } #else /* QDIO_USE_PROCESSING_STATE */ - set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT); + set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count); #endif /* QDIO_USE_PROCESSING_STATE */ /* * not needed, as the inbound queue will be synced on the next @@ -829,7 +1088,7 @@ check_next: QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); /* kind of process the buffer */ - set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT); + set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count); if (q->qdio_error) q->error_status_flags|= @@ -857,7 +1116,7 @@ out: #ifdef QDIO_USE_PROCESSING_STATE if (last_position>=0) - set_slsb(&slsb[last_position],SLSB_P_INPUT_PROCESSING); + set_slsb(q, &last_position, SLSB_P_INPUT_NOT_INIT, &count); #endif /* QDIO_USE_PROCESSING_STATE */ QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); @@ -902,6 +1161,10 @@ static inline int tiqdio_is_inbound_q_done(struct qdio_q *q) { int no_used; + unsigned int start_buf, count; + unsigned char state = 0; + struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; + #ifdef CONFIG_QDIO_DEBUG char dbf_text[15]; #endif @@ -927,8 +1190,13 @@ tiqdio_is_inbound_q_done(struct qdio_q *q) if (!q->siga_sync) /* we'll check for more primed buffers in qeth_stop_polling */ return 0; - - if (q->slsb.acc.val[q->first_to_check]!=SLSB_P_INPUT_PRIMED) + if (irq->is_qebsm) { + count = 1; + start_buf = q->first_to_check; + qdio_do_eqbs(q, &state, &start_buf, &count); + } else + state = q->slsb.acc.val[q->first_to_check]; + if (state != SLSB_P_INPUT_PRIMED) /* * nothing more to do, if next buffer is not PRIMED. * note that we did a SYNC_MEMORY before, that there @@ -955,6 +1223,10 @@ static inline int qdio_is_inbound_q_done(struct qdio_q *q) { int no_used; + unsigned int start_buf, count; + unsigned char state = 0; + struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; + #ifdef CONFIG_QDIO_DEBUG char dbf_text[15]; #endif @@ -973,8 +1245,13 @@ qdio_is_inbound_q_done(struct qdio_q *q) QDIO_DBF_TEXT4(0,trace,dbf_text); return 1; } - - if (q->slsb.acc.val[q->first_to_check]==SLSB_P_INPUT_PRIMED) { + if (irq->is_qebsm) { + count = 1; + start_buf = q->first_to_check; + qdio_do_eqbs(q, &state, &start_buf, &count); + } else + state = q->slsb.acc.val[q->first_to_check]; + if (state == SLSB_P_INPUT_PRIMED) { /* we got something to do */ QDIO_DBF_TEXT4(0,trace,"inqisntA"); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); @@ -1523,11 +1800,11 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev, QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); /* fill in slsb */ - for (j=0;jslsb.acc.val[j], - SLSB_P_INPUT_NOT_INIT); -/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/ - } + if (!irq_ptr->is_qebsm) { + unsigned int count = 1; + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count); + } } for (i=0;islsb.acc.val[j], - SLSB_P_OUTPUT_NOT_INIT); -/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/ - } + if (!irq_ptr->is_qebsm) { + unsigned int count = 1; + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count); + } } } @@ -1905,7 +2182,7 @@ int qdio_synchronize(struct ccw_device *cdev, unsigned int flags, unsigned int queue_number) { - int cc; + int cc = 0; struct qdio_q *q; struct qdio_irq *irq_ptr; void *ptr; @@ -1929,12 +2206,14 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags, 
q=irq_ptr->input_qs[queue_number]; if (!q) return -EINVAL; - cc = do_siga_sync(q->irq, 0, q->mask); + if (!(irq_ptr->is_qebsm)) + cc = do_siga_sync(0x10000|q->irq, 0, q->mask); } else if (flags&QDIO_FLAG_SYNC_OUTPUT) { q=irq_ptr->output_qs[queue_number]; if (!q) return -EINVAL; - cc = do_siga_sync(q->irq, q->mask, 0); + if (!(irq_ptr->is_qebsm)) + cc = do_siga_sync(0x10000|q->irq, q->mask, 0); } else return -EINVAL; @@ -1945,12 +2224,49 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags, return cc; } -static unsigned char -qdio_check_siga_needs(int sch) +static inline void +qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, + unsigned long token) +{ + struct qdio_q *q; + int i; + unsigned int count, start_buf; + char dbf_text[15]; + + /*check if QEBSM is disabled */ + if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) { + irq_ptr->is_qebsm = 0; + irq_ptr->sch_token = 0; + irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; + QDIO_DBF_TEXT0(0,setup,"noV=V"); + return; + } + irq_ptr->sch_token = token; + /*input queue*/ + for (i = 0; i < irq_ptr->no_input_qs;i++) { + q = irq_ptr->input_qs[i]; + count = QDIO_MAX_BUFFERS_PER_Q; + start_buf = 0; + set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count); + } + sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm); + QDIO_DBF_TEXT0(0,setup,dbf_text); + sprintf(dbf_text,"%8lx",irq_ptr->sch_token); + QDIO_DBF_TEXT0(0,setup,dbf_text); + /*output queue*/ + for (i = 0; i < irq_ptr->no_output_qs; i++) { + q = irq_ptr->output_qs[i]; + count = QDIO_MAX_BUFFERS_PER_Q; + start_buf = 0; + set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count); + } +} + +static void +qdio_get_ssqd_information(struct qdio_irq *irq_ptr) { int result; unsigned char qdioac; - struct { struct chsc_header request; u16 reserved1; @@ -1964,67 +2280,80 @@ qdio_check_siga_needs(int sch) u8 reserved5; u16 sch; u8 qfmt; - u8 reserved6; - u8 qdioac; + u8 parm; + u8 qdioac1; u8 sch_class; u8 reserved7; u8 icnt; u8 reserved8; u8 ocnt; + u8 reserved9; + u8 mbccnt; + u16 qdioac2; + u64 sch_token; } *ssqd_area; + QDIO_DBF_TEXT0(0,setup,"getssqd"); + qdioac = 0; ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!ssqd_area) { QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \ - "SIGAs for sch x%x.\n", sch); - return CHSC_FLAG_SIGA_INPUT_NECESSARY || - CHSC_FLAG_SIGA_OUTPUT_NECESSARY || - CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ + "SIGAs for sch x%x.\n", irq_ptr->irq); + irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || + CHSC_FLAG_SIGA_OUTPUT_NECESSARY || + CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ + irq_ptr->is_qebsm = 0; + irq_ptr->sch_token = 0; + irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; + return; } + ssqd_area->request = (struct chsc_header) { .length = 0x0010, .code = 0x0024, }; - - ssqd_area->first_sch = sch; - ssqd_area->last_sch = sch; - - result=chsc(ssqd_area); + ssqd_area->first_sch = irq_ptr->irq; + ssqd_area->last_sch = irq_ptr->irq; + result = chsc(ssqd_area); if (result) { QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \ "SIGAs for sch x%x.\n", - result,sch); + result, irq_ptr->irq); qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || CHSC_FLAG_SIGA_OUTPUT_NECESSARY || CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ + irq_ptr->is_qebsm = 0; goto out; } if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) { QDIO_PRINT_WARN("response upon checking SIGA needs " \ "is 0x%x. 
Using all SIGAs for sch x%x.\n", - ssqd_area->response.code, sch); + ssqd_area->response.code, irq_ptr->irq); qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || CHSC_FLAG_SIGA_OUTPUT_NECESSARY || CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ + irq_ptr->is_qebsm = 0; goto out; } if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) || !(ssqd_area->flags & CHSC_FLAG_VALIDITY) || - (ssqd_area->sch != sch)) { + (ssqd_area->sch != irq_ptr->irq)) { QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \ - "using all SIGAs.\n",sch); + "using all SIGAs.\n",irq_ptr->irq); qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | CHSC_FLAG_SIGA_OUTPUT_NECESSARY | CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */ + irq_ptr->is_qebsm = 0; goto out; } - - qdioac = ssqd_area->qdioac; + qdioac = ssqd_area->qdioac1; out: + qdio_check_subchannel_qebsm(irq_ptr, qdioac, + ssqd_area->sch_token); free_page ((unsigned long) ssqd_area); - return qdioac; + irq_ptr->qdioac = qdioac; } static unsigned int @@ -2055,6 +2384,13 @@ tiqdio_check_chsc_availability(void) sprintf(dbf_text,"hydrati%1x", hydra_thinints); QDIO_DBF_TEXT0(0,setup,dbf_text); +#ifdef CONFIG_ARCH_S390X + /* Check for QEBSM support in general (bit 58). */ + is_passthrough = css_general_characteristics.qebsm; +#endif + sprintf(dbf_text,"cssQBS:%1x", is_passthrough); + QDIO_DBF_TEXT0(0,setup,dbf_text); + /* Check for aif time delay disablement fac (bit 56). If installed, * omit svs even under lpar (good point by rick again) */ omit_svs = css_general_characteristics.aif_tdd; @@ -2698,7 +3034,7 @@ int qdio_fill_irq(struct qdio_initialize *init_data) QDIO_DBF_TEXT2(0,setup,dbf_text); if (irq_ptr->is_thinint_irq) { - irq_ptr->dev_st_chg_ind=qdio_get_indicator(); + irq_ptr->dev_st_chg_ind = qdio_get_indicator(); QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*)); if (!irq_ptr->dev_st_chg_ind) { QDIO_PRINT_WARN("no indicator location available " \ @@ -2747,6 +3083,10 @@ int qdio_fill_irq(struct qdio_initialize *init_data) irq_ptr->qdr->qkey=QDIO_STORAGE_KEY; /* fill in qib */ + irq_ptr->is_qebsm = is_passthrough; + if (irq_ptr->is_qebsm) + irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM; + irq_ptr->qib.qfmt=init_data->q_format; if (init_data->no_input_qs) irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib); @@ -2884,7 +3224,7 @@ qdio_establish(struct qdio_initialize *init_data) return -EIO; } - irq_ptr->qdioac=qdio_check_siga_needs(irq_ptr->irq); + qdio_get_ssqd_information(irq_ptr); /* if this gets set once, we're running under VM and can omit SVSes */ if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY) omit_svs=1; @@ -3015,30 +3355,40 @@ static inline void qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, unsigned int count, struct qdio_buffer *buffers) { + struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; + qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1); + if (irq->is_qebsm) { + while (count) + set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count); + return; + } for (;;) { - set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_INPUT_EMPTY); + set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count); count--; if (!count) break; - qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1); + qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1); } - - /* not necessary, as the queues are synced during the SIGA read */ - /*SYNC_MEMORY;*/ } static inline void qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, unsigned int count, struct qdio_buffer *buffers) { + struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; + + qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1); + if (irq->is_qebsm) { + while 
(count) + set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count); + return; + } + for (;;) { - set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_OUTPUT_PRIMED); + set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count); count--; if (!count) break; - qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1); + qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1); } - - /* SIGA write will sync the queues */ - /*SYNC_MEMORY;*/ } static inline void @@ -3083,6 +3433,9 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, struct qdio_buffer *buffers) { int used_elements; + unsigned int cnt, start_buf; + unsigned char state = 0; + struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; /* This is the outbound handling of queues */ #ifdef QDIO_PERFORMANCE_STATS @@ -3115,9 +3468,15 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, * SYNC_MEMORY :-/ ), we try to * fast-requeue buffers */ - if (q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1) - &(QDIO_MAX_BUFFERS_PER_Q-1)]!= - SLSB_CU_OUTPUT_PRIMED) { + if (irq->is_qebsm) { + cnt = 1; + start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) & + (QDIO_MAX_BUFFERS_PER_Q-1)); + qdio_do_eqbs(q, &state, &start_buf, &cnt); + } else + state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1) + &(QDIO_MAX_BUFFERS_PER_Q-1) ]; + if (state != SLSB_CU_OUTPUT_PRIMED) { qdio_kick_outbound_q(q); } else { QDIO_DBF_TEXT3(0,trace, "fast-req"); diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 328e31cc6854..b5d303e79a24 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -3,14 +3,13 @@ #include -#define VERSION_CIO_QDIO_H "$Revision: 1.33 $" +#define VERSION_CIO_QDIO_H "$Revision: 1.37 $" #ifdef CONFIG_QDIO_DEBUG #define QDIO_VERBOSE_LEVEL 9 #else /* CONFIG_QDIO_DEBUG */ #define QDIO_VERBOSE_LEVEL 5 #endif /* CONFIG_QDIO_DEBUG */ - #define QDIO_USE_PROCESSING_STATE #ifdef CONFIG_QDIO_PERF_STATS @@ -265,6 +264,58 @@ QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ /* * Some instructions as assembly */ + +static inline int +do_sqbs(unsigned long sch, unsigned char state, int queue, + unsigned int *start, unsigned int *count) +{ +#ifdef CONFIG_ARCH_S390X + register unsigned long _ccq asm ("0") = *count; + register unsigned long _sch asm ("1") = sch; + unsigned long _queuestart = ((unsigned long)queue << 32) | *start; + + asm volatile ( + " .insn rsy,0xeb000000008A,%1,0,0(%2)\n\t" + : "+d" (_ccq), "+d" (_queuestart) + : "d" ((unsigned long)state), "d" (_sch) + : "memory", "cc" + ); + *count = _ccq & 0xff; + *start = _queuestart & 0xff; + + return (_ccq >> 32) & 0xff; +#else + return 0; +#endif +} + +static inline int +do_eqbs(unsigned long sch, unsigned char *state, int queue, + unsigned int *start, unsigned int *count) +{ +#ifdef CONFIG_ARCH_S390X + register unsigned long _ccq asm ("0") = *count; + register unsigned long _sch asm ("1") = sch; + unsigned long _queuestart = ((unsigned long)queue << 32) | *start; + unsigned long _state = 0; + + asm volatile ( + " .insn rrf,0xB99c0000,%1,%2,0,0 \n\t" + : "+d" (_ccq), "+d" (_queuestart), "+d" (_state) + : "d" (_sch) + : "memory", "cc" + ); + *count = _ccq & 0xff; + *start = _queuestart & 0xff; + *state = _state & 0xff; + + return (_ccq >> 32) & 0xff; +#else + return 0; +#endif +} + + static inline int do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2) { @@ -280,7 +331,7 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2) "ipm %0 \n\t" "srl %0,28 \n\t" : "=d" (cc) - : "d" (0x10000|irq), "d" (mask1), "d" (mask2) + : "d" (irq), "d" (mask1), "d" 
(mask2) : "cc", "0", "1", "2", "3" ); #else /* CONFIG_ARCH_S390X */ @@ -293,7 +344,7 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2) "ipm %0 \n\t" "srl %0,28 \n\t" : "=d" (cc) - : "d" (0x10000|irq), "d" (mask1), "d" (mask2) + : "d" (irq), "d" (mask1), "d" (mask2) : "cc", "0", "1", "2", "3" ); #endif /* CONFIG_ARCH_S390X */ @@ -314,7 +365,7 @@ do_siga_input(unsigned int irq, unsigned int mask) "ipm %0 \n\t" "srl %0,28 \n\t" : "=d" (cc) - : "d" (0x10000|irq), "d" (mask) + : "d" (irq), "d" (mask) : "cc", "0", "1", "2", "memory" ); #else /* CONFIG_ARCH_S390X */ @@ -326,7 +377,7 @@ do_siga_input(unsigned int irq, unsigned int mask) "ipm %0 \n\t" "srl %0,28 \n\t" : "=d" (cc) - : "d" (0x10000|irq), "d" (mask) + : "d" (irq), "d" (mask) : "cc", "0", "1", "2", "memory" ); #endif /* CONFIG_ARCH_S390X */ @@ -335,7 +386,8 @@ do_siga_input(unsigned int irq, unsigned int mask) } static inline int -do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb) +do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb, + unsigned int fc) { int cc; __u32 busy_bit; @@ -366,14 +418,14 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb) ".long 0b,2b \n\t" ".previous \n\t" : "=d" (cc), "=d" (busy_bit) - : "d" (0x10000|irq), "d" (mask), + : "d" (irq), "d" (mask), "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) : "cc", "0", "1", "2", "memory" ); #else /* CONFIG_ARCH_S390X */ asm volatile ( - "lghi 0,0 \n\t" - "llgfr 1,%2 \n\t" + "llgfr 0,%5 \n\t" + "lgr 1,%2 \n\t" "llgfr 2,%3 \n\t" "siga 0 \n\t" "0:" @@ -391,8 +443,8 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb) ".quad 0b,1b \n\t" ".previous \n\t" : "=d" (cc), "=d" (busy_bit) - : "d" (0x10000|irq), "d" (mask), - "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) + : "d" (irq), "d" (mask), + "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION), "d" (fc) : "cc", "0", "1", "2", "memory" ); #endif /* CONFIG_ARCH_S390X */ @@ -494,33 +546,12 @@ struct qdio_perf_stats { #define QDIO_GET_ADDR(x) ((__u32)(long)x) #endif /* CONFIG_ARCH_S390X */ -#ifdef CONFIG_QDIO_DEBUG -#define set_slsb(x,y) \ - if(q->queue_type==QDIO_TRACE_QTYPE) { \ - if(q->is_input_q) { \ - QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \ - } else { \ - QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \ - } \ - } \ - qdio_set_slsb(x,y); \ - if(q->queue_type==QDIO_TRACE_QTYPE) { \ - if(q->is_input_q) { \ - QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \ - } else { \ - QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \ - } \ - } -#else /* CONFIG_QDIO_DEBUG */ -#define set_slsb(x,y) qdio_set_slsb(x,y) -#endif /* CONFIG_QDIO_DEBUG */ - struct qdio_q { volatile struct slsb slsb; char unused[QDIO_MAX_BUFFERS_PER_Q]; - __u32 * volatile dev_st_chg_ind; + __u32 * dev_st_chg_ind; int is_input_q; int irq; @@ -568,6 +599,7 @@ struct qdio_q { struct tasklet_struct tasklet; #endif /* QDIO_USE_TIMERS_FOR_POLLING */ + enum qdio_irq_states state; /* used to store the error condition during a data transfer */ @@ -624,6 +656,10 @@ struct qdio_irq { unsigned int hydra_gives_outbound_pcis; unsigned int sync_done_on_outb_pcis; + /* QEBSM facility */ + unsigned int is_qebsm; + unsigned long sch_token; + enum qdio_irq_states state; unsigned int no_input_qs; -- cgit v1.2.3 From a8237fc4108060402d904bea5e1062e22e731969 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Fri, 6 Jan 2006 00:19:21 -0800 Subject: [PATCH] s390: introduce struct subchannel_id This patch introduces a struct subchannel_id containing the subchannel number (formerly referred 
to as "irq") and switches code formerly relying on the subchannel number over to it. While we're touching inline assemblies anyway, make sure they have correct memory constraints. Signed-off-by: Cornelia Huck Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/cio/blacklist.c | 23 +++++---- drivers/s390/cio/chsc.c | 63 +++++++++++++---------- drivers/s390/cio/cio.c | 84 ++++++++++++++++-------------- drivers/s390/cio/cio.h | 11 ++-- drivers/s390/cio/cmf.c | 8 +-- drivers/s390/cio/css.c | 69 +++++++++++++------------ drivers/s390/cio/css.h | 13 ++--- drivers/s390/cio/device.c | 17 +++++-- drivers/s390/cio/device.h | 1 + drivers/s390/cio/device_fsm.c | 18 +++---- drivers/s390/cio/device_id.c | 6 +-- drivers/s390/cio/device_ops.c | 4 +- drivers/s390/cio/device_pgid.c | 13 ++--- drivers/s390/cio/device_status.c | 8 +-- drivers/s390/cio/ioasm.h | 55 +++++++++++--------- drivers/s390/cio/qdio.c | 107 ++++++++++++++++++++------------------- drivers/s390/cio/qdio.h | 26 +++++----- drivers/s390/cio/schid.h | 25 +++++++++ 18 files changed, 313 insertions(+), 238 deletions(-) create mode 100644 drivers/s390/cio/schid.h (limited to 'drivers') diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index a1c52a682191..a4b03031ff50 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -35,7 +35,7 @@ */ /* 65536 bits to indicate if a devno is blacklisted or not */ -#define __BL_DEV_WORDS ((__MAX_SUBCHANNELS + (8*sizeof(long) - 1)) / \ +#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \ (8*sizeof(long))) static unsigned long bl_dev[__BL_DEV_WORDS]; typedef enum {add, free} range_action; @@ -50,7 +50,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to) if (!to) to = from; - if (from > to || to > __MAX_SUBCHANNELS) { + if (from > to || to > __MAX_SUBCHANNEL) { printk (KERN_WARNING "Invalid blacklist range " "0x%04x to 0x%04x, skipping\n", from, to); return; @@ -143,7 +143,7 @@ blacklist_parse_parameters (char *str, range_action action) if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 || strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) { from = 0; - to = __MAX_SUBCHANNELS; + to = __MAX_SUBCHANNEL; str += 3; } else { int rc; @@ -226,20 +226,21 @@ is_blacklisted (int devno) static inline void s390_redo_validation (void) { - unsigned int irq; + struct subchannel_id schid; CIO_TRACE_EVENT (0, "redoval"); - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { + init_subchannel_id(&schid); + do { int ret; struct subchannel *sch; - sch = get_subchannel_by_schid(irq); + sch = get_subchannel_by_schid(schid); if (sch) { /* Already known. */ put_device(&sch->dev); continue; } - ret = css_probe_device(irq); + ret = css_probe_device(schid); if (ret == -ENXIO) break; /* We're through. */ if (ret == -ENOMEM) @@ -248,7 +249,7 @@ s390_redo_validation (void) * panic. 
*/ break; - } + } while (schid.sch_no++ < __MAX_SUBCHANNEL); } /* @@ -289,12 +290,12 @@ static int cio_ignore_read (char *page, char **start, off_t off, len = 0; for (devno = off; /* abuse the page variable * as counter, see fs/proc/generic.c */ - devno < __MAX_SUBCHANNELS && len + entry_size < count; devno++) { + devno < __MAX_SUBCHANNEL && len + entry_size < count; devno++) { if (!test_bit(devno, bl_dev)) continue; len += sprintf(page + len, "0.0.%04lx", devno); if (test_bit(devno + 1, bl_dev)) { /* print range */ - while (++devno < __MAX_SUBCHANNELS) + while (++devno < __MAX_SUBCHANNEL) if (!test_bit(devno, bl_dev)) break; len += sprintf(page + len, "-0.0.%04lx", --devno); @@ -302,7 +303,7 @@ static int cio_ignore_read (char *page, char **start, off_t off, len += sprintf(page + len, "\n"); } - if (devno < __MAX_SUBCHANNELS) + if (devno < __MAX_SUBCHANNEL) *eof = 1; *start = (char *) (devno - off); /* number of checked entries */ return len; diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index fa3c23b80e3a..aff5d149b729 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -104,8 +104,8 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) .code = 0x0004, }; - ssd_area->f_sch = sch->irq; - ssd_area->l_sch = sch->irq; + ssd_area->f_sch = sch->schid.sch_no; + ssd_area->l_sch = sch->schid.sch_no; ccode = chsc(ssd_area); if (ccode > 0) { @@ -147,7 +147,8 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) */ if (ssd_area->st > 3) { /* uhm, that looks strange... */ CIO_CRW_EVENT(0, "Strange subchannel type %d" - " for sch %04x\n", ssd_area->st, sch->irq); + " for sch %04x\n", ssd_area->st, + sch->schid.sch_no); /* * There may have been a new subchannel type defined in the * time since this code was written; since we don't know which @@ -157,7 +158,7 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) } else { const char *type[4] = {"I/O", "chsc", "message", "ADM"}; CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n", - sch->irq, type[ssd_area->st]); + sch->schid.sch_no, type[ssd_area->st]); sch->ssd_info.valid = 1; sch->ssd_info.type = ssd_area->st; @@ -232,7 +233,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) mask = 0x80 >> j; spin_lock(&sch->lock); - stsch(sch->irq, &schib); + stsch(sch->schid, &schib); if (!schib.pmcw.dnv) goto out_unreg; memcpy(&sch->schib, &schib, sizeof(struct schib)); @@ -284,7 +285,7 @@ out_unlock: out_unreg: spin_unlock(&sch->lock); sch->lpm = 0; - if (css_enqueue_subchannel_slow(sch->irq)) { + if (css_enqueue_subchannel_slow(sch->schid)) { css_clear_subchannel_slow_list(); need_rescan = 1; } @@ -337,7 +338,7 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask, * new path information and eventually check for logically * offline chpids. 
*/ - ccode = stsch(sch->irq, &sch->schib); + ccode = stsch(sch->schid, &sch->schib); if (ccode > 0) return 0; @@ -348,7 +349,8 @@ static int s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) { struct subchannel *sch; - int irq, rc; + int rc; + struct subchannel_id schid; char dbf_txt[15]; sprintf(dbf_txt, "accpr%x", chpid); @@ -370,10 +372,11 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) return 0; /* no need to do the rest */ rc = 0; - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { + init_subchannel_id(&schid); + do { int chp_mask, old_lpm; - sch = get_subchannel_by_schid(irq); + sch = get_subchannel_by_schid(schid); if (!sch) { struct schib schib; int ret; @@ -385,7 +388,7 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) * that beast may be on we'll have to do a stsch * on all devices, grr... */ - if (stsch(irq, &schib)) { + if (stsch(schid, &schib)) { /* We're through */ if (need_rescan) rc = -EAGAIN; @@ -396,7 +399,7 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) continue; } /* Put it on the slow path. */ - ret = css_enqueue_subchannel_slow(irq); + ret = css_enqueue_subchannel_slow(schid); if (ret) { css_clear_subchannel_slow_list(); need_rescan = 1; @@ -428,7 +431,7 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) put_device(&sch->dev); if (fla_mask == 0xffff) break; - } + } while (schid.sch_no++ < __MAX_SUBCHANNEL); return rc; } @@ -608,7 +611,8 @@ static int chp_add(int chpid) { struct subchannel *sch; - int irq, ret, rc; + int ret, rc; + struct subchannel_id schid; char dbf_txt[15]; if (!get_chp_status(chpid)) @@ -618,14 +622,15 @@ chp_add(int chpid) CIO_TRACE_EVENT(2, dbf_txt); rc = 0; - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { + init_subchannel_id(&schid); + do { int i; - sch = get_subchannel_by_schid(irq); + sch = get_subchannel_by_schid(schid); if (!sch) { struct schib schib; - if (stsch(irq, &schib)) { + if (stsch(schid, &schib)) { /* We're through */ if (need_rescan) rc = -EAGAIN; @@ -636,7 +641,7 @@ chp_add(int chpid) continue; } /* Put it on the slow path. */ - ret = css_enqueue_subchannel_slow(irq); + ret = css_enqueue_subchannel_slow(schid); if (ret) { css_clear_subchannel_slow_list(); need_rescan = 1; @@ -648,7 +653,7 @@ chp_add(int chpid) spin_lock(&sch->lock); for (i=0; i<8; i++) if (sch->schib.pmcw.chpid[i] == chpid) { - if (stsch(sch->irq, &sch->schib) != 0) { + if (stsch(sch->schid, &sch->schib) != 0) { /* Endgame. */ spin_unlock(&sch->lock); return rc; @@ -669,7 +674,7 @@ chp_add(int chpid) spin_unlock(&sch->lock); put_device(&sch->dev); - } + } while (schid.sch_no++ < __MAX_SUBCHANNEL); return rc; } @@ -702,7 +707,7 @@ __check_for_io_and_kill(struct subchannel *sch, int index) if (!device_is_online(sch)) /* cio could be doing I/O. */ return 0; - cc = stsch(sch->irq, &sch->schib); + cc = stsch(sch->schid, &sch->schib); if (cc) return 0; if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) { @@ -743,7 +748,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) * just varied off path. Then kill it. 
*/ if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) { - if (css_enqueue_subchannel_slow(sch->irq)) { + if (css_enqueue_subchannel_slow(sch->schid)) { css_clear_subchannel_slow_list(); need_rescan = 1; } @@ -789,7 +794,8 @@ static int s390_vary_chpid( __u8 chpid, int on) { char dbf_text[15]; - int status, irq, ret; + int status, ret; + struct subchannel_id schid; struct subchannel *sch; sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid); @@ -818,26 +824,27 @@ s390_vary_chpid( __u8 chpid, int on) if (!on) goto out; /* Scan for new devices on varied on path. */ - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { + init_subchannel_id(&schid); + do { struct schib schib; if (need_rescan) break; - sch = get_subchannel_by_schid(irq); + sch = get_subchannel_by_schid(schid); if (sch) { put_device(&sch->dev); continue; } - if (stsch(irq, &schib)) + if (stsch(schid, &schib)) /* We're through */ break; /* Put it on the slow path. */ - ret = css_enqueue_subchannel_slow(irq); + ret = css_enqueue_subchannel_slow(schid); if (ret) { css_clear_subchannel_slow_list(); need_rescan = 1; } - } + } while (schid.sch_no++ < __MAX_SUBCHANNEL); out: if (need_rescan || css_slow_subchannels_exist()) queue_work(slow_path_wq, &slow_path_work); diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 185bc73c3ecd..396bada65f86 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -135,7 +135,7 @@ cio_tpi(void) return 0; irb = (struct irb *) __LC_IRB; /* Store interrupt response block to lowcore. */ - if (tsch (tpi_info->irq, irb) != 0) + if (tsch (tpi_info->schid, irb) != 0) /* Not status pending or not operational. */ return 1; sch = (struct subchannel *)(unsigned long)tpi_info->intparm; @@ -163,10 +163,10 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) else sch->lpm = 0; - stsch (sch->irq, &sch->schib); + stsch (sch->schid, &sch->schib); CIO_MSG_EVENT(0, "cio_start: 'not oper' status for " - "subchannel %04x!\n", sch->irq); + "subchannel %04x!\n", sch->schid.sch_no); sprintf(dbf_text, "no%s", sch->dev.bus_id); CIO_TRACE_EVENT(0, dbf_text); CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); @@ -204,7 +204,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ sch->orb.key = key >> 4; /* issue "Start Subchannel" */ sch->orb.cpa = (__u32) __pa (cpa); - ccode = ssch (sch->irq, &sch->orb); + ccode = ssch (sch->schid, &sch->orb); /* process condition code */ sprintf (dbf_txt, "ccode:%d", ccode); @@ -243,7 +243,7 @@ cio_resume (struct subchannel *sch) CIO_TRACE_EVENT (4, "resIO"); CIO_TRACE_EVENT (4, sch->dev.bus_id); - ccode = rsch (sch->irq); + ccode = rsch (sch->schid); sprintf (dbf_txt, "ccode:%d", ccode); CIO_TRACE_EVENT (4, dbf_txt); @@ -283,7 +283,7 @@ cio_halt(struct subchannel *sch) /* * Issue "Halt subchannel" and process condition code */ - ccode = hsch (sch->irq); + ccode = hsch (sch->schid); sprintf (dbf_txt, "ccode:%d", ccode); CIO_TRACE_EVENT (2, dbf_txt); @@ -318,7 +318,7 @@ cio_clear(struct subchannel *sch) /* * Issue "Clear subchannel" and process condition code */ - ccode = csch (sch->irq); + ccode = csch (sch->schid); sprintf (dbf_txt, "ccode:%d", ccode); CIO_TRACE_EVENT (2, dbf_txt); @@ -351,7 +351,7 @@ cio_cancel (struct subchannel *sch) CIO_TRACE_EVENT (2, "cancelIO"); CIO_TRACE_EVENT (2, sch->dev.bus_id); - ccode = xsch (sch->irq); + ccode = xsch (sch->schid); sprintf (dbf_txt, "ccode:%d", ccode); CIO_TRACE_EVENT (2, dbf_txt); @@ -359,7 +359,7 @@ cio_cancel (struct subchannel *sch) switch (ccode) { case 0: /* success */ /* Update information in 
scsw. */ - stsch (sch->irq, &sch->schib); + stsch (sch->schid, &sch->schib); return 0; case 1: /* status pending */ return -EBUSY; @@ -381,7 +381,7 @@ cio_modify (struct subchannel *sch) ret = 0; for (retry = 0; retry < 5; retry++) { - ccode = msch_err (sch->irq, &sch->schib); + ccode = msch_err (sch->schid, &sch->schib); if (ccode < 0) /* -EIO if msch gets a program check. */ return ccode; switch (ccode) { @@ -414,7 +414,7 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc) CIO_TRACE_EVENT (2, "ensch"); CIO_TRACE_EVENT (2, sch->dev.bus_id); - ccode = stsch (sch->irq, &sch->schib); + ccode = stsch (sch->schid, &sch->schib); if (ccode) return -ENODEV; @@ -432,13 +432,13 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc) */ sch->schib.pmcw.csense = 0; if (ret == 0) { - stsch (sch->irq, &sch->schib); + stsch (sch->schid, &sch->schib); if (sch->schib.pmcw.ena) break; } if (ret == -EBUSY) { struct irb irb; - if (tsch(sch->irq, &irb) != 0) + if (tsch(sch->schid, &irb) != 0) break; } } @@ -461,7 +461,7 @@ cio_disable_subchannel (struct subchannel *sch) CIO_TRACE_EVENT (2, "dissch"); CIO_TRACE_EVENT (2, sch->dev.bus_id); - ccode = stsch (sch->irq, &sch->schib); + ccode = stsch (sch->schid, &sch->schib); if (ccode == 3) /* Not operational. */ return -ENODEV; @@ -485,7 +485,7 @@ cio_disable_subchannel (struct subchannel *sch) */ break; if (ret == 0) { - stsch (sch->irq, &sch->schib); + stsch (sch->schid, &sch->schib); if (!sch->schib.pmcw.ena) break; } @@ -508,12 +508,12 @@ cio_disable_subchannel (struct subchannel *sch) * -ENODEV for subchannels with invalid device number or blacklisted devices */ int -cio_validate_subchannel (struct subchannel *sch, unsigned int irq) +cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) { char dbf_txt[15]; int ccode; - sprintf (dbf_txt, "valsch%x", irq); + sprintf (dbf_txt, "valsch%x", schid.sch_no); CIO_TRACE_EVENT (4, dbf_txt); /* Nuke all fields. */ @@ -522,17 +522,17 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq) spin_lock_init(&sch->lock); /* Set a name for the subchannel */ - snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq); + snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", schid.sch_no); /* * The first subchannel that is not-operational (ccode==3) * indicates that there aren't any more devices available. */ - sch->irq = irq; - ccode = stsch (irq, &sch->schib); + ccode = stsch (schid, &sch->schib); if (ccode) return -ENXIO; + sch->schid = schid; /* Copy subchannel type from path management control word. */ sch->st = sch->schib.pmcw.st; @@ -543,7 +543,7 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq) CIO_DEBUG(KERN_INFO, 0, "Subchannel %04X reports " "non-I/O subchannel type %04X\n", - sch->irq, sch->st); + sch->schid.sch_no, sch->st); /* We stop here for non-io subchannels. */ return sch->st; } @@ -573,7 +573,7 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq) CIO_DEBUG(KERN_INFO, 0, "Detected device %04X on subchannel %04X" " - PIM = %02X, PAM = %02X, POM = %02X\n", - sch->schib.pmcw.dev, sch->irq, sch->schib.pmcw.pim, + sch->schib.pmcw.dev, sch->schid.sch_no, sch->schib.pmcw.pim, sch->schib.pmcw.pam, sch->schib.pmcw.pom); /* @@ -632,7 +632,7 @@ do_IRQ (struct pt_regs *regs) if (sch) spin_lock(&sch->lock); /* Store interrupt response block to lowcore. */ - if (tsch (tpi_info->irq, irb) == 0 && sch) { + if (tsch (tpi_info->schid, irb) == 0 && sch) { /* Keep subchannel information word up to date. 
*/ memcpy (&sch->schib.scsw, &irb->scsw, sizeof (irb->scsw)); @@ -693,26 +693,28 @@ wait_cons_dev (void) static int cio_console_irq(void) { - int irq; + struct subchannel_id schid; + init_subchannel_id(&schid); if (console_irq != -1) { /* VM provided us with the irq number of the console. */ - if (stsch(console_irq, &console_subchannel.schib) != 0 || + schid.sch_no = console_irq; + if (stsch(schid, &console_subchannel.schib) != 0 || !console_subchannel.schib.pmcw.dnv) return -1; console_devno = console_subchannel.schib.pmcw.dev; } else if (console_devno != -1) { /* At least the console device number is known. */ - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - if (stsch(irq, &console_subchannel.schib) != 0) + do { + if (stsch(schid, &console_subchannel.schib) != 0) break; if (console_subchannel.schib.pmcw.dnv && console_subchannel.schib.pmcw.dev == console_devno) { - console_irq = irq; + console_irq = schid.sch_no; break; } - } + } while (schid.sch_no++ < __MAX_SUBCHANNEL); if (console_irq == -1) return -1; } else { @@ -729,6 +731,7 @@ struct subchannel * cio_probe_console(void) { int irq, ret; + struct subchannel_id schid; if (xchg(&console_subchannel_in_use, 1) != 0) return ERR_PTR(-EBUSY); @@ -738,7 +741,9 @@ cio_probe_console(void) return ERR_PTR(-ENODEV); } memset(&console_subchannel, 0, sizeof(struct subchannel)); - ret = cio_validate_subchannel(&console_subchannel, irq); + init_subchannel_id(&schid); + schid.sch_no = irq; + ret = cio_validate_subchannel(&console_subchannel, schid); if (ret) { console_subchannel_in_use = 0; return ERR_PTR(-ENODEV); @@ -770,11 +775,11 @@ cio_release_console(void) /* Bah... hack to catch console special sausages. */ int -cio_is_console(int irq) +cio_is_console(struct subchannel_id schid) { if (!console_subchannel_in_use) return 0; - return (irq == console_subchannel.irq); + return schid_equal(&schid, &console_subchannel.schid); } struct subchannel * @@ -787,7 +792,7 @@ cio_get_console_subchannel(void) #endif static inline int -__disable_subchannel_easy(unsigned int schid, struct schib *schib) +__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) { int retry, cc; @@ -805,7 +810,7 @@ __disable_subchannel_easy(unsigned int schid, struct schib *schib) } static inline int -__clear_subchannel_easy(unsigned int schid) +__clear_subchannel_easy(struct subchannel_id schid) { int retry; @@ -815,8 +820,8 @@ __clear_subchannel_easy(unsigned int schid) struct tpi_info ti; if (tpi(&ti)) { - tsch(ti.irq, (struct irb *)__LC_IRB); - if (ti.irq == schid) + tsch(ti.schid, (struct irb *)__LC_IRB); + if (schid_equal(&ti.schid, &schid)) return 0; } udelay(100); @@ -830,10 +835,11 @@ extern void do_reipl(unsigned long devno); void clear_all_subchannels(void) { - unsigned int schid; + struct subchannel_id schid; local_irq_disable(); - for (schid=0;schid<=highest_subchannel;schid++) { + init_subchannel_id(&schid); + do { struct schib schib; if (stsch(schid, &schib)) break; /* break out of the loop */ @@ -849,7 +855,7 @@ clear_all_subchannels(void) stsch(schid, &schib); __disable_subchannel_easy(schid, &schib); } - } + } while (schid.sch_no++ < __MAX_SUBCHANNEL); } /* Make sure all subchannels are quiet before we re-ipl an lpar. 
*/ diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index c50a9da420a9..0ca987344e07 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -1,6 +1,8 @@ #ifndef S390_CIO_H #define S390_CIO_H +#include "schid.h" + /* * where we put the ssd info */ @@ -83,7 +85,7 @@ struct orb { /* subchannel data structure used by I/O subroutines */ struct subchannel { - unsigned int irq; /* aka. subchannel number */ + struct subchannel_id schid; spinlock_t lock; /* subchannel lock */ enum { @@ -114,7 +116,7 @@ struct subchannel { #define to_subchannel(n) container_of(n, struct subchannel, dev) -extern int cio_validate_subchannel (struct subchannel *, unsigned int); +extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id); extern int cio_enable_subchannel (struct subchannel *, unsigned int); extern int cio_disable_subchannel (struct subchannel *); extern int cio_cancel (struct subchannel *); @@ -127,14 +129,15 @@ extern int cio_cancel (struct subchannel *); extern int cio_set_options (struct subchannel *, int); extern int cio_get_options (struct subchannel *); extern int cio_modify (struct subchannel *); + /* Use with care. */ #ifdef CONFIG_CCW_CONSOLE extern struct subchannel *cio_probe_console(void); extern void cio_release_console(void); -extern int cio_is_console(int irq); +extern int cio_is_console(struct subchannel_id); extern struct subchannel *cio_get_console_subchannel(void); #else -#define cio_is_console(irq) 0 +#define cio_is_console(schid) 0 #define cio_get_console_subchannel() NULL #endif diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index b978f7fe8327..0b03714e696a 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -1,5 +1,5 @@ /* - * linux/drivers/s390/cio/cmf.c ($Revision: 1.16 $) + * linux/drivers/s390/cio/cmf.c ($Revision: 1.19 $) * * Linux on zSeries Channel Measurement Facility support * @@ -178,7 +178,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address) /* msch can silently fail, so do it again if necessary */ for (retry = 0; retry < 3; retry++) { /* prepare schib */ - stsch(sch->irq, schib); + stsch(sch->schid, schib); schib->pmcw.mme = mme; schib->pmcw.mbfc = mbfc; /* address can be either a block address or a block index */ @@ -188,7 +188,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address) schib->pmcw.mbi = address; /* try to submit it */ - switch(ret = msch_err(sch->irq, schib)) { + switch(ret = msch_err(sch->schid, schib)) { case 0: break; case 1: @@ -202,7 +202,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address) ret = -EINVAL; break; } - stsch(sch->irq, schib); /* restore the schib */ + stsch(sch->schid, schib); /* restore the schib */ if (ret) break; diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 7e4d57b4266f..5137dafd1e8d 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -33,7 +33,7 @@ struct device css_bus_device = { }; static struct subchannel * -css_alloc_subchannel(int irq) +css_alloc_subchannel(struct subchannel_id schid) { struct subchannel *sch; int ret; @@ -41,13 +41,11 @@ css_alloc_subchannel(int irq) sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA); if (sch == NULL) return ERR_PTR(-ENOMEM); - ret = cio_validate_subchannel (sch, irq); + ret = cio_validate_subchannel (sch, schid); if (ret < 0) { kfree(sch); return ERR_PTR(ret); } - if (irq > highest_subchannel) - highest_subchannel = irq; if (sch->st != SUBCHANNEL_TYPE_IO) { /* For now we ignore all non-io 
subchannels. */ @@ -87,7 +85,7 @@ css_subchannel_release(struct device *dev) struct subchannel *sch; sch = to_subchannel(dev); - if (!cio_is_console(sch->irq)) + if (!cio_is_console(sch->schid)) kfree(sch); } @@ -114,12 +112,12 @@ css_register_subchannel(struct subchannel *sch) } int -css_probe_device(int irq) +css_probe_device(struct subchannel_id schid) { int ret; struct subchannel *sch; - sch = css_alloc_subchannel(irq); + sch = css_alloc_subchannel(schid); if (IS_ERR(sch)) return PTR_ERR(sch); ret = css_register_subchannel(sch); @@ -132,26 +130,26 @@ static int check_subchannel(struct device * dev, void * data) { struct subchannel *sch; - int irq = (unsigned long)data; + struct subchannel_id *schid = data; sch = to_subchannel(dev); - return (sch->irq == irq); + return schid_equal(&sch->schid, schid); } struct subchannel * -get_subchannel_by_schid(int irq) +get_subchannel_by_schid(struct subchannel_id schid) { struct device *dev; dev = bus_find_device(&css_bus_type, NULL, - (void *)(unsigned long)irq, check_subchannel); + (void *)&schid, check_subchannel); return dev ? to_subchannel(dev) : NULL; } static inline int -css_get_subchannel_status(struct subchannel *sch, int schid) +css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid) { struct schib schib; int cc; @@ -170,13 +168,13 @@ css_get_subchannel_status(struct subchannel *sch, int schid) } static int -css_evaluate_subchannel(int irq, int slow) +css_evaluate_subchannel(struct subchannel_id schid, int slow) { int event, ret, disc; struct subchannel *sch; unsigned long flags; - sch = get_subchannel_by_schid(irq); + sch = get_subchannel_by_schid(schid); disc = sch ? device_is_disconnected(sch) : 0; if (disc && slow) { if (sch) @@ -194,9 +192,10 @@ css_evaluate_subchannel(int irq, int slow) put_device(&sch->dev); return -EAGAIN; /* Will be done on the slow path. */ } - event = css_get_subchannel_status(sch, irq); + event = css_get_subchannel_status(sch, schid); CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n", - irq, event, sch?(disc?"disconnected":"normal"):"unknown", + schid.sch_no, event, + sch?(disc?"disconnected":"normal"):"unknown", slow?"slow":"fast"); switch (event) { case CIO_NO_PATH: @@ -253,7 +252,7 @@ css_evaluate_subchannel(int irq, int slow) sch->schib.pmcw.intparm = 0; cio_modify(sch); put_device(&sch->dev); - ret = css_probe_device(irq); + ret = css_probe_device(schid); } else { /* * We can't immediately deregister the disconnected @@ -272,7 +271,7 @@ css_evaluate_subchannel(int irq, int slow) device_trigger_reprobe(sch); spin_unlock_irqrestore(&sch->lock, flags); } - ret = sch ? 0 : css_probe_device(irq); + ret = sch ? 0 : css_probe_device(schid); break; default: BUG(); @@ -284,10 +283,12 @@ css_evaluate_subchannel(int irq, int slow) static void css_rescan_devices(void) { - int irq, ret; + int ret; + struct subchannel_id schid; - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - ret = css_evaluate_subchannel(irq, 1); + init_subchannel_id(&schid); + do { + ret = css_evaluate_subchannel(schid, 1); /* No more memory. It doesn't make sense to continue. No * panic because this can happen in midflight and just * because we can't use a new device is no reason to crash @@ -297,12 +298,12 @@ css_rescan_devices(void) /* -ENXIO indicates that there are no more subchannels. 
*/ if (ret == -ENXIO) break; - } + } while (schid.sch_no++ < __MAX_SUBCHANNEL); } struct slow_subchannel { struct list_head slow_list; - unsigned long schid; + struct subchannel_id schid; }; static LIST_HEAD(slow_subchannels_head); @@ -357,20 +358,24 @@ int css_process_crw(int irq) { int ret; + struct subchannel_id mchk_schid; CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq); if (need_rescan) /* We need to iterate all subchannels anyway. */ return -EAGAIN; + + init_subchannel_id(&mchk_schid); + mchk_schid.sch_no = irq; /* * Since we are always presented with IPI in the CRW, we have to * use stsch() to find out if the subchannel in question has come * or gone. */ - ret = css_evaluate_subchannel(irq, 0); + ret = css_evaluate_subchannel(mchk_schid, 0); if (ret == -EAGAIN) { - if (css_enqueue_subchannel_slow(irq)) { + if (css_enqueue_subchannel_slow(mchk_schid)) { css_clear_subchannel_slow_list(); need_rescan = 1; } @@ -404,7 +409,8 @@ css_generate_pgid(void) static int __init init_channel_subsystem (void) { - int ret, irq; + int ret; + struct subchannel_id schid; if (chsc_determine_css_characteristics() == 0) css_characteristics_avail = 1; @@ -420,13 +426,14 @@ init_channel_subsystem (void) ctl_set_bit(6, 28); - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { + init_subchannel_id(&schid); + do { struct subchannel *sch; - if (cio_is_console(irq)) + if (cio_is_console(schid)) sch = cio_get_console_subchannel(); else { - sch = css_alloc_subchannel(irq); + sch = css_alloc_subchannel(schid); if (IS_ERR(sch)) ret = PTR_ERR(sch); else @@ -448,7 +455,7 @@ init_channel_subsystem (void) * console subchannel. */ css_register_subchannel(sch); - } + } while (schid.sch_no++ < __MAX_SUBCHANNEL); return 0; out_bus: @@ -482,7 +489,7 @@ struct bus_type css_bus_type = { subsys_initcall(init_channel_subsystem); int -css_enqueue_subchannel_slow(unsigned long schid) +css_enqueue_subchannel_slow(struct subchannel_id schid) { struct slow_subchannel *new_slow_sch; unsigned long flags; diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 2004a6c49388..f26e16daecb5 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -6,6 +6,8 @@ #include +#include "schid.h" + /* * path grouping stuff */ @@ -68,7 +70,7 @@ struct ccw_device_private { atomic_t onoff; unsigned long registered; __u16 devno; /* device number */ - __u16 irq; /* subchannel number */ + __u16 sch_no; /* subchannel number */ __u8 imask; /* lpm mask for SNID/SID/SPGID */ int iretry; /* retry counter SNID/SID/SPGID */ struct { @@ -121,12 +123,11 @@ struct css_driver { extern struct bus_type css_bus_type; extern struct css_driver io_subchannel_driver; -int css_probe_device(int irq); -extern struct subchannel * get_subchannel_by_schid(int irq); -extern unsigned int highest_subchannel; +extern int css_probe_device(struct subchannel_id); +extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); extern int css_init_done; -#define __MAX_SUBCHANNELS 65536 +#define __MAX_SUBCHANNEL 65535 extern struct bus_type css_bus_type; extern struct device css_bus_device; @@ -144,7 +145,7 @@ void device_set_waiting(struct subchannel *); void device_kill_pending_timer(struct subchannel *); /* Helper functions to build lists for the slow path. 
*/ -int css_enqueue_subchannel_slow(unsigned long schid); +extern int css_enqueue_subchannel_slow(struct subchannel_id schid); void css_walk_subchannel_slow_list(void (*fn)(unsigned long)); void css_clear_subchannel_slow_list(void); int css_slow_subchannels_exist(void); diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 0590cffe62aa..9ac07aeffbe6 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -622,7 +622,7 @@ ccw_device_do_unreg_rereg(void *data) other_sch = to_subchannel(other_cdev->dev.parent); if (get_device(&other_sch->dev)) { - stsch(other_sch->irq, &other_sch->schib); + stsch(other_sch->schid, &other_sch->schib); if (other_sch->schib.pmcw.dnv) { other_sch->schib.pmcw.intparm = 0; cio_modify(other_sch); @@ -772,7 +772,7 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) /* Init private data. */ priv = cdev->private; priv->devno = sch->schib.pmcw.dev; - priv->irq = sch->irq; + priv->sch_no = sch->schid.sch_no; priv->state = DEV_STATE_NOT_OPER; INIT_LIST_HEAD(&priv->cmb_list); init_waitqueue_head(&priv->wait_q); @@ -951,7 +951,7 @@ io_subchannel_shutdown(struct device *dev) sch = to_subchannel(dev); cdev = dev->driver_data; - if (cio_is_console(sch->irq)) + if (cio_is_console(sch->schid)) return; if (!sch->schib.pmcw.ena) /* Nothing to do. */ @@ -1146,6 +1146,16 @@ ccw_driver_unregister (struct ccw_driver *cdriver) driver_unregister(&cdriver->driver); } +/* Helper func for qdio. */ +struct subchannel_id +ccw_device_get_subchannel_id(struct ccw_device *cdev) +{ + struct subchannel *sch; + + sch = to_subchannel(cdev->dev.parent); + return sch->schid; +} + MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ccw_device_set_online); EXPORT_SYMBOL(ccw_device_set_offline); @@ -1155,3 +1165,4 @@ EXPORT_SYMBOL(get_ccwdev_by_busid); EXPORT_SYMBOL(ccw_bus_type); EXPORT_SYMBOL(ccw_device_work); EXPORT_SYMBOL(ccw_device_notify_work); +EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index a3aa056d7245..11587ebb7289 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -110,6 +110,7 @@ int ccw_device_stlck(struct ccw_device *); /* qdio needs this. */ void ccw_device_set_timeout(struct ccw_device *, int); +extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); void retry_set_schib(struct ccw_device *cdev); #endif diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index c1c89f4fd4e3..9efeae75ad28 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -133,7 +133,7 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) int ret; sch = to_subchannel(cdev->dev.parent); - ret = stsch(sch->irq, &sch->schib); + ret = stsch(sch->schid, &sch->schib); if (ret || !sch->schib.pmcw.dnv) return -ENODEV; if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0) @@ -231,7 +231,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) * through ssch() and the path information is up to date. 
*/ old_lpm = sch->lpm; - stsch(sch->irq, &sch->schib); + stsch(sch->schid, &sch->schib); sch->lpm = sch->schib.pmcw.pim & sch->schib.pmcw.pam & sch->schib.pmcw.pom & @@ -258,7 +258,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) case DEV_STATE_NOT_OPER: CIO_DEBUG(KERN_WARNING, 2, "SenseID : unknown device %04x on subchannel %04x\n", - cdev->private->devno, sch->irq); + cdev->private->devno, sch->schid.sch_no); break; case DEV_STATE_OFFLINE: if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) { @@ -291,7 +291,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) case DEV_STATE_BOXED: CIO_DEBUG(KERN_WARNING, 2, "SenseID : boxed device %04x on subchannel %04x\n", - cdev->private->devno, sch->irq); + cdev->private->devno, sch->schid.sch_no); break; } cdev->private->state = state; @@ -359,7 +359,7 @@ ccw_device_done(struct ccw_device *cdev, int state) if (state == DEV_STATE_BOXED) CIO_DEBUG(KERN_WARNING, 2, "Boxed device %04x on subchannel %04x\n", - cdev->private->devno, sch->irq); + cdev->private->devno, sch->schid.sch_no); if (cdev->private->flags.donotify) { cdev->private->flags.donotify = 0; @@ -592,7 +592,7 @@ ccw_device_offline(struct ccw_device *cdev) struct subchannel *sch; sch = to_subchannel(cdev->dev.parent); - if (stsch(sch->irq, &sch->schib) || !sch->schib.pmcw.dnv) + if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) return -ENODEV; if (cdev->private->state != DEV_STATE_ONLINE) { if (sch->schib.scsw.actl != 0) @@ -711,7 +711,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) * Since we might not just be coming from an interrupt from the * subchannel we have to update the schib. */ - stsch(sch->irq, &sch->schib); + stsch(sch->schid, &sch->schib); if (sch->schib.scsw.actl != 0 || (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { @@ -923,7 +923,7 @@ ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event) /* Iff device is idle, reset timeout. */ sch = to_subchannel(cdev->dev.parent); - if (!stsch(sch->irq, &sch->schib)) + if (!stsch(sch->schid, &sch->schib)) if (sch->schib.scsw.actl == 0) ccw_device_set_timeout(cdev, 0); /* Call the handler. */ @@ -1035,7 +1035,7 @@ device_trigger_reprobe(struct subchannel *sch) return; /* Update some values. */ - if (stsch(sch->irq, &sch->schib)) + if (stsch(sch->schid, &sch->schib)) return; /* diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index 0e68fb511dc9..207881ec7aaf 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c @@ -258,7 +258,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev) */ CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel %04x " "reports cmd reject\n", - cdev->private->devno, sch->irq); + cdev->private->devno, sch->schid.sch_no); return -EOPNOTSUPP; } if (irb->esw.esw0.erw.cons) { @@ -280,13 +280,13 @@ ccw_device_check_sense_id(struct ccw_device *cdev) CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x on" " subchannel %04x is 'not operational'\n", sch->orb.lpm, cdev->private->devno, - sch->irq); + sch->schid.sch_no); return -EACCES; } /* Hmm, whatever happened, try again. 
*/ CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on " "subchannel %04x returns status %02X%02X\n", - cdev->private->devno, sch->irq, + cdev->private->devno, sch->schid.sch_no, irb->scsw.dstat, irb->scsw.cstat); return -EAGAIN; } diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 85a3026e6900..143b6c25a4e6 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -1,7 +1,7 @@ /* * drivers/s390/cio/device_ops.c * - * $Revision: 1.57 $ + * $Revision: 1.58 $ * * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, * IBM Corporation @@ -570,7 +570,7 @@ ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) int _ccw_device_get_subchannel_number(struct ccw_device *cdev) { - return cdev->private->irq; + return cdev->private->sch_no; } int diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 757b2706d5a9..f08e84cc3563 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -59,7 +59,7 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev) CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " "%04x, lpm %02X, became 'not " "operational'\n", - cdev->private->devno, sch->irq, + cdev->private->devno, sch->schid.sch_no, cdev->private->imask); } @@ -121,13 +121,14 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) if (irb->scsw.cc == 3) { CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " "%04x, lpm %02X, became 'not operational'\n", - cdev->private->devno, sch->irq, sch->orb.lpm); + cdev->private->devno, sch->schid.sch_no, + sch->orb.lpm); return -EACCES; } if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) { CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel %04x " "is reserved by someone else\n", - cdev->private->devno, sch->irq); + cdev->private->devno, sch->schid.sch_no); return -EUSERS; } return 0; @@ -237,7 +238,7 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func) sch->vpm &= ~cdev->private->imask; CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " "%04x, lpm %02X, became 'not operational'\n", - cdev->private->devno, sch->irq, cdev->private->imask); + cdev->private->devno, sch->schid.sch_no, cdev->private->imask); return ret; } @@ -271,7 +272,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev) if (irb->scsw.cc == 3) { CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " "%04x, lpm %02X, became 'not operational'\n", - cdev->private->devno, sch->irq, + cdev->private->devno, sch->schid.sch_no, cdev->private->imask); return -EACCES; } @@ -373,7 +374,7 @@ ccw_device_verify_start(struct ccw_device *cdev) * Update sch->lpm with current values to catch paths becoming * available again. */ - if (stsch(sch->irq, &sch->schib)) { + if (stsch(sch->schid, &sch->schib)) { ccw_device_verify_done(cdev, -ENODEV); return; } diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 12a24d4331a2..929f8fb505f2 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -38,13 +38,13 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) "received" " ... 
device %04X on subchannel %04X, dev_stat " ": %02X sch_stat : %02X\n", - cdev->private->devno, cdev->private->irq, + cdev->private->devno, cdev->private->sch_no, irb->scsw.dstat, irb->scsw.cstat); if (irb->scsw.cc != 3) { char dbf_text[15]; - sprintf(dbf_text, "chk%x", cdev->private->irq); + sprintf(dbf_text, "chk%x", cdev->private->sch_no); CIO_TRACE_EVENT(0, dbf_text); CIO_HEX_EVENT(0, irb, sizeof (struct irb)); } @@ -59,10 +59,10 @@ ccw_device_path_notoper(struct ccw_device *cdev) struct subchannel *sch; sch = to_subchannel(cdev->dev.parent); - stsch (sch->irq, &sch->schib); + stsch (sch->schid, &sch->schib); CIO_MSG_EVENT(0, "%s(%04x) - path(s) %02x are " - "not operational \n", __FUNCTION__, sch->irq, + "not operational \n", __FUNCTION__, sch->schid.sch_no, sch->schib.pmcw.pnom); sch->lpm &= ~sch->schib.pmcw.pnom; diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index 45480a2bc4c0..66c882e52f32 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h @@ -1,12 +1,13 @@ #ifndef S390_CIO_IOASM_H #define S390_CIO_IOASM_H +#include "schid.h" + /* * TPI info structure */ struct tpi_info { - __u32 reserved1 : 16; /* reserved 0x00000001 */ - __u32 irq : 16; /* aka. subchannel number */ + struct subchannel_id schid; __u32 intparm; /* interruption parameter */ __u32 adapter_IO : 1; __u32 reserved2 : 1; @@ -21,7 +22,8 @@ struct tpi_info { * Some S390 specific IO instructions as inline */ -static inline int stsch(int irq, volatile struct schib *addr) +static inline int stsch(struct subchannel_id schid, + volatile struct schib *addr) { int ccode; @@ -31,12 +33,13 @@ static inline int stsch(int irq, volatile struct schib *addr) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000), "a" (addr) + : "d" (schid), "a" (addr), "m" (*addr) : "cc", "1" ); return ccode; } -static inline int msch(int irq, volatile struct schib *addr) +static inline int msch(struct subchannel_id schid, + volatile struct schib *addr) { int ccode; @@ -46,12 +49,13 @@ static inline int msch(int irq, volatile struct schib *addr) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000L), "a" (addr) + : "d" (schid), "a" (addr), "m" (*addr) : "cc", "1" ); return ccode; } -static inline int msch_err(int irq, volatile struct schib *addr) +static inline int msch_err(struct subchannel_id schid, + volatile struct schib *addr) { int ccode; @@ -74,12 +78,13 @@ static inline int msch_err(int irq, volatile struct schib *addr) ".previous" #endif : "=&d" (ccode) - : "d" (irq | 0x10000L), "a" (addr), "K" (-EIO) + : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr) : "cc", "1" ); return ccode; } -static inline int tsch(int irq, volatile struct irb *addr) +static inline int tsch(struct subchannel_id schid, + volatile struct irb *addr) { int ccode; @@ -89,7 +94,7 @@ static inline int tsch(int irq, volatile struct irb *addr) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000L), "a" (addr) + : "d" (schid), "a" (addr), "m" (*addr) : "cc", "1" ); return ccode; } @@ -103,12 +108,13 @@ static inline int tpi( volatile struct tpi_info *addr) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "a" (addr) + : "a" (addr), "m" (*addr) : "cc", "1" ); return ccode; } -static inline int ssch(int irq, volatile struct orb *addr) +static inline int ssch(struct subchannel_id schid, + volatile struct orb *addr) { int ccode; @@ -118,12 +124,12 @@ static inline int ssch(int irq, volatile struct orb *addr) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000L), "a" (addr) + : "d" (schid), "a" (addr), "m" (*addr) 
: "cc", "1" ); return ccode; } -static inline int rsch(int irq) +static inline int rsch(struct subchannel_id schid) { int ccode; @@ -133,12 +139,12 @@ static inline int rsch(int irq) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000L) + : "d" (schid) : "cc", "1" ); return ccode; } -static inline int csch(int irq) +static inline int csch(struct subchannel_id schid) { int ccode; @@ -148,12 +154,12 @@ static inline int csch(int irq) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000L) + : "d" (schid) : "cc", "1" ); return ccode; } -static inline int hsch(int irq) +static inline int hsch(struct subchannel_id schid) { int ccode; @@ -163,12 +169,12 @@ static inline int hsch(int irq) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000L) + : "d" (schid) : "cc", "1" ); return ccode; } -static inline int xsch(int irq) +static inline int xsch(struct subchannel_id schid) { int ccode; @@ -178,21 +184,22 @@ static inline int xsch(int irq) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000L) + : "d" (schid) : "cc", "1" ); return ccode; } static inline int chsc(void *chsc_area) { + typedef struct { char _[4096]; } addr_type; int cc; __asm__ __volatile__ ( - ".insn rre,0xb25f0000,%1,0 \n\t" + ".insn rre,0xb25f0000,%2,0 \n\t" "ipm %0 \n\t" "srl %0,28 \n\t" - : "=d" (cc) - : "d" (chsc_area) + : "=d" (cc), "=m" (*(addr_type *) chsc_area) + : "d" (chsc_area), "m" (*(addr_type *) chsc_area) : "cc" ); return cc; diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index e8bdfcd1d02a..5c7001b5c7a1 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c @@ -270,7 +270,7 @@ qdio_siga_sync(struct qdio_q *q, unsigned int gpr2, perf_stats.siga_syncs++; #endif /* QDIO_PERFORMANCE_STATS */ - cc = do_siga_sync(0x10000|q->irq, gpr2, gpr3); + cc = do_siga_sync(q->schid, gpr2, gpr3); if (cc) QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); @@ -290,12 +290,16 @@ __do_siga_output(struct qdio_q *q, unsigned int *busy_bit) { struct qdio_irq *irq; unsigned int fc = 0; + unsigned long schid; irq = (struct qdio_irq *) q->irq_ptr; if (!irq->is_qebsm) - return do_siga_output(0x10000|q->irq, q->mask, busy_bit, fc); - fc |= 0x80; - return do_siga_output(irq->sch_token, q->mask, busy_bit, fc); + schid = *((u32 *)&q->schid); + else { + schid = irq->sch_token; + fc |= 0x80; + } + return do_siga_output(schid, q->mask, busy_bit, fc); } /* @@ -349,7 +353,7 @@ qdio_siga_input(struct qdio_q *q) perf_stats.siga_ins++; #endif /* QDIO_PERFORMANCE_STATS */ - cc = do_siga_input(0x10000|q->irq, q->mask); + cc = do_siga_input(q->schid, q->mask); if (cc) QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); @@ -855,7 +859,7 @@ qdio_kick_outbound_q(struct qdio_q *q) /* went smooth this time, reset timestamp */ #ifdef CONFIG_QDIO_DEBUG QDIO_DBF_TEXT3(0,trace,"cc2reslv"); - sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no, + sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no, atomic_read(&q->busy_siga_counter)); QDIO_DBF_TEXT3(0,trace,dbf_text); #endif /* CONFIG_QDIO_DEBUG */ @@ -878,7 +882,7 @@ qdio_kick_outbound_q(struct qdio_q *q) } QDIO_DBF_TEXT2(0,trace,"cc2REPRT"); #ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no, + sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no, atomic_read(&q->busy_siga_counter)); QDIO_DBF_TEXT3(0,trace,dbf_text); #endif /* CONFIG_QDIO_DEBUG */ @@ -1733,7 +1737,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev, void *ptr; int available; - sprintf(dbf_text,"qfqs%4x",cdev->private->irq); + sprintf(dbf_text,"qfqs%4x",cdev->private->sch_no); 
QDIO_DBF_TEXT0(0,setup,dbf_text); for (i=0;iinput_qs[i]; @@ -1753,7 +1757,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev, q->queue_type=q_format; q->int_parm=int_parm; - q->irq=irq_ptr->irq; + q->schid = irq_ptr->schid; q->irq_ptr = irq_ptr; q->cdev = cdev; q->mask=1<<(31-i); @@ -1826,7 +1830,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev, q->queue_type=q_format; q->int_parm=int_parm; q->is_input_q=0; - q->irq=irq_ptr->irq; + q->schid = irq_ptr->schid; q->cdev = cdev; q->irq_ptr = irq_ptr; q->mask=1<<(31-i); @@ -1933,7 +1937,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) char dbf_text[15]; QDIO_DBF_TEXT5(0,trace,"newstate"); - sprintf(dbf_text,"%4x%4x",irq_ptr->irq,state); + sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state); QDIO_DBF_TEXT5(0,trace,dbf_text); #endif /* CONFIG_QDIO_DEBUG */ @@ -1946,12 +1950,12 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) } static inline void -qdio_irq_check_sense(int irq, struct irb *irb) +qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) { char dbf_text[15]; if (irb->esw.esw0.erw.cons) { - sprintf(dbf_text,"sens%4x",irq); + sprintf(dbf_text,"sens%4x",schid.sch_no); QDIO_DBF_TEXT2(1,trace,dbf_text); QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN); @@ -2063,20 +2067,20 @@ qdio_timeout_handler(struct ccw_device *cdev) switch (irq_ptr->state) { case QDIO_IRQ_STATE_INACTIVE: QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n", - irq_ptr->irq); + irq_ptr->schid.sch_no); QDIO_DBF_TEXT2(1,setup,"eq:timeo"); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); break; case QDIO_IRQ_STATE_CLEANUP: QDIO_PRINT_INFO("Did not get interrupt on cleanup, irq=0x%x.\n", - irq_ptr->irq); + irq_ptr->schid.sch_no); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); break; case QDIO_IRQ_STATE_ESTABLISHED: case QDIO_IRQ_STATE_ACTIVE: /* I/O has been terminated by common I/O layer. */ QDIO_PRINT_INFO("Queues on irq %04x killed by cio.\n", - irq_ptr->irq); + irq_ptr->schid.sch_no); QDIO_DBF_TEXT2(1, trace, "cio:term"); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); if (get_device(&cdev->dev)) { @@ -2139,7 +2143,7 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) } } - qdio_irq_check_sense(irq_ptr->irq, irb); + qdio_irq_check_sense(irq_ptr->schid, irb); #ifdef CONFIG_QDIO_DEBUG sprintf(dbf_text, "state:%d", irq_ptr->state); @@ -2195,7 +2199,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags, return -ENODEV; #ifdef CONFIG_QDIO_DEBUG - *((int*)(&dbf_text[4])) = irq_ptr->irq; + *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no; QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN); *((int*)(&dbf_text[0]))=flags; *((int*)(&dbf_text[4]))=queue_number; @@ -2207,13 +2211,13 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags, if (!q) return -EINVAL; if (!(irq_ptr->is_qebsm)) - cc = do_siga_sync(0x10000|q->irq, 0, q->mask); + cc = do_siga_sync(q->schid, 0, q->mask); } else if (flags&QDIO_FLAG_SYNC_OUTPUT) { q=irq_ptr->output_qs[queue_number]; if (!q) return -EINVAL; if (!(irq_ptr->is_qebsm)) - cc = do_siga_sync(0x10000|q->irq, q->mask, 0); + cc = do_siga_sync(q->schid, q->mask, 0); } else return -EINVAL; @@ -2298,7 +2302,7 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr) ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!ssqd_area) { QDIO_PRINT_WARN("Could not get memory for chsc. 
Using all " \ - "SIGAs for sch x%x.\n", irq_ptr->irq); + "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no); irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || CHSC_FLAG_SIGA_OUTPUT_NECESSARY || CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ @@ -2312,14 +2316,14 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr) .length = 0x0010, .code = 0x0024, }; - ssqd_area->first_sch = irq_ptr->irq; - ssqd_area->last_sch = irq_ptr->irq; + ssqd_area->first_sch = irq_ptr->schid.sch_no; + ssqd_area->last_sch = irq_ptr->schid.sch_no; result = chsc(ssqd_area); if (result) { QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \ "SIGAs for sch x%x.\n", - result, irq_ptr->irq); + result, irq_ptr->schid.sch_no); qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || CHSC_FLAG_SIGA_OUTPUT_NECESSARY || CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ @@ -2330,7 +2334,7 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr) if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) { QDIO_PRINT_WARN("response upon checking SIGA needs " \ "is 0x%x. Using all SIGAs for sch x%x.\n", - ssqd_area->response.code, irq_ptr->irq); + ssqd_area->response.code, irq_ptr->schid.sch_no); qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || CHSC_FLAG_SIGA_OUTPUT_NECESSARY || CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ @@ -2339,9 +2343,9 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr) } if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) || !(ssqd_area->flags & CHSC_FLAG_VALIDITY) || - (ssqd_area->sch != irq_ptr->irq)) { + (ssqd_area->sch != irq_ptr->schid.sch_no)) { QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \ - "using all SIGAs.\n",irq_ptr->irq); + "using all SIGAs.\n",irq_ptr->schid.sch_no); qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | CHSC_FLAG_SIGA_OUTPUT_NECESSARY | CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */ @@ -2427,7 +2431,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) /* set to 0x10000000 to enable * time delay disablement facility */ u32 reserved5; - u32 subsystem_id; + struct subchannel_id schid; u32 reserved6[1004]; struct chsc_header response; u32 reserved7; @@ -2449,7 +2453,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scssc_area) { QDIO_PRINT_WARN("No memory for setting indicators on " \ - "subchannel x%x.\n", irq_ptr->irq); + "subchannel x%x.\n", irq_ptr->schid.sch_no); return -ENOMEM; } scssc_area->request = (struct chsc_header) { @@ -2463,7 +2467,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) scssc_area->ks = QDIO_STORAGE_KEY; scssc_area->kc = QDIO_STORAGE_KEY; scssc_area->isc = TIQDIO_THININT_ISC; - scssc_area->subsystem_id = (1<<16) + irq_ptr->irq; + scssc_area->schid = irq_ptr->schid; /* enables the time delay disablement facility. Don't care * whether it is really there (i.e. 
we haven't checked for * it) */ @@ -2473,12 +2477,10 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) QDIO_PRINT_WARN("Time delay disablement facility " \ "not available\n"); - - result = chsc(scssc_area); if (result) { QDIO_PRINT_WARN("could not set indicators on irq x%x, " \ - "cc=%i.\n",irq_ptr->irq,result); + "cc=%i.\n",irq_ptr->schid.sch_no,result); result = -EIO; goto out; } @@ -2534,7 +2536,7 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target) scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scsscf_area) { QDIO_PRINT_WARN("No memory for setting delay target on " \ - "subchannel x%x.\n", irq_ptr->irq); + "subchannel x%x.\n", irq_ptr->schid.sch_no); return -ENOMEM; } scsscf_area->request = (struct chsc_header) { @@ -2547,7 +2549,8 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target) result=chsc(scsscf_area); if (result) { QDIO_PRINT_WARN("could not set delay target on irq x%x, " \ - "cc=%i. Continuing.\n",irq_ptr->irq,result); + "cc=%i. Continuing.\n",irq_ptr->schid.sch_no, + result); result = -EIO; goto out; } @@ -2581,7 +2584,7 @@ qdio_cleanup(struct ccw_device *cdev, int how) if (!irq_ptr) return -ENODEV; - sprintf(dbf_text,"qcln%4x",irq_ptr->irq); + sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no); QDIO_DBF_TEXT1(0,trace,dbf_text); QDIO_DBF_TEXT0(0,setup,dbf_text); @@ -2608,7 +2611,7 @@ qdio_shutdown(struct ccw_device *cdev, int how) down(&irq_ptr->setting_up_sema); - sprintf(dbf_text,"qsqs%4x",irq_ptr->irq); + sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no); QDIO_DBF_TEXT1(0,trace,dbf_text); QDIO_DBF_TEXT0(0,setup,dbf_text); @@ -2714,7 +2717,7 @@ qdio_free(struct ccw_device *cdev) down(&irq_ptr->setting_up_sema); - sprintf(dbf_text,"qfqs%4x",irq_ptr->irq); + sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no); QDIO_DBF_TEXT1(0,trace,dbf_text); QDIO_DBF_TEXT0(0,setup,dbf_text); @@ -2862,13 +2865,13 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, irq_ptr = cdev->private->qdio_data; if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) { - sprintf(dbf_text,"ick1%4x",irq_ptr->irq); + sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no); QDIO_DBF_TEXT2(1,trace,dbf_text); QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int)); QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int)); QDIO_PRINT_ERR("received check condition on establish " \ "queues on irq 0x%x (cs=x%x, ds=x%x).\n", - irq_ptr->irq,cstat,dstat); + irq_ptr->schid.sch_no,cstat,dstat); qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR); } @@ -2878,7 +2881,7 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); QDIO_PRINT_ERR("establish queues on irq %04x: didn't get " "device end: dstat=%02x, cstat=%02x\n", - irq_ptr->irq, dstat, cstat); + irq_ptr->schid.sch_no, dstat, cstat); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); return 1; } @@ -2890,7 +2893,7 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, QDIO_PRINT_ERR("establish queues on irq %04x: got " "the following devstat: dstat=%02x, " "cstat=%02x\n", - irq_ptr->irq, dstat, cstat); + irq_ptr->schid.sch_no, dstat, cstat); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); return 1; } @@ -2905,7 +2908,7 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat) irq_ptr = cdev->private->qdio_data; - sprintf(dbf_text,"qehi%4x",cdev->private->irq); + sprintf(dbf_text,"qehi%4x",cdev->private->sch_no); QDIO_DBF_TEXT0(0,setup,dbf_text); QDIO_DBF_TEXT0(0,trace,dbf_text); @@ -2924,7 
+2927,7 @@ qdio_initialize(struct qdio_initialize *init_data) int rc; char dbf_text[15]; - sprintf(dbf_text,"qini%4x",init_data->cdev->private->irq); + sprintf(dbf_text,"qini%4x",init_data->cdev->private->sch_no); QDIO_DBF_TEXT0(0,setup,dbf_text); QDIO_DBF_TEXT0(0,trace,dbf_text); @@ -2945,7 +2948,7 @@ qdio_allocate(struct qdio_initialize *init_data) struct qdio_irq *irq_ptr; char dbf_text[15]; - sprintf(dbf_text,"qalc%4x",init_data->cdev->private->irq); + sprintf(dbf_text,"qalc%4x",init_data->cdev->private->sch_no); QDIO_DBF_TEXT0(0,setup,dbf_text); QDIO_DBF_TEXT0(0,trace,dbf_text); if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) || @@ -3018,7 +3021,7 @@ int qdio_fill_irq(struct qdio_initialize *init_data) irq_ptr->int_parm=init_data->int_parm; - irq_ptr->irq = init_data->cdev->private->irq; + irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev); irq_ptr->no_input_qs=init_data->no_input_qs; irq_ptr->no_output_qs=init_data->no_output_qs; @@ -3038,7 +3041,7 @@ int qdio_fill_irq(struct qdio_initialize *init_data) QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*)); if (!irq_ptr->dev_st_chg_ind) { QDIO_PRINT_WARN("no indicator location available " \ - "for irq 0x%x\n",irq_ptr->irq); + "for irq 0x%x\n",irq_ptr->schid.sch_no); qdio_release_irq_memory(irq_ptr); return -ENOBUFS; } @@ -3169,7 +3172,7 @@ qdio_establish(struct qdio_initialize *init_data) tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET); } - sprintf(dbf_text,"qest%4x",cdev->private->irq); + sprintf(dbf_text,"qest%4x",cdev->private->sch_no); QDIO_DBF_TEXT0(0,setup,dbf_text); QDIO_DBF_TEXT0(0,trace,dbf_text); @@ -3197,7 +3200,7 @@ qdio_establish(struct qdio_initialize *init_data) } QDIO_PRINT_WARN("establish queues on irq %04x: do_IO " \ "returned %i, next try returned %i\n", - irq_ptr->irq,result,result2); + irq_ptr->schid.sch_no,result,result2); result=result2; if (result) ccw_device_set_timeout(cdev, 0); @@ -3270,7 +3273,7 @@ qdio_activate(struct ccw_device *cdev, int flags) goto out; } - sprintf(dbf_text,"qact%4x", irq_ptr->irq); + sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no); QDIO_DBF_TEXT2(0,setup,dbf_text); QDIO_DBF_TEXT2(0,trace,dbf_text); @@ -3297,7 +3300,7 @@ qdio_activate(struct ccw_device *cdev, int flags) } QDIO_PRINT_WARN("activate queues on irq %04x: do_IO " \ "returned %i, next try returned %i\n", - irq_ptr->irq,result,result2); + irq_ptr->schid.sch_no,result,result2); result=result2; } @@ -3509,7 +3512,7 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags, #ifdef CONFIG_QDIO_DEBUG char dbf_text[20]; - sprintf(dbf_text,"doQD%04x",cdev->private->irq); + sprintf(dbf_text,"doQD%04x",cdev->private->sch_no); QDIO_DBF_TEXT3(0,trace,dbf_text); #endif /* CONFIG_QDIO_DEBUG */ diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index b5d303e79a24..43b840af5300 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -3,7 +3,9 @@ #include -#define VERSION_CIO_QDIO_H "$Revision: 1.37 $" +#include "schid.h" + +#define VERSION_CIO_QDIO_H "$Revision: 1.40 $" #ifdef CONFIG_QDIO_DEBUG #define QDIO_VERBOSE_LEVEL 9 @@ -317,7 +319,7 @@ do_eqbs(unsigned long sch, unsigned char *state, int queue, static inline int -do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2) +do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2) { int cc; @@ -331,7 +333,7 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2) "ipm %0 \n\t" "srl %0,28 \n\t" : "=d" (cc) - : "d" (irq), "d" (mask1), "d" (mask2) + : "d" (schid), "d" (mask1), 
"d" (mask2) : "cc", "0", "1", "2", "3" ); #else /* CONFIG_ARCH_S390X */ @@ -344,7 +346,7 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2) "ipm %0 \n\t" "srl %0,28 \n\t" : "=d" (cc) - : "d" (irq), "d" (mask1), "d" (mask2) + : "d" (schid), "d" (mask1), "d" (mask2) : "cc", "0", "1", "2", "3" ); #endif /* CONFIG_ARCH_S390X */ @@ -352,7 +354,7 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2) } static inline int -do_siga_input(unsigned int irq, unsigned int mask) +do_siga_input(struct subchannel_id schid, unsigned int mask) { int cc; @@ -365,7 +367,7 @@ do_siga_input(unsigned int irq, unsigned int mask) "ipm %0 \n\t" "srl %0,28 \n\t" : "=d" (cc) - : "d" (irq), "d" (mask) + : "d" (schid), "d" (mask) : "cc", "0", "1", "2", "memory" ); #else /* CONFIG_ARCH_S390X */ @@ -377,7 +379,7 @@ do_siga_input(unsigned int irq, unsigned int mask) "ipm %0 \n\t" "srl %0,28 \n\t" : "=d" (cc) - : "d" (irq), "d" (mask) + : "d" (schid), "d" (mask) : "cc", "0", "1", "2", "memory" ); #endif /* CONFIG_ARCH_S390X */ @@ -386,7 +388,7 @@ do_siga_input(unsigned int irq, unsigned int mask) } static inline int -do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb, +do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb, unsigned int fc) { int cc; @@ -418,7 +420,7 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb, ".long 0b,2b \n\t" ".previous \n\t" : "=d" (cc), "=d" (busy_bit) - : "d" (irq), "d" (mask), + : "d" (schid), "d" (mask), "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) : "cc", "0", "1", "2", "memory" ); @@ -443,7 +445,7 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb, ".quad 0b,1b \n\t" ".previous \n\t" : "=d" (cc), "=d" (busy_bit) - : "d" (irq), "d" (mask), + : "d" (schid), "d" (mask), "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION), "d" (fc) : "cc", "0", "1", "2", "memory" ); @@ -554,7 +556,7 @@ struct qdio_q { __u32 * dev_st_chg_ind; int is_input_q; - int irq; + struct subchannel_id schid; struct ccw_device *cdev; unsigned int is_iqdio_q; @@ -649,7 +651,7 @@ struct qdio_irq { __u32 * volatile dev_st_chg_ind; unsigned long int_parm; - int irq; + struct subchannel_id schid; unsigned int is_iqdio_irq; unsigned int is_thinint_irq; diff --git a/drivers/s390/cio/schid.h b/drivers/s390/cio/schid.h new file mode 100644 index 000000000000..220d97882341 --- /dev/null +++ b/drivers/s390/cio/schid.h @@ -0,0 +1,25 @@ +#ifndef S390_SCHID_H +#define S390_SCHID_H + +struct subchannel_id { + __u32 reserved:15; + __u32 one:1; + __u32 sch_no:16; +} __attribute__ ((packed,aligned(4))); + + +/* Helper function for sane state of pre-allocated subchannel_id. */ +static inline void +init_subchannel_id(struct subchannel_id *schid) +{ + memset(schid, 0, sizeof(struct subchannel_id)); + schid->one = 1; +} + +static inline int +schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2) +{ + return !memcmp(schid1, schid2, sizeof(struct subchannel_id)); +} + +#endif /* S390_SCHID_H */ -- cgit v1.2.3 From f97a56fb768e5fe9cd07c56ca47870136bb5530c Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Fri, 6 Jan 2006 00:19:22 -0800 Subject: [PATCH] s390: introduce for_each_subchannel for_each_subchannel() is an iterator calling a function for every possible subchannel id until non-zero is returned. Convert the current iterating functions to it. 
Signed-off-by: Cornelia Huck Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/cio/blacklist.c | 46 +++--- drivers/s390/cio/chsc.c | 372 ++++++++++++++++++++++--------------------- drivers/s390/cio/cio.c | 79 ++++----- drivers/s390/cio/css.c | 110 +++++++------ drivers/s390/cio/css.h | 1 + 5 files changed, 318 insertions(+), 290 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index a4b03031ff50..25e98483d4e4 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -219,6 +219,27 @@ is_blacklisted (int devno) } #ifdef CONFIG_PROC_FS +static int +__s390_redo_validation(struct subchannel_id schid, void *data) +{ + int ret; + struct subchannel *sch; + + sch = get_subchannel_by_schid(schid); + if (sch) { + /* Already known. */ + put_device(&sch->dev); + return 0; + } + ret = css_probe_device(schid); + if (ret == -ENXIO) + return ret; /* We're through. */ + if (ret == -ENOMEM) + /* Stop validation for now. Bad, but no need for a panic. */ + return ret; + return 0; +} + /* * Function: s390_redo_validation * Look for no longer blacklisted devices @@ -226,30 +247,9 @@ is_blacklisted (int devno) static inline void s390_redo_validation (void) { - struct subchannel_id schid; - CIO_TRACE_EVENT (0, "redoval"); - init_subchannel_id(&schid); - do { - int ret; - struct subchannel *sch; - - sch = get_subchannel_by_schid(schid); - if (sch) { - /* Already known. */ - put_device(&sch->dev); - continue; - } - ret = css_probe_device(schid); - if (ret == -ENXIO) - break; /* We're through. */ - if (ret == -ENOMEM) - /* - * Stop validation for now. Bad, but no need for a - * panic. - */ - break; - } while (schid.sch_no++ < __MAX_SUBCHANNEL); + + for_each_subchannel(__s390_redo_validation, NULL); } /* diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index aff5d149b729..78e082311f48 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -310,9 +310,14 @@ s390_set_chpid_offline( __u8 chpid) queue_work(slow_path_wq, &slow_path_work); } +struct res_acc_data { + struct channel_path *chp; + u32 fla_mask; + u16 fla; +}; + static int -s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask, - struct subchannel *sch) +s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch) { int found; int chp; @@ -324,8 +329,9 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask, * check if chpid is in information updated by ssd */ if (sch->ssd_info.valid && - sch->ssd_info.chpid[chp] == chpid && - (sch->ssd_info.fla[chp] & fla_mask) == fla) { + sch->ssd_info.chpid[chp] == res_data->chp->id && + (sch->ssd_info.fla[chp] & res_data->fla_mask) + == res_data->fla) { found = 1; break; } @@ -345,18 +351,80 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask, return 0x80 >> chp; } +static inline int +s390_process_res_acc_new_sch(struct subchannel_id schid) +{ + struct schib schib; + int ret; + /* + * We don't know the device yet, but since a path + * may be available now to the device we'll have + * to do recognition again. + * Since we don't have any idea about which chpid + * that beast may be on we'll have to do a stsch + * on all devices, grr... + */ + if (stsch(schid, &schib)) + /* We're through */ + return need_rescan ? -EAGAIN : -ENXIO; + + /* Put it on the slow path. 
*/ + ret = css_enqueue_subchannel_slow(schid); + if (ret) { + css_clear_subchannel_slow_list(); + need_rescan = 1; + return -EAGAIN; + } + return 0; +} + static int -s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) +__s390_process_res_acc(struct subchannel_id schid, void *data) { + int chp_mask, old_lpm; + struct res_acc_data *res_data; struct subchannel *sch; + + res_data = (struct res_acc_data *)data; + sch = get_subchannel_by_schid(schid); + if (!sch) + /* Check if a subchannel is newly available. */ + return s390_process_res_acc_new_sch(schid); + + spin_lock_irq(&sch->lock); + + chp_mask = s390_process_res_acc_sch(res_data, sch); + + if (chp_mask == 0) { + spin_unlock_irq(&sch->lock); + return 0; + } + old_lpm = sch->lpm; + sch->lpm = ((sch->schib.pmcw.pim & + sch->schib.pmcw.pam & + sch->schib.pmcw.pom) + | chp_mask) & sch->opm; + if (!old_lpm && sch->lpm) + device_trigger_reprobe(sch); + else if (sch->driver && sch->driver->verify) + sch->driver->verify(&sch->dev); + + spin_unlock_irq(&sch->lock); + put_device(&sch->dev); + return (res_data->fla_mask == 0xffff) ? -ENODEV : 0; +} + + +static int +s390_process_res_acc (struct res_acc_data *res_data) +{ int rc; - struct subchannel_id schid; char dbf_txt[15]; - sprintf(dbf_txt, "accpr%x", chpid); + sprintf(dbf_txt, "accpr%x", res_data->chp->id); CIO_TRACE_EVENT( 2, dbf_txt); - if (fla != 0) { - sprintf(dbf_txt, "fla%x", fla); + if (res_data->fla != 0) { + sprintf(dbf_txt, "fla%x", res_data->fla); CIO_TRACE_EVENT( 2, dbf_txt); } @@ -367,71 +435,11 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) * The more information we have (info), the less scanning * will we have to do. */ - - if (!get_chp_status(chpid)) - return 0; /* no need to do the rest */ - - rc = 0; - init_subchannel_id(&schid); - do { - int chp_mask, old_lpm; - - sch = get_subchannel_by_schid(schid); - if (!sch) { - struct schib schib; - int ret; - /* - * We don't know the device yet, but since a path - * may be available now to the device we'll have - * to do recognition again. - * Since we don't have any idea about which chpid - * that beast may be on we'll have to do a stsch - * on all devices, grr... - */ - if (stsch(schid, &schib)) { - /* We're through */ - if (need_rescan) - rc = -EAGAIN; - break; - } - if (need_rescan) { - rc = -EAGAIN; - continue; - } - /* Put it on the slow path. 
*/ - ret = css_enqueue_subchannel_slow(schid); - if (ret) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } - rc = -EAGAIN; - continue; - } - - spin_lock_irq(&sch->lock); - - chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch); - - if (chp_mask == 0) { - - spin_unlock_irq(&sch->lock); - continue; - } - old_lpm = sch->lpm; - sch->lpm = ((sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom) - | chp_mask) & sch->opm; - if (!old_lpm && sch->lpm) - device_trigger_reprobe(sch); - else if (sch->driver && sch->driver->verify) - sch->driver->verify(&sch->dev); - - spin_unlock_irq(&sch->lock); - put_device(&sch->dev); - if (fla_mask == 0xffff) - break; - } while (schid.sch_no++ < __MAX_SUBCHANNEL); + rc = for_each_subchannel(__s390_process_res_acc, res_data); + if (css_slow_subchannels_exist()) + rc = -EAGAIN; + else if (rc != -EAGAIN) + rc = 0; return rc; } @@ -469,6 +477,7 @@ int chsc_process_crw(void) { int chpid, ret; + struct res_acc_data res_data; struct { struct chsc_header request; u32 reserved1; @@ -503,7 +512,7 @@ chsc_process_crw(void) do { int ccode, status; memset(sei_area, 0, sizeof(*sei_area)); - + memset(&res_data, 0, sizeof(struct res_acc_data)); sei_area->request = (struct chsc_header) { .length = 0x0010, .code = 0x000e, @@ -576,26 +585,23 @@ chsc_process_crw(void) if (status < 0) new_channel_path(sei_area->rsid); else if (!status) - return 0; - if ((sei_area->vf & 0x80) == 0) { - pr_debug("chpid: %x\n", sei_area->rsid); - ret = s390_process_res_acc(sei_area->rsid, - 0, 0); - } else if ((sei_area->vf & 0xc0) == 0x80) { - pr_debug("chpid: %x link addr: %x\n", - sei_area->rsid, sei_area->fla); - ret = s390_process_res_acc(sei_area->rsid, - sei_area->fla, - 0xff00); - } else if ((sei_area->vf & 0xc0) == 0xc0) { - pr_debug("chpid: %x full link addr: %x\n", - sei_area->rsid, sei_area->fla); - ret = s390_process_res_acc(sei_area->rsid, - sei_area->fla, - 0xffff); + break; + res_data.chp = chps[sei_area->rsid]; + pr_debug("chpid: %x", sei_area->rsid); + if ((sei_area->vf & 0xc0) != 0) { + res_data.fla = sei_area->fla; + if ((sei_area->vf & 0xc0) == 0xc0) { + pr_debug(" full link addr: %x", + sei_area->fla); + res_data.fla_mask = 0xffff; + } else { + pr_debug(" link addr: %x", + sei_area->fla); + res_data.fla_mask = 0xff00; + } } - pr_debug("\n"); - + ret = s390_process_res_acc(&res_data); + pr_debug("\n\n"); break; default: /* other stuff */ @@ -607,12 +613,70 @@ chsc_process_crw(void) return ret; } +static inline int +__chp_add_new_sch(struct subchannel_id schid) +{ + struct schib schib; + int ret; + + if (stsch(schid, &schib)) + /* We're through */ + return need_rescan ? -EAGAIN : -ENXIO; + + /* Put it on the slow path. */ + ret = css_enqueue_subchannel_slow(schid); + if (ret) { + css_clear_subchannel_slow_list(); + need_rescan = 1; + return -EAGAIN; + } + return 0; +} + + static int -chp_add(int chpid) +__chp_add(struct subchannel_id schid, void *data) { + int i; + struct channel_path *chp; struct subchannel *sch; - int ret, rc; - struct subchannel_id schid; + + chp = (struct channel_path *)data; + sch = get_subchannel_by_schid(schid); + if (!sch) + /* Check if the subchannel is now available. */ + return __chp_add_new_sch(schid); + spin_lock(&sch->lock); + for (i=0; i<8; i++) + if (sch->schib.pmcw.chpid[i] == chp->id) { + if (stsch(sch->schid, &sch->schib) != 0) { + /* Endgame. 
*/ + spin_unlock(&sch->lock); + return -ENXIO; + } + break; + } + if (i==8) { + spin_unlock(&sch->lock); + return 0; + } + sch->lpm = ((sch->schib.pmcw.pim & + sch->schib.pmcw.pam & + sch->schib.pmcw.pom) + | 0x80 >> i) & sch->opm; + + if (sch->driver && sch->driver->verify) + sch->driver->verify(&sch->dev); + + spin_unlock(&sch->lock); + put_device(&sch->dev); + return 0; +} + +static int +chp_add(int chpid) +{ + int rc; char dbf_txt[15]; if (!get_chp_status(chpid)) @@ -621,60 +685,11 @@ chp_add(int chpid) sprintf(dbf_txt, "cadd%x", chpid); CIO_TRACE_EVENT(2, dbf_txt); - rc = 0; - init_subchannel_id(&schid); - do { - int i; - - sch = get_subchannel_by_schid(schid); - if (!sch) { - struct schib schib; - - if (stsch(schid, &schib)) { - /* We're through */ - if (need_rescan) - rc = -EAGAIN; - break; - } - if (need_rescan) { - rc = -EAGAIN; - continue; - } - /* Put it on the slow path. */ - ret = css_enqueue_subchannel_slow(schid); - if (ret) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } - rc = -EAGAIN; - continue; - } - - spin_lock(&sch->lock); - for (i=0; i<8; i++) - if (sch->schib.pmcw.chpid[i] == chpid) { - if (stsch(sch->schid, &sch->schib) != 0) { - /* Endgame. */ - spin_unlock(&sch->lock); - return rc; - } - break; - } - if (i==8) { - spin_unlock(&sch->lock); - return rc; - } - sch->lpm = ((sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom) - | 0x80 >> i) & sch->opm; - - if (sch->driver && sch->driver->verify) - sch->driver->verify(&sch->dev); - - spin_unlock(&sch->lock); - put_device(&sch->dev); - } while (schid.sch_no++ < __MAX_SUBCHANNEL); + rc = for_each_subchannel(__chp_add, chps[chpid]); + if (css_slow_subchannels_exist()) + rc = -EAGAIN; + if (rc != -EAGAIN) + rc = 0; return rc; } @@ -786,6 +801,29 @@ s390_subchannel_vary_chpid_on(struct device *dev, void *data) return 0; } +static int +__s390_vary_chpid_on(struct subchannel_id schid, void *data) +{ + struct schib schib; + struct subchannel *sch; + + sch = get_subchannel_by_schid(schid); + if (sch) { + put_device(&sch->dev); + return 0; + } + if (stsch(schid, &schib)) + /* We're through */ + return -ENXIO; + /* Put it on the slow path. */ + if (css_enqueue_subchannel_slow(schid)) { + css_clear_subchannel_slow_list(); + need_rescan = 1; + return -EAGAIN; + } + return 0; +} + /* * Function: s390_vary_chpid * Varies the specified chpid online or offline @@ -794,9 +832,7 @@ static int s390_vary_chpid( __u8 chpid, int on) { char dbf_text[15]; - int status, ret; - struct subchannel_id schid; - struct subchannel *sch; + int status; sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid); CIO_TRACE_EVENT( 2, dbf_text); @@ -821,31 +857,9 @@ s390_vary_chpid( __u8 chpid, int on) bus_for_each_dev(&css_bus_type, NULL, &chpid, on ? s390_subchannel_vary_chpid_on : s390_subchannel_vary_chpid_off); - if (!on) - goto out; - /* Scan for new devices on varied on path. */ - init_subchannel_id(&schid); - do { - struct schib schib; - - if (need_rescan) - break; - sch = get_subchannel_by_schid(schid); - if (sch) { - put_device(&sch->dev); - continue; - } - if (stsch(schid, &schib)) - /* We're through */ - break; - /* Put it on the slow path. */ - ret = css_enqueue_subchannel_slow(schid); - if (ret) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } - } while (schid.sch_no++ < __MAX_SUBCHANNEL); -out: + if (on) + /* Scan for new devices on varied on path. 
*/ + for_each_subchannel(__s390_vary_chpid_on, NULL); if (need_rescan || css_slow_subchannels_exist()) queue_work(slow_path_wq, &slow_path_work); return 0; diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 396bada65f86..3eb6cb608fc9 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -691,7 +691,22 @@ wait_cons_dev (void) } static int -cio_console_irq(void) +cio_test_for_console(struct subchannel_id schid, void *data) +{ + if (stsch(schid, &console_subchannel.schib) != 0) + return -ENXIO; + if (console_subchannel.schib.pmcw.dnv && + console_subchannel.schib.pmcw.dev == + console_devno) { + console_irq = schid.sch_no; + return 1; /* found */ + } + return 0; +} + + +static int +cio_get_console_sch_no(void) { struct subchannel_id schid; @@ -705,16 +720,7 @@ cio_console_irq(void) console_devno = console_subchannel.schib.pmcw.dev; } else if (console_devno != -1) { /* At least the console device number is known. */ - do { - if (stsch(schid, &console_subchannel.schib) != 0) - break; - if (console_subchannel.schib.pmcw.dnv && - console_subchannel.schib.pmcw.dev == - console_devno) { - console_irq = schid.sch_no; - break; - } - } while (schid.sch_no++ < __MAX_SUBCHANNEL); + for_each_subchannel(cio_test_for_console, NULL); if (console_irq == -1) return -1; } else { @@ -730,19 +736,19 @@ cio_console_irq(void) struct subchannel * cio_probe_console(void) { - int irq, ret; + int sch_no, ret; struct subchannel_id schid; if (xchg(&console_subchannel_in_use, 1) != 0) return ERR_PTR(-EBUSY); - irq = cio_console_irq(); - if (irq == -1) { + sch_no = cio_get_console_sch_no(); + if (sch_no == -1) { console_subchannel_in_use = 0; return ERR_PTR(-ENODEV); } memset(&console_subchannel, 0, sizeof(struct subchannel)); init_subchannel_id(&schid); - schid.sch_no = irq; + schid.sch_no = sch_no; ret = cio_validate_subchannel(&console_subchannel, schid); if (ret) { console_subchannel_in_use = 0; @@ -830,32 +836,33 @@ __clear_subchannel_easy(struct subchannel_id schid) } extern void do_reipl(unsigned long devno); +static int +__shutdown_subchannel_easy(struct subchannel_id schid, void *data) +{ + struct schib schib; + + if (stsch(schid, &schib)) + return -ENXIO; + if (!schib.pmcw.ena) + return 0; + switch(__disable_subchannel_easy(schid, &schib)) { + case 0: + case -ENODEV: + break; + default: /* -EBUSY */ + if (__clear_subchannel_easy(schid)) + break; /* give up... */ + stsch(schid, &schib); + __disable_subchannel_easy(schid, &schib); + } + return 0; +} -/* Clear all subchannels. */ void clear_all_subchannels(void) { - struct subchannel_id schid; - local_irq_disable(); - init_subchannel_id(&schid); - do { - struct schib schib; - if (stsch(schid, &schib)) - break; /* break out of the loop */ - if (!schib.pmcw.ena) - continue; - switch(__disable_subchannel_easy(schid, &schib)) { - case 0: - case -ENODEV: - break; - default: /* -EBUSY */ - if (__clear_subchannel_easy(schid)) - break; /* give up... jump out of switch */ - stsch(schid, &schib); - __disable_subchannel_easy(schid, &schib); - } - } while (schid.sch_no++ < __MAX_SUBCHANNEL); + for_each_subchannel(__shutdown_subchannel_easy, NULL); } /* Make sure all subchannels are quiet before we re-ipl an lpar. 
*/ diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 5137dafd1e8d..dba632a5f71f 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -21,7 +21,6 @@ #include "ioasm.h" #include "chsc.h" -unsigned int highest_subchannel; int need_rescan = 0; int css_init_done = 0; @@ -32,6 +31,22 @@ struct device css_bus_device = { .bus_id = "css0", }; +inline int +for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) +{ + struct subchannel_id schid; + int ret; + + init_subchannel_id(&schid); + ret = -ENODEV; + do { + ret = fn(schid, data); + if (ret) + break; + } while (schid.sch_no++ < __MAX_SUBCHANNEL); + return ret; +} + static struct subchannel * css_alloc_subchannel(struct subchannel_id schid) { @@ -280,25 +295,10 @@ css_evaluate_subchannel(struct subchannel_id schid, int slow) return ret; } -static void -css_rescan_devices(void) +static int +css_rescan_devices(struct subchannel_id schid, void *data) { - int ret; - struct subchannel_id schid; - - init_subchannel_id(&schid); - do { - ret = css_evaluate_subchannel(schid, 1); - /* No more memory. It doesn't make sense to continue. No - * panic because this can happen in midflight and just - * because we can't use a new device is no reason to crash - * the system. */ - if (ret == -ENOMEM) - break; - /* -ENXIO indicates that there are no more subchannels. */ - if (ret == -ENXIO) - break; - } while (schid.sch_no++ < __MAX_SUBCHANNEL); + return css_evaluate_subchannel(schid, 1); } struct slow_subchannel { @@ -316,7 +316,7 @@ css_trigger_slow_path(void) if (need_rescan) { need_rescan = 0; - css_rescan_devices(); + for_each_subchannel(css_rescan_devices, NULL); return; } @@ -383,6 +383,43 @@ css_process_crw(int irq) return ret; } +static int __init +__init_channel_subsystem(struct subchannel_id schid, void *data) +{ + struct subchannel *sch; + int ret; + + if (cio_is_console(schid)) + sch = cio_get_console_subchannel(); + else { + sch = css_alloc_subchannel(schid); + if (IS_ERR(sch)) + ret = PTR_ERR(sch); + else + ret = 0; + switch (ret) { + case 0: + break; + case -ENOMEM: + panic("Out of memory in init_channel_subsystem\n"); + /* -ENXIO: no more subchannels. */ + case -ENXIO: + return ret; + default: + return 0; + } + } + /* + * We register ALL valid subchannels in ioinfo, even those + * that have been present before init_channel_subsystem. + * These subchannels can't have been registered yet (kmalloc + * not working) so we do it now. This is true e.g. for the + * console subchannel. + */ + css_register_subchannel(sch); + return 0; +} + static void __init css_generate_pgid(void) { @@ -410,7 +447,6 @@ static int __init init_channel_subsystem (void) { int ret; - struct subchannel_id schid; if (chsc_determine_css_characteristics() == 0) css_characteristics_avail = 1; @@ -426,38 +462,8 @@ init_channel_subsystem (void) ctl_set_bit(6, 28); - init_subchannel_id(&schid); - do { - struct subchannel *sch; - - if (cio_is_console(schid)) - sch = cio_get_console_subchannel(); - else { - sch = css_alloc_subchannel(schid); - if (IS_ERR(sch)) - ret = PTR_ERR(sch); - else - ret = 0; - if (ret == -ENOMEM) - panic("Out of memory in " - "init_channel_subsystem\n"); - /* -ENXIO: no more subchannels. */ - if (ret == -ENXIO) - break; - if (ret) - continue; - } - /* - * We register ALL valid subchannels in ioinfo, even those - * that have been present before init_channel_subsystem. - * These subchannels can't have been registered yet (kmalloc - * not working) so we do it now. This is true e.g. for the - * console subchannel. 
- */ - css_register_subchannel(sch); - } while (schid.sch_no++ < __MAX_SUBCHANNEL); + for_each_subchannel(__init_channel_subsystem, NULL); return 0; - out_bus: bus_unregister(&css_bus_type); out: diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index f26e16daecb5..71efca25476d 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -126,6 +126,7 @@ extern struct css_driver io_subchannel_driver; extern int css_probe_device(struct subchannel_id); extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); extern int css_init_done; +extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); #define __MAX_SUBCHANNEL 65535 -- cgit v1.2.3 From a28c69448154a0901e8815922030c5dcd2f8e388 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Fri, 6 Jan 2006 00:19:23 -0800 Subject: [PATCH] s390: introduce struct channel_subsystem struct channel_subsystem encapsulates several per channel subsystem properties, like status of chpids or the global path group id. Signed-off-by: Cornelia Huck Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/cio/chsc.c | 32 ++++++++++++-------- drivers/s390/cio/chsc.h | 5 ++-- drivers/s390/cio/css.c | 67 +++++++++++++++++++++++++++++------------- drivers/s390/cio/css.h | 25 +++++++++++++--- drivers/s390/cio/device.c | 4 --- drivers/s390/cio/device_pgid.c | 2 +- 6 files changed, 90 insertions(+), 45 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 78e082311f48..ebd924962df0 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -24,8 +24,6 @@ #include "ioasm.h" #include "chsc.h" -static struct channel_path *chps[NR_CHPIDS]; - static void *sei_page; static int new_channel_path(int chpid); @@ -33,13 +31,13 @@ static int new_channel_path(int chpid); static inline void set_chp_logically_online(int chp, int onoff) { - chps[chp]->state = onoff; + css[0]->chps[chp]->state = onoff; } static int get_chp_status(int chp) { - return (chps[chp] ? chps[chp]->state : -ENODEV); + return (css[0]->chps[chp] ? 
css[0]->chps[chp]->state : -ENODEV); } void @@ -219,13 +217,13 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) int j; int mask; struct subchannel *sch; - __u8 *chpid; + struct channel_path *chpid; struct schib schib; sch = to_subchannel(dev); chpid = data; for (j = 0; j < 8; j++) - if (sch->schib.pmcw.chpid[j] == *chpid) + if (sch->schib.pmcw.chpid[j] == chpid->id) break; if (j >= 8) return 0; @@ -296,18 +294,20 @@ static inline void s390_set_chpid_offline( __u8 chpid) { char dbf_txt[15]; + struct device *dev; sprintf(dbf_txt, "chpr%x", chpid); CIO_TRACE_EVENT(2, dbf_txt); if (get_chp_status(chpid) <= 0) return; - - bus_for_each_dev(&css_bus_type, NULL, &chpid, + dev = get_device(&css[0]->chps[chpid]->dev); + bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev), s390_subchannel_remove_chpid); if (need_rescan || css_slow_subchannels_exist()) queue_work(slow_path_wq, &slow_path_work); + put_device(dev); } struct res_acc_data { @@ -511,6 +511,7 @@ chsc_process_crw(void) ret = 0; do { int ccode, status; + struct device *dev; memset(sei_area, 0, sizeof(*sei_area)); memset(&res_data, 0, sizeof(struct res_acc_data)); sei_area->request = (struct chsc_header) { @@ -586,7 +587,8 @@ chsc_process_crw(void) new_channel_path(sei_area->rsid); else if (!status) break; - res_data.chp = chps[sei_area->rsid]; + dev = get_device(&css[0]->chps[sei_area->rsid]->dev); + res_data.chp = to_channelpath(dev); pr_debug("chpid: %x", sei_area->rsid); if ((sei_area->vf & 0xc0) != 0) { res_data.fla = sei_area->fla; @@ -602,6 +604,7 @@ chsc_process_crw(void) } ret = s390_process_res_acc(&res_data); pr_debug("\n\n"); + put_device(dev); break; default: /* other stuff */ @@ -678,6 +681,7 @@ chp_add(int chpid) { int rc; char dbf_txt[15]; + struct device *dev; if (!get_chp_status(chpid)) return 0; /* no need to do the rest */ @@ -685,11 +689,13 @@ chp_add(int chpid) sprintf(dbf_txt, "cadd%x", chpid); CIO_TRACE_EVENT(2, dbf_txt); - rc = for_each_subchannel(__chp_add, chps[chpid]); + dev = get_device(&css[0]->chps[chpid]->dev); + rc = for_each_subchannel(__chp_add, to_channelpath(dev)); if (css_slow_subchannels_exist()) rc = -EAGAIN; if (rc != -EAGAIN) rc = 0; + put_device(dev); return rc; } @@ -1016,7 +1022,7 @@ new_channel_path(int chpid) chp->id = chpid; chp->state = 1; chp->dev = (struct device) { - .parent = &css_bus_device, + .parent = &css[0]->device, .release = chp_release, }; snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid); @@ -1038,7 +1044,7 @@ new_channel_path(int chpid) device_unregister(&chp->dev); goto out_free; } else - chps[chpid] = chp; + css[0]->chps[chpid] = chp; return ret; out_free: kfree(chp); @@ -1051,7 +1057,7 @@ chsc_get_chp_desc(struct subchannel *sch, int chp_no) struct channel_path *chp; struct channel_path_desc *desc; - chp = chps[sch->schib.pmcw.chpid[chp_no]]; + chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]]; if (!chp) return NULL; desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 69450134bec7..170083ca4349 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -1,8 +1,6 @@ #ifndef S390_CHSC_H #define S390_CHSC_H -#define NR_CHPIDS 256 - #define CHSC_SEI_ACC_CHPID 1 #define CHSC_SEI_ACC_LINKADDR 2 #define CHSC_SEI_ACC_FULLLINKADDR 3 @@ -65,4 +63,7 @@ extern int chsc_determine_css_characteristics(void); extern int css_characteristics_avail; extern void *chsc_get_chp_desc(struct subchannel*, int); + +#define to_channelpath(dev) container_of(dev, struct channel_path, dev) + 
#endif diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index dba632a5f71f..b6225cbbbee7 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -24,12 +24,9 @@ int need_rescan = 0; int css_init_done = 0; -struct pgid global_pgid; -int css_characteristics_avail = 0; +struct channel_subsystem *css[__MAX_CSSID + 1]; -struct device css_bus_device = { - .bus_id = "css0", -}; +int css_characteristics_avail = 0; inline int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) @@ -112,7 +109,7 @@ css_register_subchannel(struct subchannel *sch) int ret; /* Initialize the subchannel structure */ - sch->dev.parent = &css_bus_device; + sch->dev.parent = &css[0]->device; sch->dev.bus = &css_bus_type; sch->dev.release = &css_subchannel_release; @@ -421,21 +418,35 @@ __init_channel_subsystem(struct subchannel_id schid, void *data) } static void __init -css_generate_pgid(void) +css_generate_pgid(struct channel_subsystem *css, u32 tod_high) { - /* Let's build our path group ID here. */ - if (css_characteristics_avail && css_general_characteristics.mcss) - global_pgid.cpu_addr = 0x8000; - else { + if (css_characteristics_avail && css_general_characteristics.mcss) { + css->global_pgid.pgid_high.ext_cssid.version = 0x80; + css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; + } else { #ifdef CONFIG_SMP - global_pgid.cpu_addr = hard_smp_processor_id(); + css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id(); #else - global_pgid.cpu_addr = 0; + css->global_pgid.pgid_high.cpu_addr = 0; #endif } - global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident; - global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine; - global_pgid.tod_high = (__u32) (get_clock() >> 32); + css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident; + css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine; + css->global_pgid.tod_high = tod_high; + +} + +static inline void __init +setup_css(int nr) +{ + u32 tod_high; + + memset(css[nr], 0, sizeof(struct channel_subsystem)); + css[nr]->valid = 1; + css[nr]->cssid = nr; + sprintf(css[nr]->device.bus_id, "css%x", nr); + tod_high = (u32) (get_clock() >> 32); + css_generate_pgid(css[nr], tod_high); } /* @@ -446,25 +457,39 @@ css_generate_pgid(void) static int __init init_channel_subsystem (void) { - int ret; + int ret, i; if (chsc_determine_css_characteristics() == 0) css_characteristics_avail = 1; - css_generate_pgid(); - if ((ret = bus_register(&css_bus_type))) goto out; - if ((ret = device_register (&css_bus_device))) - goto out_bus; + /* Setup css structure. 
*/ + for (i = 0; i <= __MAX_CSSID; i++) { + css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL); + if (!css[i]) { + ret = -ENOMEM; + goto out_bus; + } + setup_css(i); + ret = device_register(&css[i]->device); + if (ret) + goto out_free; + } css_init_done = 1; ctl_set_bit(6, 28); for_each_subchannel(__init_channel_subsystem, NULL); return 0; +out_free: + kfree(css[i]); out_bus: + while (i > 0) { + i--; + device_unregister(&css[i]->device); + } bus_unregister(&css_bus_type); out: return ret; diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 71efca25476d..b74659cab0af 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -35,19 +35,25 @@ struct path_state { __u8 resvd : 3; /* reserved */ } __attribute__ ((packed)); +struct extended_cssid { + u8 version; + u8 cssid; +} __attribute__ ((packed)); + struct pgid { union { __u8 fc; /* SPID function code */ struct path_state ps; /* SNID path state */ } inf; - __u32 cpu_addr : 16; /* CPU address */ + union { + __u32 cpu_addr : 16; /* CPU address */ + struct extended_cssid ext_cssid; + } pgid_high; __u32 cpu_id : 24; /* CPU identification */ __u32 cpu_model : 16; /* CPU model */ __u32 tod_high; /* high word TOD clock */ } __attribute__ ((packed)); -extern struct pgid global_pgid; - #define MAX_CIWS 8 /* @@ -129,9 +135,20 @@ extern int css_init_done; extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); #define __MAX_SUBCHANNEL 65535 +#define __MAX_CHPID 255 +#define __MAX_CSSID 0 + +struct channel_subsystem { + u8 cssid; + int valid; + struct channel_path *chps[__MAX_CHPID]; + struct device device; + struct pgid global_pgid; +}; +#define to_css(dev) container_of(dev, struct channel_subsystem, device) extern struct bus_type css_bus_type; -extern struct device css_bus_device; +extern struct channel_subsystem *css[]; /* Some helper functions for disconnected state. */ int device_is_disconnected(struct subchannel *); diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 9ac07aeffbe6..ba9f7c11f63f 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -986,10 +986,6 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) cdev->dev = (struct device) { .parent = &sch->dev, }; - /* Initialize the subchannel structure */ - sch->dev.parent = &css_bus_device; - sch->dev.bus = &css_bus_type; - rc = io_subchannel_recog(cdev, sch); if (rc) return rc; diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index f08e84cc3563..3c89d70b9c09 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -164,7 +164,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event) /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */ case 0: /* Sense Path Group ID successful. */ if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET) - memcpy(&cdev->private->pgid, &global_pgid, + memcpy(&cdev->private->pgid, &css[0]->global_pgid, sizeof(struct pgid)); ccw_device_sense_pgid_done(cdev, 0); break; -- cgit v1.2.3 From 678a395b356a98368a93c3640252502b70c3676f Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Fri, 6 Jan 2006 00:19:24 -0800 Subject: [PATCH] s390: convert /proc/cio_ignore Convert /proc/cio_ignore to a sequential file. This makes multiple subchannel sets support easier. 
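For reference, a minimal self-contained sketch of the seq_file plumbing this conversion relies on (illustrative only, not part of the patch; all names and the record count are placeholders, the real cio_ignore iterator follows in the diff below). start/next/stop manage an iterator cursor, show formats one record, and seq_open() wires the ops into the usual file_operations.

	#include <linux/seq_file.h>
	#include <linux/fs.h>

	#define EXAMPLE_NR_RECORDS 4

	static void *example_seq_start(struct seq_file *s, loff_t *pos)
	{
		/* Return a non-NULL cursor while there is something to show. */
		return (*pos < EXAMPLE_NR_RECORDS) ? pos : NULL;
	}

	static void *example_seq_next(struct seq_file *s, void *it, loff_t *pos)
	{
		(*pos)++;
		return (*pos < EXAMPLE_NR_RECORDS) ? pos : NULL;
	}

	static void example_seq_stop(struct seq_file *s, void *it)
	{
		/* Nothing to release for this trivial iterator. */
	}

	static int example_seq_show(struct seq_file *s, void *it)
	{
		return seq_printf(s, "record %llu\n",
				  (unsigned long long) *(loff_t *) it);
	}

	static struct seq_operations example_seq_ops = {
		.start	= example_seq_start,
		.next	= example_seq_next,
		.stop	= example_seq_stop,
		.show	= example_seq_show,
	};

	static int example_proc_open(struct inode *inode, struct file *file)
	{
		return seq_open(file, &example_seq_ops);
	}

	static struct file_operations example_proc_fops = {
		.open		= example_proc_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= seq_release,
	};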
Signed-off-by: Cornelia Huck Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/cio/blacklist.c | 122 ++++++++++++++++++++++++++++++++----------- 1 file changed, 92 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 25e98483d4e4..daea41c63329 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -279,41 +280,82 @@ blacklist_parse_proc_parameters (char *buf) s390_redo_validation (); } -/* FIXME: These should be real bus ids and not home-grown ones! */ -static int cio_ignore_read (char *page, char **start, off_t off, - int count, int *eof, void *data) +/* Iterator struct for all devices. */ +struct ccwdev_iter { + int devno; + int in_range; +}; + +static void * +cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset) { - const unsigned int entry_size = 18; /* "0.0.ABCD-0.0.EFGH\n" */ - long devno; - int len; - - len = 0; - for (devno = off; /* abuse the page variable - * as counter, see fs/proc/generic.c */ - devno < __MAX_SUBCHANNEL && len + entry_size < count; devno++) { - if (!test_bit(devno, bl_dev)) - continue; - len += sprintf(page + len, "0.0.%04lx", devno); - if (test_bit(devno + 1, bl_dev)) { /* print range */ - while (++devno < __MAX_SUBCHANNEL) - if (!test_bit(devno, bl_dev)) - break; - len += sprintf(page + len, "-0.0.%04lx", --devno); - } - len += sprintf(page + len, "\n"); - } + struct ccwdev_iter *iter; + + if (*offset > __MAX_SUBCHANNEL) + return NULL; + iter = kmalloc(sizeof(struct ccwdev_iter), GFP_KERNEL); + if (!iter) + return ERR_PTR(-ENOMEM); + memset(iter, 0, sizeof(struct ccwdev_iter)); + iter->devno = *offset; + return iter; +} + +static void +cio_ignore_proc_seq_stop(struct seq_file *s, void *it) +{ + if (!IS_ERR(it)) + kfree(it); +} - if (devno < __MAX_SUBCHANNEL) - *eof = 1; - *start = (char *) (devno - off); /* number of checked entries */ - return len; +static void * +cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset) +{ + struct ccwdev_iter *iter; + + if (*offset > __MAX_SUBCHANNEL) + return NULL; + iter = (struct ccwdev_iter *)it; + iter->devno++; + (*offset)++; + return iter; } -static int cio_ignore_write(struct file *file, const char __user *user_buf, - unsigned long user_len, void *data) +static int +cio_ignore_proc_seq_show(struct seq_file *s, void *it) +{ + struct ccwdev_iter *iter; + + iter = (struct ccwdev_iter *)it; + if (!is_blacklisted(iter->devno)) + /* Not blacklisted, nothing to output. */ + return 0; + if (!iter->in_range) { + /* First device in range. */ + if ((iter->devno == __MAX_SUBCHANNEL) || + !is_blacklisted(iter->devno + 1)) + /* Singular device. */ + return seq_printf(s, "0.0.%04x\n", iter->devno); + iter->in_range = 1; + return seq_printf(s, "0.0.%04x-", iter->devno); + } + if ((iter->devno == __MAX_SUBCHANNEL) || + !is_blacklisted(iter->devno + 1)) { + /* Last device in range. */ + iter->in_range = 0; + return seq_printf(s, "0.0.%04x\n", iter->devno); + } + return 0; +} + +static ssize_t +cio_ignore_write(struct file *file, const char __user *user_buf, + size_t user_len, loff_t *offset) { char *buf; + if (*offset) + return -EINVAL; if (user_len > 65536) user_len = 65536; buf = vmalloc (user_len + 1); /* maybe better use the stack? 
*/ @@ -331,6 +373,27 @@ static int cio_ignore_write(struct file *file, const char __user *user_buf, return user_len; } +static struct seq_operations cio_ignore_proc_seq_ops = { + .start = cio_ignore_proc_seq_start, + .stop = cio_ignore_proc_seq_stop, + .next = cio_ignore_proc_seq_next, + .show = cio_ignore_proc_seq_show, +}; + +static int +cio_ignore_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &cio_ignore_proc_seq_ops); +} + +static struct file_operations cio_ignore_proc_fops = { + .open = cio_ignore_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, + .write = cio_ignore_write, +}; + static int cio_ignore_proc_init (void) { @@ -341,8 +404,7 @@ cio_ignore_proc_init (void) if (!entry) return 0; - entry->read_proc = cio_ignore_read; - entry->write_proc = cio_ignore_write; + entry->proc_fops = &cio_ignore_proc_fops; return 1; } -- cgit v1.2.3 From fb6958a594da49ece869793e6ec163b89fc5f79f Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Fri, 6 Jan 2006 00:19:25 -0800 Subject: [PATCH] s390: multiple subchannel sets support Add support for multiple subchannel sets. Works with arbitrary devices in subchannel set 1 and is transparent to device drivers. Although currently only two subchannel sets are available, this will work with the architectured maximum number of subchannel sets as well. Signed-off-by: Cornelia Huck Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/cio/blacklist.c | 86 +++++++++++++++++++++++----------------- drivers/s390/cio/blacklist.h | 2 +- drivers/s390/cio/chsc.c | 68 +++++++++++++++++++++++++++---- drivers/s390/cio/chsc.h | 4 ++ drivers/s390/cio/cio.c | 35 +++++++++------- drivers/s390/cio/css.c | 44 ++++++++++++++------ drivers/s390/cio/css.h | 2 + drivers/s390/cio/device.c | 22 ++++++---- drivers/s390/cio/device_fsm.c | 15 ++++--- drivers/s390/cio/device_id.c | 22 +++++----- drivers/s390/cio/device_pgid.c | 40 ++++++++++--------- drivers/s390/cio/device_status.c | 10 +++-- drivers/s390/cio/ioasm.h | 29 ++++++++++++++ drivers/s390/cio/qdio.c | 81 ++++++++++++++++++++++--------------- drivers/s390/cio/schid.h | 3 +- drivers/s390/s390mach.c | 56 +++++++++++++++++++------- 16 files changed, 354 insertions(+), 165 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index daea41c63329..2d444cb2fdf7 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -1,7 +1,7 @@ /* * drivers/s390/cio/blacklist.c * S/390 common I/O routines -- blacklisting of specific devices - * $Revision: 1.35 $ + * $Revision: 1.39 $ * * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, * IBM Corporation @@ -35,10 +35,10 @@ * These can be single devices or ranges of devices */ -/* 65536 bits to indicate if a devno is blacklisted or not */ +/* 65536 bits for each set to indicate if a devno is blacklisted or not */ #define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \ (8*sizeof(long))) -static unsigned long bl_dev[__BL_DEV_WORDS]; +static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS]; typedef enum {add, free} range_action; /* @@ -46,21 +46,23 @@ typedef enum {add, free} range_action; * (Un-)blacklist the devices from-to */ static inline void -blacklist_range (range_action action, unsigned int from, unsigned int to) +blacklist_range (range_action action, unsigned int from, unsigned int to, + unsigned int ssid) { if (!to) to = from; - if (from > to || to > __MAX_SUBCHANNEL) { + 
if (from > to || to > __MAX_SUBCHANNEL || ssid > __MAX_SSID) { printk (KERN_WARNING "Invalid blacklist range " - "0x%04x to 0x%04x, skipping\n", from, to); + "0.%x.%04x to 0.%x.%04x, skipping\n", + ssid, from, ssid, to); return; } for (; from <= to; from++) { if (action == add) - set_bit (from, bl_dev); + set_bit (from, bl_dev[ssid]); else - clear_bit (from, bl_dev); + clear_bit (from, bl_dev[ssid]); } } @@ -70,7 +72,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to) * Shamelessly grabbed from dasd_devmap.c. */ static inline int -blacklist_busid(char **str, int *id0, int *id1, int *devno) +blacklist_busid(char **str, int *id0, int *ssid, int *devno) { int val, old_style; char *sav; @@ -87,7 +89,7 @@ blacklist_busid(char **str, int *id0, int *id1, int *devno) goto confused; val = simple_strtoul(*str, str, 16); if (old_style || (*str)[0] != '.') { - *id0 = *id1 = 0; + *id0 = *ssid = 0; if (val < 0 || val > 0xffff) goto confused; *devno = val; @@ -106,7 +108,7 @@ blacklist_busid(char **str, int *id0, int *id1, int *devno) val = simple_strtoul(*str, str, 16); if (val < 0 || val > 0xff || (*str)++[0] != '.') goto confused; - *id1 = val; + *ssid = val; if (!isxdigit((*str)[0])) /* We require at least one hex digit */ goto confused; val = simple_strtoul(*str, str, 16); @@ -126,7 +128,7 @@ confused: static inline int blacklist_parse_parameters (char *str, range_action action) { - unsigned int from, to, from_id0, to_id0, from_id1, to_id1; + unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid; while (*str != 0 && *str != '\n') { range_action ra = action; @@ -143,23 +145,25 @@ blacklist_parse_parameters (char *str, range_action action) */ if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 || strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) { - from = 0; - to = __MAX_SUBCHANNEL; + int j; + str += 3; + for (j=0; j <= __MAX_SSID; j++) + blacklist_range(ra, 0, __MAX_SUBCHANNEL, j); } else { int rc; rc = blacklist_busid(&str, &from_id0, - &from_id1, &from); + &from_ssid, &from); if (rc) continue; to = from; to_id0 = from_id0; - to_id1 = from_id1; + to_ssid = from_ssid; if (*str == '-') { str++; rc = blacklist_busid(&str, &to_id0, - &to_id1, &to); + &to_ssid, &to); if (rc) continue; } @@ -169,18 +173,19 @@ blacklist_parse_parameters (char *str, range_action action) strsep(&str, ",\n")); continue; } - if ((from_id0 != to_id0) || (from_id1 != to_id1)) { + if ((from_id0 != to_id0) || + (from_ssid != to_ssid)) { printk(KERN_WARNING "invalid cio_ignore range " "%x.%x.%04x-%x.%x.%04x\n", - from_id0, from_id1, from, - to_id0, to_id1, to); + from_id0, from_ssid, from, + to_id0, to_ssid, to); continue; } + pr_debug("blacklist_setup: adding range " + "from %x.%x.%04x to %x.%x.%04x\n", + from_id0, from_ssid, from, to_id0, to_ssid, to); + blacklist_range (ra, from, to, to_ssid); } - /* FIXME: ignoring id0 and id1 here. */ - pr_debug("blacklist_setup: adding range " - "from 0.0.%04x to 0.0.%04x\n", from, to); - blacklist_range (ra, from, to); } return 1; } @@ -214,9 +219,9 @@ __setup ("cio_ignore=", blacklist_setup); * Used by validate_subchannel() */ int -is_blacklisted (int devno) +is_blacklisted (int ssid, int devno) { - return test_bit (devno, bl_dev); + return test_bit (devno, bl_dev[ssid]); } #ifdef CONFIG_PROC_FS @@ -283,6 +288,7 @@ blacklist_parse_proc_parameters (char *buf) /* Iterator struct for all devices. 
*/ struct ccwdev_iter { int devno; + int ssid; int in_range; }; @@ -291,13 +297,14 @@ cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset) { struct ccwdev_iter *iter; - if (*offset > __MAX_SUBCHANNEL) + if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) return NULL; iter = kmalloc(sizeof(struct ccwdev_iter), GFP_KERNEL); if (!iter) return ERR_PTR(-ENOMEM); memset(iter, 0, sizeof(struct ccwdev_iter)); - iter->devno = *offset; + iter->ssid = *offset / (__MAX_SUBCHANNEL + 1); + iter->devno = *offset % (__MAX_SUBCHANNEL + 1); return iter; } @@ -313,10 +320,16 @@ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset) { struct ccwdev_iter *iter; - if (*offset > __MAX_SUBCHANNEL) + if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) return NULL; iter = (struct ccwdev_iter *)it; - iter->devno++; + if (iter->devno == __MAX_SUBCHANNEL) { + iter->devno = 0; + iter->ssid++; + if (iter->ssid > __MAX_SSID) + return NULL; + } else + iter->devno++; (*offset)++; return iter; } @@ -327,23 +340,24 @@ cio_ignore_proc_seq_show(struct seq_file *s, void *it) struct ccwdev_iter *iter; iter = (struct ccwdev_iter *)it; - if (!is_blacklisted(iter->devno)) + if (!is_blacklisted(iter->ssid, iter->devno)) /* Not blacklisted, nothing to output. */ return 0; if (!iter->in_range) { /* First device in range. */ if ((iter->devno == __MAX_SUBCHANNEL) || - !is_blacklisted(iter->devno + 1)) + !is_blacklisted(iter->ssid, iter->devno + 1)) /* Singular device. */ - return seq_printf(s, "0.0.%04x\n", iter->devno); + return seq_printf(s, "0.%x.%04x\n", + iter->ssid, iter->devno); iter->in_range = 1; - return seq_printf(s, "0.0.%04x-", iter->devno); + return seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno); } if ((iter->devno == __MAX_SUBCHANNEL) || - !is_blacklisted(iter->devno + 1)) { + !is_blacklisted(iter->ssid, iter->devno + 1)) { /* Last device in range. */ iter->in_range = 0; - return seq_printf(s, "0.0.%04x\n", iter->devno); + return seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno); } return 0; } diff --git a/drivers/s390/cio/blacklist.h b/drivers/s390/cio/blacklist.h index fb42cafbe57c..95e25c1df922 100644 --- a/drivers/s390/cio/blacklist.h +++ b/drivers/s390/cio/blacklist.h @@ -1,6 +1,6 @@ #ifndef S390_BLACKLIST_H #define S390_BLACKLIST_H -extern int is_blacklisted (int devno); +extern int is_blacklisted (int ssid, int devno); #endif diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index ebd924962df0..7270808c02d1 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -1,7 +1,7 @@ /* * drivers/s390/cio/chsc.c * S/390 common I/O routines -- channel subsystem call - * $Revision: 1.120 $ + * $Revision: 1.126 $ * * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, * IBM Corporation @@ -75,7 +75,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) struct { struct chsc_header request; - u16 reserved1; + u16 reserved1a:10; + u16 ssid:2; + u16 reserved1b:4; u16 f_sch; /* first subchannel */ u16 reserved2; u16 l_sch; /* last subchannel */ @@ -102,6 +104,7 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) .code = 0x0004, }; + ssd_area->ssid = sch->schid.ssid; ssd_area->f_sch = sch->schid.sch_no; ssd_area->l_sch = sch->schid.sch_no; @@ -145,8 +148,8 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) */ if (ssd_area->st > 3) { /* uhm, that looks strange... 
*/ CIO_CRW_EVENT(0, "Strange subchannel type %d" - " for sch %04x\n", ssd_area->st, - sch->schid.sch_no); + " for sch 0.%x.%04x\n", ssd_area->st, + sch->schid.ssid, sch->schid.sch_no); /* * There may have been a new subchannel type defined in the * time since this code was written; since we don't know which @@ -155,8 +158,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) return 0; } else { const char *type[4] = {"I/O", "chsc", "message", "ADM"}; - CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n", - sch->schid.sch_no, type[ssd_area->st]); + CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n", + sch->schid.ssid, sch->schid.sch_no, + type[ssd_area->st]); sch->ssd_info.valid = 1; sch->ssd_info.type = ssd_area->st; @@ -364,7 +368,7 @@ s390_process_res_acc_new_sch(struct subchannel_id schid) * that beast may be on we'll have to do a stsch * on all devices, grr... */ - if (stsch(schid, &schib)) + if (stsch_err(schid, &schib)) /* We're through */ return need_rescan ? -EAGAIN : -ENXIO; @@ -818,7 +822,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data) put_device(&sch->dev); return 0; } - if (stsch(schid, &schib)) + if (stsch_err(schid, &schib)) /* We're through */ return -ENXIO; /* Put it on the slow path. */ @@ -1078,6 +1082,54 @@ chsc_alloc_sei_area(void) return (sei_page ? 0 : -ENOMEM); } +int __init +chsc_enable_facility(int operation_code) +{ + int ret; + struct { + struct chsc_header request; + u8 reserved1:4; + u8 format:4; + u8 reserved2; + u16 operation_code; + u32 reserved3; + u32 reserved4; + u32 operation_data_area[252]; + struct chsc_header response; + u32 reserved5:4; + u32 format2:4; + u32 reserved6:24; + } *sda_area; + + sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); + if (!sda_area) + return -ENOMEM; + sda_area->request = (struct chsc_header) { + .length = 0x0400, + .code = 0x0031, + }; + sda_area->operation_code = operation_code; + + ret = chsc(sda_area); + if (ret > 0) { + ret = (ret == 3) ? 
-ENODEV : -EBUSY; + goto out; + } + switch (sda_area->response.code) { + case 0x0003: /* invalid request block */ + case 0x0007: + ret = -EINVAL; + break; + case 0x0004: /* command not provided */ + case 0x0101: /* facility not provided */ + ret = -EOPNOTSUPP; + break; + } + out: + free_page((unsigned long)sda_area); + return ret; +} + subsys_initcall(chsc_alloc_sei_area); struct css_general_char css_general_characteristics; diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 170083ca4349..44e4b4bb1c5a 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -5,6 +5,8 @@ #define CHSC_SEI_ACC_LINKADDR 2 #define CHSC_SEI_ACC_FULLLINKADDR 3 +#define CHSC_SDA_OC_MSS 0x2 + struct chsc_header { u16 length; u16 code; @@ -64,6 +66,8 @@ extern int css_characteristics_avail; extern void *chsc_get_chp_desc(struct subchannel*, int); +extern int chsc_enable_facility(int); + #define to_channelpath(dev) container_of(dev, struct channel_path, dev) #endif diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 3eb6cb608fc9..6f274f4f92eb 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -1,7 +1,7 @@ /* * drivers/s390/cio/cio.c * S/390 common I/O routines -- low level i/o calls - * $Revision: 1.135 $ + * $Revision: 1.138 $ * * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, * IBM Corporation @@ -166,7 +166,8 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) stsch (sch->schid, &sch->schib); CIO_MSG_EVENT(0, "cio_start: 'not oper' status for " - "subchannel %04x!\n", sch->schid.sch_no); + "subchannel 0.%x.%04x!\n", sch->schid.ssid, + sch->schid.sch_no); sprintf(dbf_text, "no%s", sch->dev.bus_id); CIO_TRACE_EVENT(0, dbf_text); CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); @@ -522,15 +523,18 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) spin_lock_init(&sch->lock); /* Set a name for the subchannel */ - snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", schid.sch_no); + snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid, + schid.sch_no); /* * The first subchannel that is not-operational (ccode==3) * indicates that there aren't any more devices available. + * If stsch gets an exception, it means the current subchannel set + * is not valid. */ - ccode = stsch (schid, &sch->schib); + ccode = stsch_err (schid, &sch->schib); if (ccode) - return -ENXIO; + return (ccode == 3) ? -ENXIO : ccode; sch->schid = schid; /* Copy subchannel type from path management control word. */ @@ -541,9 +545,9 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) */ if (sch->st != 0) { CIO_DEBUG(KERN_INFO, 0, - "Subchannel %04X reports " + "Subchannel 0.%x.%04x reports " "non-I/O subchannel type %04X\n", - sch->schid.sch_no, sch->st); + sch->schid.ssid, sch->schid.sch_no, sch->st); /* We stop here for non-io subchannels. */ return sch->st; } @@ -554,26 +558,29 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) return -ENODEV; /* Devno is valid. */ - if (is_blacklisted (sch->schib.pmcw.dev)) { + if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) { /* * This device must not be known to Linux. So we simply * say that there is no device and return ENODEV. 
*/ CIO_MSG_EVENT(0, "Blacklisted device detected " - "at devno %04X\n", sch->schib.pmcw.dev); + "at devno %04X, subchannel set %x\n", + sch->schib.pmcw.dev, sch->schid.ssid); return -ENODEV; } sch->opm = 0xff; - chsc_validate_chpids(sch); + if (!cio_is_console(sch->schid)) + chsc_validate_chpids(sch); sch->lpm = sch->schib.pmcw.pim & sch->schib.pmcw.pam & sch->schib.pmcw.pom & sch->opm; CIO_DEBUG(KERN_INFO, 0, - "Detected device %04X on subchannel %04X" + "Detected device %04x on subchannel 0.%x.%04X" " - PIM = %02X, PAM = %02X, POM = %02X\n", - sch->schib.pmcw.dev, sch->schid.sch_no, sch->schib.pmcw.pim, + sch->schib.pmcw.dev, sch->schid.ssid, + sch->schid.sch_no, sch->schib.pmcw.pim, sch->schib.pmcw.pam, sch->schib.pmcw.pom); /* @@ -693,7 +700,7 @@ wait_cons_dev (void) static int cio_test_for_console(struct subchannel_id schid, void *data) { - if (stsch(schid, &console_subchannel.schib) != 0) + if (stsch_err(schid, &console_subchannel.schib) != 0) return -ENXIO; if (console_subchannel.schib.pmcw.dnv && console_subchannel.schib.pmcw.dev == @@ -841,7 +848,7 @@ __shutdown_subchannel_easy(struct subchannel_id schid, void *data) { struct schib schib; - if (stsch(schid, &schib)) + if (stsch_err(schid, &schib)) return -ENXIO; if (!schib.pmcw.ena) return 0; diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index b6225cbbbee7..9e9d4a157a4c 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -1,7 +1,7 @@ /* * drivers/s390/cio/css.c * driver for channel subsystem - * $Revision: 1.85 $ + * $Revision: 1.93 $ * * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, * IBM Corporation @@ -23,6 +23,7 @@ int need_rescan = 0; int css_init_done = 0; +static int max_ssid = 0; struct channel_subsystem *css[__MAX_CSSID + 1]; @@ -37,10 +38,13 @@ for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) init_subchannel_id(&schid); ret = -ENODEV; do { - ret = fn(schid, data); - if (ret) - break; - } while (schid.sch_no++ < __MAX_SUBCHANNEL); + do { + ret = fn(schid, data); + if (ret) + break; + } while (schid.sch_no++ < __MAX_SUBCHANNEL); + schid.sch_no = 0; + } while (schid.ssid++ < max_ssid); return ret; } @@ -205,8 +209,8 @@ css_evaluate_subchannel(struct subchannel_id schid, int slow) return -EAGAIN; /* Will be done on the slow path. */ } event = css_get_subchannel_status(sch, schid); - CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n", - schid.sch_no, event, + CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", + schid.ssid, schid.sch_no, event, sch?(disc?"disconnected":"normal"):"unknown", slow?"slow":"fast"); switch (event) { @@ -352,19 +356,23 @@ css_reiterate_subchannels(void) * Called from the machine check handler for subchannel report words. */ int -css_process_crw(int irq) +css_process_crw(int rsid1, int rsid2) { int ret; struct subchannel_id mchk_schid; - CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq); + CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", + rsid1, rsid2); if (need_rescan) /* We need to iterate all subchannels anyway. */ return -EAGAIN; init_subchannel_id(&mchk_schid); - mchk_schid.sch_no = irq; + mchk_schid.sch_no = rsid1; + if (rsid2 != 0) + mchk_schid.ssid = (rsid2 >> 8) & 3; + /* * Since we are always presented with IPI in the CRW, we have to * use stsch() to find out if the subchannel in question has come @@ -465,12 +473,23 @@ init_channel_subsystem (void) if ((ret = bus_register(&css_bus_type))) goto out; + /* Try to enable MSS. 
*/ + ret = chsc_enable_facility(CHSC_SDA_OC_MSS); + switch (ret) { + case 0: /* Success. */ + max_ssid = __MAX_SSID; + break; + case -ENOMEM: + goto out_bus; + default: + max_ssid = 0; + } /* Setup css structure. */ for (i = 0; i <= __MAX_CSSID; i++) { css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL); if (!css[i]) { ret = -ENOMEM; - goto out_bus; + goto out_unregister; } setup_css(i); ret = device_register(&css[i]->device); @@ -485,11 +504,12 @@ init_channel_subsystem (void) return 0; out_free: kfree(css[i]); -out_bus: +out_unregister: while (i > 0) { i--; device_unregister(&css[i]->device); } +out_bus: bus_unregister(&css_bus_type); out: return ret; diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index b74659cab0af..251ebd7a7d3a 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -77,6 +77,7 @@ struct ccw_device_private { unsigned long registered; __u16 devno; /* device number */ __u16 sch_no; /* subchannel number */ + __u8 ssid; /* subchannel set id */ __u8 imask; /* lpm mask for SNID/SID/SPGID */ int iretry; /* retry counter SNID/SID/SPGID */ struct { @@ -135,6 +136,7 @@ extern int css_init_done; extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); #define __MAX_SUBCHANNEL 65535 +#define __MAX_SSID 3 #define __MAX_CHPID 255 #define __MAX_CSSID 0 diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index ba9f7c11f63f..fa3e4c0a2536 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -535,7 +535,8 @@ ccw_device_register(struct ccw_device *cdev) } struct match_data { - unsigned int devno; + unsigned int devno; + unsigned int ssid; struct ccw_device * sibling; }; @@ -548,6 +549,7 @@ match_devno(struct device * dev, void * data) cdev = to_ccwdev(dev); if ((cdev->private->state == DEV_STATE_DISCONNECTED) && (cdev->private->devno == d->devno) && + (cdev->private->ssid == d->ssid) && (cdev != d->sibling)) { cdev->private->state = DEV_STATE_NOT_OPER; return 1; @@ -556,11 +558,13 @@ match_devno(struct device * dev, void * data) } static struct ccw_device * -get_disc_ccwdev_by_devno(unsigned int devno, struct ccw_device *sibling) +get_disc_ccwdev_by_devno(unsigned int devno, unsigned int ssid, + struct ccw_device *sibling) { struct device *dev; struct match_data data = { - .devno = devno, + .devno = devno, + .ssid = ssid, .sibling = sibling, }; @@ -616,7 +620,7 @@ ccw_device_do_unreg_rereg(void *data) need_rename = 1; other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev, - cdev); + sch->schid.ssid, cdev); if (other_cdev) { struct subchannel *other_sch; @@ -639,8 +643,8 @@ ccw_device_do_unreg_rereg(void *data) if (test_and_clear_bit(1, &cdev->private->registered)) device_del(&cdev->dev); if (need_rename) - snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", - sch->schib.pmcw.dev); + snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", + sch->schid.ssid, sch->schib.pmcw.dev); PREPARE_WORK(&cdev->private->kick_work, ccw_device_add_changed, (void *)cdev); queue_work(ccw_device_work, &cdev->private->kick_work); @@ -769,9 +773,11 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) sch->dev.driver_data = cdev; sch->driver = &io_subchannel_driver; cdev->ccwlock = &sch->lock; + /* Init private data. 
*/ priv = cdev->private; priv->devno = sch->schib.pmcw.dev; + priv->ssid = sch->schid.ssid; priv->sch_no = sch->schid.sch_no; priv->state = DEV_STATE_NOT_OPER; INIT_LIST_HEAD(&priv->cmb_list); @@ -779,8 +785,8 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) init_timer(&priv->timer); /* Set an initial name for the device. */ - snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", - sch->schib.pmcw.dev); + snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", + sch->schid.ssid, sch->schib.pmcw.dev); /* Increase counter of devices currently in recognition. */ atomic_inc(&ccw_device_init_count); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 9efeae75ad28..23d12b65e5fa 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -257,8 +257,9 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) switch (state) { case DEV_STATE_NOT_OPER: CIO_DEBUG(KERN_WARNING, 2, - "SenseID : unknown device %04x on subchannel %04x\n", - cdev->private->devno, sch->schid.sch_no); + "SenseID : unknown device %04x on subchannel " + "0.%x.%04x\n", cdev->private->devno, + sch->schid.ssid, sch->schid.sch_no); break; case DEV_STATE_OFFLINE: if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) { @@ -282,16 +283,18 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) return; } /* Issue device info message. */ - CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: " + CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: " "CU Type/Mod = %04X/%02X, Dev Type/Mod = " - "%04X/%02X\n", cdev->private->devno, + "%04X/%02X\n", + cdev->private->ssid, cdev->private->devno, cdev->id.cu_type, cdev->id.cu_model, cdev->id.dev_type, cdev->id.dev_model); break; case DEV_STATE_BOXED: CIO_DEBUG(KERN_WARNING, 2, - "SenseID : boxed device %04x on subchannel %04x\n", - cdev->private->devno, sch->schid.sch_no); + "SenseID : boxed device %04x on subchannel " + "0.%x.%04x\n", cdev->private->devno, + sch->schid.ssid, sch->schid.sch_no); break; } cdev->private->state = state; diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index 207881ec7aaf..3c77c3fd461d 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c @@ -256,16 +256,17 @@ ccw_device_check_sense_id(struct ccw_device *cdev) * sense id information. So, for intervention required, * we use the "whack it until it talks" strategy... 
*/ - CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel %04x " - "reports cmd reject\n", - cdev->private->devno, sch->schid.sch_no); + CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel " + "0.%x.%04x reports cmd reject\n", + cdev->private->devno, sch->schid.ssid, + sch->schid.sch_no); return -EOPNOTSUPP; } if (irb->esw.esw0.erw.cons) { - CIO_MSG_EVENT(2, "SenseID : UC on dev %04x, " + CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, " "lpum %02X, cnt %02d, sns :" " %02X%02X%02X%02X %02X%02X%02X%02X ...\n", - cdev->private->devno, + cdev->private->ssid, cdev->private->devno, irb->esw.esw0.sublog.lpum, irb->esw.esw0.erw.scnt, irb->ecw[0], irb->ecw[1], @@ -277,16 +278,17 @@ ccw_device_check_sense_id(struct ccw_device *cdev) if (irb->scsw.cc == 3) { if ((sch->orb.lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) - CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x on" - " subchannel %04x is 'not operational'\n", - sch->orb.lpm, cdev->private->devno, + CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x " + "on subchannel 0.%x.%04x is " + "'not operational'\n", sch->orb.lpm, + cdev->private->devno, sch->schid.ssid, sch->schid.sch_no); return -EACCES; } /* Hmm, whatever happened, try again. */ CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on " - "subchannel %04x returns status %02X%02X\n", - cdev->private->devno, sch->schid.sch_no, + "subchannel 0.%x.%04x returns status %02X%02X\n", + cdev->private->devno, sch->schid.ssid, sch->schid.sch_no, irb->scsw.dstat, irb->scsw.cstat); return -EAGAIN; } diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 3c89d70b9c09..052832d03d38 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -57,10 +57,10 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev) if (ret != -EACCES) return ret; CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " - "%04x, lpm %02X, became 'not " + "0.%x.%04x, lpm %02X, became 'not " "operational'\n", - cdev->private->devno, sch->schid.sch_no, - cdev->private->imask); + cdev->private->devno, sch->schid.ssid, + sch->schid.sch_no, cdev->private->imask); } cdev->private->imask >>= 1; @@ -106,10 +106,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) return -EOPNOTSUPP; } if (irb->esw.esw0.erw.cons) { - CIO_MSG_EVENT(2, "SNID - device %04x, unit check, " + CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, " "lpum %02X, cnt %02d, sns : " "%02X%02X%02X%02X %02X%02X%02X%02X ...\n", - cdev->private->devno, + cdev->private->ssid, cdev->private->devno, irb->esw.esw0.sublog.lpum, irb->esw.esw0.erw.scnt, irb->ecw[0], irb->ecw[1], @@ -119,16 +119,17 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) return -EAGAIN; } if (irb->scsw.cc == 3) { - CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " - "%04x, lpm %02X, became 'not operational'\n", - cdev->private->devno, sch->schid.sch_no, - sch->orb.lpm); + CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x," + " lpm %02X, became 'not operational'\n", + cdev->private->devno, sch->schid.ssid, + sch->schid.sch_no, sch->orb.lpm); return -EACCES; } if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) { - CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel %04x " + CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x " "is reserved by someone else\n", - cdev->private->devno, sch->schid.sch_no); + cdev->private->devno, sch->schid.ssid, + sch->schid.sch_no); return -EUSERS; } return 0; @@ -237,8 +238,9 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func) sch->lpm 
&= ~cdev->private->imask; sch->vpm &= ~cdev->private->imask; CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " - "%04x, lpm %02X, became 'not operational'\n", - cdev->private->devno, sch->schid.sch_no, cdev->private->imask); + "0.%x.%04x, lpm %02X, became 'not operational'\n", + cdev->private->devno, sch->schid.ssid, + sch->schid.sch_no, cdev->private->imask); return ret; } @@ -260,8 +262,10 @@ __ccw_device_check_pgid(struct ccw_device *cdev) if (irb->ecw[0] & SNS0_CMD_REJECT) return -EOPNOTSUPP; /* Hmm, whatever happened, try again. */ - CIO_MSG_EVENT(2, "SPID - device %04x, unit check, cnt %02d, " + CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, " + "cnt %02d, " "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n", + cdev->private->ssid, cdev->private->devno, irb->esw.esw0.erw.scnt, irb->ecw[0], irb->ecw[1], irb->ecw[2], irb->ecw[3], @@ -270,10 +274,10 @@ __ccw_device_check_pgid(struct ccw_device *cdev) return -EAGAIN; } if (irb->scsw.cc == 3) { - CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " - "%04x, lpm %02X, became 'not operational'\n", - cdev->private->devno, sch->schid.sch_no, - cdev->private->imask); + CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel 0.%x.%04x," + " lpm %02X, became 'not operational'\n", + cdev->private->devno, sch->schid.ssid, + sch->schid.sch_no, cdev->private->imask); return -EACCES; } return 0; diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 929f8fb505f2..db09c209098b 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -36,9 +36,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " "received" - " ... device %04X on subchannel %04X, dev_stat " + " ... 
device %04x on subchannel 0.%x.%04x, dev_stat " ": %02X sch_stat : %02X\n", - cdev->private->devno, cdev->private->sch_no, + cdev->private->devno, cdev->private->ssid, + cdev->private->sch_no, irb->scsw.dstat, irb->scsw.cstat); if (irb->scsw.cc != 3) { @@ -61,8 +62,9 @@ ccw_device_path_notoper(struct ccw_device *cdev) sch = to_subchannel(cdev->dev.parent); stsch (sch->schid, &sch->schib); - CIO_MSG_EVENT(0, "%s(%04x) - path(s) %02x are " - "not operational \n", __FUNCTION__, sch->schid.sch_no, + CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are " + "not operational \n", __FUNCTION__, + sch->schid.ssid, sch->schid.sch_no, sch->schib.pmcw.pnom); sch->lpm &= ~sch->schib.pmcw.pnom; diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index 66c882e52f32..62b0e2ad507f 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h @@ -38,6 +38,35 @@ static inline int stsch(struct subchannel_id schid, return ccode; } +static inline int stsch_err(struct subchannel_id schid, + volatile struct schib *addr) +{ + int ccode; + + __asm__ __volatile__( + " lhi %0,%3\n" + " lr 1,%1\n" + " stsch 0(%2)\n" + "0: ipm %0\n" + " srl %0,28\n" + "1:\n" +#ifdef CONFIG_ARCH_S390X + ".section __ex_table,\"a\"\n" + " .align 8\n" + " .quad 0b,1b\n" + ".previous" +#else + ".section __ex_table,\"a\"\n" + " .align 4\n" + " .long 0b,1b\n" + ".previous" +#endif + : "=&d" (ccode) + : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr) + : "cc", "1" ); + return ccode; +} + static inline int msch(struct subchannel_id schid, volatile struct schib *addr) { diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 5c7001b5c7a1..035c77af9cd3 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c @@ -56,7 +56,7 @@ #include "ioasm.h" #include "chsc.h" -#define VERSION_QDIO_C "$Revision: 1.113 $" +#define VERSION_QDIO_C "$Revision: 1.114 $" /****************** MODULE PARAMETER VARIABLES ********************/ MODULE_AUTHOR("Utz Bacher "); @@ -2066,21 +2066,22 @@ qdio_timeout_handler(struct ccw_device *cdev) switch (irq_ptr->state) { case QDIO_IRQ_STATE_INACTIVE: - QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n", - irq_ptr->schid.sch_no); + QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); QDIO_DBF_TEXT2(1,setup,"eq:timeo"); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); break; case QDIO_IRQ_STATE_CLEANUP: - QDIO_PRINT_INFO("Did not get interrupt on cleanup, irq=0x%x.\n", - irq_ptr->schid.sch_no); + QDIO_PRINT_INFO("Did not get interrupt on cleanup, " + "irq=0.%x.%x.\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); break; case QDIO_IRQ_STATE_ESTABLISHED: case QDIO_IRQ_STATE_ACTIVE: /* I/O has been terminated by common I/O layer. 
*/ - QDIO_PRINT_INFO("Queues on irq %04x killed by cio.\n", - irq_ptr->schid.sch_no); + QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); QDIO_DBF_TEXT2(1, trace, "cio:term"); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); if (get_device(&cdev->dev)) { @@ -2273,7 +2274,9 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr) unsigned char qdioac; struct { struct chsc_header request; - u16 reserved1; + u16 reserved1:10; + u16 ssid:2; + u16 fmt:4; u16 first_sch; u16 reserved2; u16 last_sch; @@ -2318,12 +2321,13 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr) }; ssqd_area->first_sch = irq_ptr->schid.sch_no; ssqd_area->last_sch = irq_ptr->schid.sch_no; + ssqd_area->ssid = irq_ptr->schid.ssid; result = chsc(ssqd_area); if (result) { QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \ - "SIGAs for sch x%x.\n", - result, irq_ptr->schid.sch_no); + "SIGAs for sch 0.%x.%x.\n", result, + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || CHSC_FLAG_SIGA_OUTPUT_NECESSARY || CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ @@ -2333,8 +2337,9 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr) if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) { QDIO_PRINT_WARN("response upon checking SIGA needs " \ - "is 0x%x. Using all SIGAs for sch x%x.\n", - ssqd_area->response.code, irq_ptr->schid.sch_no); + "is 0x%x. Using all SIGAs for sch 0.%x.%x.\n", + ssqd_area->response.code, + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || CHSC_FLAG_SIGA_OUTPUT_NECESSARY || CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ @@ -2344,8 +2349,9 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr) if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) || !(ssqd_area->flags & CHSC_FLAG_VALIDITY) || (ssqd_area->sch != irq_ptr->schid.sch_no)) { - QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \ - "using all SIGAs.\n",irq_ptr->schid.sch_no); + QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... 
" \ + "using all SIGAs.\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | CHSC_FLAG_SIGA_OUTPUT_NECESSARY | CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */ @@ -2453,7 +2459,8 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scssc_area) { QDIO_PRINT_WARN("No memory for setting indicators on " \ - "subchannel x%x.\n", irq_ptr->schid.sch_no); + "subchannel 0.%x.%x.\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); return -ENOMEM; } scssc_area->request = (struct chsc_header) { @@ -2479,8 +2486,9 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) result = chsc(scssc_area); if (result) { - QDIO_PRINT_WARN("could not set indicators on irq x%x, " \ - "cc=%i.\n",irq_ptr->schid.sch_no,result); + QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \ + "cc=%i.\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result); result = -EIO; goto out; } @@ -2536,7 +2544,8 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target) scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scsscf_area) { QDIO_PRINT_WARN("No memory for setting delay target on " \ - "subchannel x%x.\n", irq_ptr->schid.sch_no); + "subchannel 0.%x.%x.\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); return -ENOMEM; } scsscf_area->request = (struct chsc_header) { @@ -2548,8 +2557,9 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target) result=chsc(scsscf_area); if (result) { - QDIO_PRINT_WARN("could not set delay target on irq x%x, " \ - "cc=%i. Continuing.\n",irq_ptr->schid.sch_no, + QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \ + "cc=%i. Continuing.\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no, result); result = -EIO; goto out; @@ -2870,8 +2880,9 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int)); QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int)); QDIO_PRINT_ERR("received check condition on establish " \ - "queues on irq 0x%x (cs=x%x, ds=x%x).\n", - irq_ptr->schid.sch_no,cstat,dstat); + "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no, + cstat,dstat); qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR); } @@ -2879,9 +2890,10 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, QDIO_DBF_TEXT2(1,setup,"eq:no de"); QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); - QDIO_PRINT_ERR("establish queues on irq %04x: didn't get " + QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get " "device end: dstat=%02x, cstat=%02x\n", - irq_ptr->schid.sch_no, dstat, cstat); + irq_ptr->schid.ssid, irq_ptr->schid.sch_no, + dstat, cstat); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); return 1; } @@ -2890,9 +2902,9 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, QDIO_DBF_TEXT2(1,setup,"eq:badio"); QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); - QDIO_PRINT_ERR("establish queues on irq %04x: got " + QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got " "the following devstat: dstat=%02x, " - "cstat=%02x\n", + "cstat=%02x\n", irq_ptr->schid.ssid, irq_ptr->schid.sch_no, dstat, cstat); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); return 1; @@ -3041,7 +3053,8 @@ int qdio_fill_irq(struct qdio_initialize *init_data) QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*)); if 
(!irq_ptr->dev_st_chg_ind) { QDIO_PRINT_WARN("no indicator location available " \ - "for irq 0x%x\n",irq_ptr->schid.sch_no); + "for irq 0.%x.%x\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); qdio_release_irq_memory(irq_ptr); return -ENOBUFS; } @@ -3198,9 +3211,10 @@ qdio_establish(struct qdio_initialize *init_data) sprintf(dbf_text,"eq:io%4x",result); QDIO_DBF_TEXT2(1,setup,dbf_text); } - QDIO_PRINT_WARN("establish queues on irq %04x: do_IO " \ - "returned %i, next try returned %i\n", - irq_ptr->schid.sch_no,result,result2); + QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \ + "returned %i, next try returned %i\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no, + result, result2); result=result2; if (result) ccw_device_set_timeout(cdev, 0); @@ -3298,9 +3312,10 @@ qdio_activate(struct ccw_device *cdev, int flags) sprintf(dbf_text,"aq:io%4x",result); QDIO_DBF_TEXT2(1,setup,dbf_text); } - QDIO_PRINT_WARN("activate queues on irq %04x: do_IO " \ - "returned %i, next try returned %i\n", - irq_ptr->schid.sch_no,result,result2); + QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \ + "returned %i, next try returned %i\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no, + result, result2); result=result2; } diff --git a/drivers/s390/cio/schid.h b/drivers/s390/cio/schid.h index 220d97882341..54328fec5ade 100644 --- a/drivers/s390/cio/schid.h +++ b/drivers/s390/cio/schid.h @@ -2,7 +2,8 @@ #define S390_SCHID_H struct subchannel_id { - __u32 reserved:15; + __u32 reserved:13; + __u32 ssid:2; __u32 one:1; __u32 sch_no:16; } __attribute__ ((packed,aligned(4))); diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index 4191fd9d4d11..7dad597ff86e 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c @@ -23,7 +23,7 @@ static struct semaphore m_sem; -extern int css_process_crw(int); +extern int css_process_crw(int, int); extern int chsc_process_crw(void); extern int chp_process_crw(int, int); extern void css_reiterate_subchannels(void); @@ -49,9 +49,10 @@ s390_handle_damage(char *msg) static int s390_collect_crw_info(void *param) { - struct crw crw; + struct crw crw[2]; int ccode, ret, slow; struct semaphore *sem; + unsigned int chain; sem = (struct semaphore *)param; /* Set a nice name. */ @@ -59,25 +60,50 @@ s390_collect_crw_info(void *param) repeat: down_interruptible(sem); slow = 0; + chain = 0; while (1) { - ccode = stcrw(&crw); + if (unlikely(chain > 1)) { + struct crw tmp_crw; + + printk(KERN_WARNING"%s: Code does not support more " + "than two chained crws; please report to " + "linux390@de.ibm.com!\n", __FUNCTION__); + ccode = stcrw(&tmp_crw); + printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + __FUNCTION__, tmp_crw.slct, tmp_crw.oflw, + tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc, + tmp_crw.erc, tmp_crw.rsid); + printk(KERN_WARNING"%s: This was crw number %x in the " + "chain\n", __FUNCTION__, chain); + if (ccode != 0) + break; + chain = tmp_crw.chn ? chain + 1 : 0; + continue; + } + ccode = stcrw(&crw[chain]); if (ccode != 0) break; DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, " "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", - crw.slct, crw.oflw, crw.chn, crw.rsc, crw.anc, - crw.erc, crw.rsid); + crw[chain].slct, crw[chain].oflw, crw[chain].chn, + crw[chain].rsc, crw[chain].anc, crw[chain].erc, + crw[chain].rsid); /* Check for overflows. 
*/ - if (crw.oflw) { + if (crw[chain].oflw) { pr_debug("%s: crw overflow detected!\n", __FUNCTION__); css_reiterate_subchannels(); + chain = 0; slow = 1; continue; } - switch (crw.rsc) { + switch (crw[chain].rsc) { case CRW_RSC_SCH: - pr_debug("source is subchannel %04X\n", crw.rsid); - ret = css_process_crw (crw.rsid); + if (crw[0].chn && !chain) + break; + pr_debug("source is subchannel %04X\n", crw[0].rsid); + ret = css_process_crw (crw[0].rsid, + chain ? crw[1].rsid : 0); if (ret == -EAGAIN) slow = 1; break; @@ -85,18 +111,18 @@ repeat: pr_debug("source is monitoring facility\n"); break; case CRW_RSC_CPATH: - pr_debug("source is channel path %02X\n", crw.rsid); - switch (crw.erc) { + pr_debug("source is channel path %02X\n", crw[0].rsid); + switch (crw[0].erc) { case CRW_ERC_IPARM: /* Path has come. */ - ret = chp_process_crw(crw.rsid, 1); + ret = chp_process_crw(crw[0].rsid, 1); break; case CRW_ERC_PERRI: /* Path has gone. */ case CRW_ERC_PERRN: - ret = chp_process_crw(crw.rsid, 0); + ret = chp_process_crw(crw[0].rsid, 0); break; default: pr_debug("Don't know how to handle erc=%x\n", - crw.erc); + crw[0].erc); ret = 0; } if (ret == -EAGAIN) @@ -115,6 +141,8 @@ repeat: pr_debug("unknown source\n"); break; } + /* chain is always 0 or 1 here. */ + chain = crw[chain].chn ? chain + 1 : 0; } if (slow) queue_work(slow_path_wq, &slow_path_work); -- cgit v1.2.3 From 88fbf18399bde8f2900cf932acd40733dfa1effa Mon Sep 17 00:00:00 2001 From: Eric Rossman Date: Fri, 6 Jan 2006 00:19:25 -0800 Subject: [PATCH] s390: add support for cex2a crypto cards Signed-off-by: Eric Rossman Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/crypto/z90common.h | 9 +- drivers/s390/crypto/z90crypt.h | 13 +- drivers/s390/crypto/z90hardware.c | 301 +++++++++++++++++++++++++++++++++++++- drivers/s390/crypto/z90main.c | 111 +++++++++----- 4 files changed, 383 insertions(+), 51 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/crypto/z90common.h b/drivers/s390/crypto/z90common.h index e319e78b5ea2..f87c785f2039 100644 --- a/drivers/s390/crypto/z90common.h +++ b/drivers/s390/crypto/z90common.h @@ -1,9 +1,9 @@ /* * linux/drivers/s390/crypto/z90common.h * - * z90crypt 1.3.2 + * z90crypt 1.3.3 * - * Copyright (C) 2001, 2004 IBM Corporation + * Copyright (C) 2001, 2005 IBM Corporation * Author(s): Robert Burroughs (burrough@us.ibm.com) * Eric Rossman (edrossma@us.ibm.com) * @@ -91,12 +91,13 @@ enum hdstat { #define TSQ_FATAL_ERROR 34 #define RSQ_FATAL_ERROR 35 -#define Z90CRYPT_NUM_TYPES 5 +#define Z90CRYPT_NUM_TYPES 6 #define PCICA 0 #define PCICC 1 #define PCIXCC_MCL2 2 #define PCIXCC_MCL3 3 #define CEX2C 4 +#define CEX2A 5 #define NILDEV -1 #define ANYDEV -1 #define PCIXCC_UNK -2 @@ -105,7 +106,7 @@ enum hdevice_type { PCICC_HW = 3, PCICA_HW = 4, PCIXCC_HW = 5, - OTHER_HW = 6, + CEX2A_HW = 6, CEX2C_HW = 7 }; diff --git a/drivers/s390/crypto/z90crypt.h b/drivers/s390/crypto/z90crypt.h index 0a3bb5a10dd4..3a18443fdfa7 100644 --- a/drivers/s390/crypto/z90crypt.h +++ b/drivers/s390/crypto/z90crypt.h @@ -1,9 +1,9 @@ /* * linux/drivers/s390/crypto/z90crypt.h * - * z90crypt 1.3.2 + * z90crypt 1.3.3 * - * Copyright (C) 2001, 2004 IBM Corporation + * Copyright (C) 2001, 2005 IBM Corporation * Author(s): Robert Burroughs (burrough@us.ibm.com) * Eric Rossman (edrossma@us.ibm.com) * @@ -29,11 +29,11 @@ #include -#define VERSION_Z90CRYPT_H "$Revision: 1.11 $" +#define VERSION_Z90CRYPT_H "$Revision: 1.2.2.4 $" #define z90crypt_VERSION 1 #define z90crypt_RELEASE 3 // 2 
= PCIXCC, 3 = rewrite for coding standards -#define z90crypt_VARIANT 2 // 2 = added PCIXCC MCL3 and CEX2C support +#define z90crypt_VARIANT 3 // 3 = CEX2A support /** * struct ica_rsa_modexpo @@ -122,6 +122,9 @@ struct ica_rsa_modexpo_crt { * Z90STAT_CEX2CCOUNT * Return an integer count of all CEX2Cs. * + * Z90STAT_CEX2ACOUNT + * Return an integer count of all CEX2As. + * * Z90STAT_REQUESTQ_COUNT * Return an integer count of the number of entries waiting to be * sent to a device. @@ -144,6 +147,7 @@ struct ica_rsa_modexpo_crt { * 0x03: PCIXCC_MCL2 * 0x04: PCIXCC_MCL3 * 0x05: CEX2C + * 0x06: CEX2A * 0x0d: device is disabled via the proc filesystem * * Z90STAT_QDEPTH_MASK @@ -199,6 +203,7 @@ struct ica_rsa_modexpo_crt { #define Z90STAT_PCIXCCMCL2COUNT _IOR(Z90_IOCTL_MAGIC, 0x4b, int) #define Z90STAT_PCIXCCMCL3COUNT _IOR(Z90_IOCTL_MAGIC, 0x4c, int) #define Z90STAT_CEX2CCOUNT _IOR(Z90_IOCTL_MAGIC, 0x4d, int) +#define Z90STAT_CEX2ACOUNT _IOR(Z90_IOCTL_MAGIC, 0x4e, int) #define Z90STAT_REQUESTQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x44, int) #define Z90STAT_PENDINGQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x45, int) #define Z90STAT_TOTALOPEN_COUNT _IOR(Z90_IOCTL_MAGIC, 0x46, int) diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c index c215e0889736..7c3ed52e03e1 100644 --- a/drivers/s390/crypto/z90hardware.c +++ b/drivers/s390/crypto/z90hardware.c @@ -1,9 +1,9 @@ /* * linux/drivers/s390/crypto/z90hardware.c * - * z90crypt 1.3.2 + * z90crypt 1.3.3 * - * Copyright (C) 2001, 2004 IBM Corporation + * Copyright (C) 2001, 2005 IBM Corporation * Author(s): Robert Burroughs (burrough@us.ibm.com) * Eric Rossman (edrossma@us.ibm.com) * @@ -648,6 +648,87 @@ static struct cca_public_sec static_cca_pub_sec = { #define RESPONSE_CPRB_SIZE 0x000006B8 #define RESPONSE_CPRBX_SIZE 0x00000724 +struct type50_hdr { + u8 reserved1; + u8 msg_type_code; + u16 msg_len; + u8 reserved2; + u8 ignored; + u16 reserved3; +}; + +#define TYPE50_TYPE_CODE 0x50 + +#define TYPE50_MEB1_LEN (sizeof(struct type50_meb1_msg)) +#define TYPE50_MEB2_LEN (sizeof(struct type50_meb2_msg)) +#define TYPE50_CRB1_LEN (sizeof(struct type50_crb1_msg)) +#define TYPE50_CRB2_LEN (sizeof(struct type50_crb2_msg)) + +#define TYPE50_MEB1_FMT 0x0001 +#define TYPE50_MEB2_FMT 0x0002 +#define TYPE50_CRB1_FMT 0x0011 +#define TYPE50_CRB2_FMT 0x0012 + +struct type50_meb1_msg { + struct type50_hdr header; + u16 keyblock_type; + u8 reserved[6]; + u8 exponent[128]; + u8 modulus[128]; + u8 message[128]; +}; + +struct type50_meb2_msg { + struct type50_hdr header; + u16 keyblock_type; + u8 reserved[6]; + u8 exponent[256]; + u8 modulus[256]; + u8 message[256]; +}; + +struct type50_crb1_msg { + struct type50_hdr header; + u16 keyblock_type; + u8 reserved[6]; + u8 p[64]; + u8 q[64]; + u8 dp[64]; + u8 dq[64]; + u8 u[64]; + u8 message[128]; +}; + +struct type50_crb2_msg { + struct type50_hdr header; + u16 keyblock_type; + u8 reserved[6]; + u8 p[128]; + u8 q[128]; + u8 dp[128]; + u8 dq[128]; + u8 u[128]; + u8 message[256]; +}; + +union type50_msg { + struct type50_meb1_msg meb1; + struct type50_meb2_msg meb2; + struct type50_crb1_msg crb1; + struct type50_crb2_msg crb2; +}; + +struct type80_hdr { + u8 reserved1; + u8 type; + u16 len; + u8 code; + u8 reserved2[3]; + u8 reserved3[8]; +}; + +#define TYPE80_RSP_CODE 0x80 + struct error_hdr { unsigned char reserved1; unsigned char type; @@ -657,6 +738,7 @@ struct error_hdr { }; #define TYPE82_RSP_CODE 0x82 +#define TYPE88_RSP_CODE 0x88 #define REP82_ERROR_MACHINE_FAILURE 0x10 #define REP82_ERROR_PREEMPT_FAILURE 0x12 
@@ -679,6 +761,22 @@ struct error_hdr { #define REP82_ERROR_PACKET_TRUNCATED 0xA0 #define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 +#define REP88_ERROR_MODULE_FAILURE 0x10 +#define REP88_ERROR_MODULE_TIMEOUT 0x11 +#define REP88_ERROR_MODULE_NOTINIT 0x13 +#define REP88_ERROR_MODULE_NOTAVAIL 0x14 +#define REP88_ERROR_MODULE_DISABLED 0x15 +#define REP88_ERROR_MODULE_IN_DIAGN 0x17 +#define REP88_ERROR_FASTPATH_DISABLD 0x19 +#define REP88_ERROR_MESSAGE_TYPE 0x20 +#define REP88_ERROR_MESSAGE_MALFORMD 0x22 +#define REP88_ERROR_MESSAGE_LENGTH 0x23 +#define REP88_ERROR_RESERVED_FIELD 0x24 +#define REP88_ERROR_KEY_TYPE 0x34 +#define REP88_ERROR_INVALID_KEY 0x82 +#define REP88_ERROR_OPERAND 0x84 +#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 + #define CALLER_HEADER 12 static inline int @@ -1029,10 +1127,6 @@ query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type) stat = HD_ONLINE; *q_depth = t_depth + 1; switch (t_dev_type) { - case OTHER_HW: - stat = HD_NOT_THERE; - *dev_type = NILDEV; - break; case PCICA_HW: *dev_type = PCICA; break; @@ -1045,6 +1139,9 @@ query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type) case CEX2C_HW: *dev_type = CEX2C; break; + case CEX2A_HW: + *dev_type = CEX2A; + break; default: *dev_type = NILDEV; break; @@ -2029,6 +2126,177 @@ ICACRT_msg_to_type6CRT_msgX(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx, return 0; } +static int +ICAMEX_msg_to_type50MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p, + union type50_msg *z90cMsg_p) +{ + int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len; + unsigned char *mod_tgt, *exp_tgt, *inp_tgt; + union type50_msg *tmp_type50_msg; + + mod_len = icaMex_p->inputdatalength; + + msg_size = ((mod_len <= 128) ? TYPE50_MEB1_LEN : TYPE50_MEB2_LEN) + + CALLER_HEADER; + + memset(z90cMsg_p, 0, msg_size); + + tmp_type50_msg = (union type50_msg *) + ((unsigned char *) z90cMsg_p + CALLER_HEADER); + + tmp_type50_msg->meb1.header.msg_type_code = TYPE50_TYPE_CODE; + + if (mod_len <= 128) { + tmp_type50_msg->meb1.header.msg_len = TYPE50_MEB1_LEN; + tmp_type50_msg->meb1.keyblock_type = TYPE50_MEB1_FMT; + mod_tgt = tmp_type50_msg->meb1.modulus; + mod_tgt_len = sizeof(tmp_type50_msg->meb1.modulus); + exp_tgt = tmp_type50_msg->meb1.exponent; + exp_tgt_len = sizeof(tmp_type50_msg->meb1.exponent); + inp_tgt = tmp_type50_msg->meb1.message; + inp_tgt_len = sizeof(tmp_type50_msg->meb1.message); + } else { + tmp_type50_msg->meb2.header.msg_len = TYPE50_MEB2_LEN; + tmp_type50_msg->meb2.keyblock_type = TYPE50_MEB2_FMT; + mod_tgt = tmp_type50_msg->meb2.modulus; + mod_tgt_len = sizeof(tmp_type50_msg->meb2.modulus); + exp_tgt = tmp_type50_msg->meb2.exponent; + exp_tgt_len = sizeof(tmp_type50_msg->meb2.exponent); + inp_tgt = tmp_type50_msg->meb2.message; + inp_tgt_len = sizeof(tmp_type50_msg->meb2.message); + } + + mod_tgt += (mod_tgt_len - mod_len); + if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len)) + return SEN_RELEASED; + if (is_empty(mod_tgt, mod_len)) + return SEN_USER_ERROR; + exp_tgt += (exp_tgt_len - mod_len); + if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len)) + return SEN_RELEASED; + if (is_empty(exp_tgt, mod_len)) + return SEN_USER_ERROR; + inp_tgt += (inp_tgt_len - mod_len); + if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len)) + return SEN_RELEASED; + if (is_empty(inp_tgt, mod_len)) + return SEN_USER_ERROR; + + *z90cMsg_l_p = msg_size - CALLER_HEADER; + + return 0; +} + +static int +ICACRT_msg_to_type50CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p, + int *z90cMsg_l_p, union type50_msg 
*z90cMsg_p) +{ + int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len, + dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len, long_offset; + unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt, + temp[8]; + union type50_msg *tmp_type50_msg; + + mod_len = icaMsg_p->inputdatalength; + short_len = mod_len / 2; + long_len = mod_len / 2 + 8; + long_offset = 0; + + if (long_len > 128) { + memset(temp, 0x00, sizeof(temp)); + if (copy_from_user(temp, icaMsg_p->np_prime, long_len-128)) + return SEN_RELEASED; + if (!is_empty(temp, 8)) + return SEN_NOT_AVAIL; + if (copy_from_user(temp, icaMsg_p->bp_key, long_len-128)) + return SEN_RELEASED; + if (!is_empty(temp, 8)) + return SEN_NOT_AVAIL; + if (copy_from_user(temp, icaMsg_p->u_mult_inv, long_len-128)) + return SEN_RELEASED; + if (!is_empty(temp, 8)) + return SEN_NOT_AVAIL; + long_offset = long_len - 128; + long_len = 128; + } + + tmp_size = ((mod_len <= 128) ? TYPE50_CRB1_LEN : TYPE50_CRB2_LEN) + + CALLER_HEADER; + + memset(z90cMsg_p, 0, tmp_size); + + tmp_type50_msg = (union type50_msg *) + ((unsigned char *) z90cMsg_p + CALLER_HEADER); + + tmp_type50_msg->crb1.header.msg_type_code = TYPE50_TYPE_CODE; + if (long_len <= 64) { + tmp_type50_msg->crb1.header.msg_len = TYPE50_CRB1_LEN; + tmp_type50_msg->crb1.keyblock_type = TYPE50_CRB1_FMT; + p_tgt = tmp_type50_msg->crb1.p; + p_tgt_len = sizeof(tmp_type50_msg->crb1.p); + q_tgt = tmp_type50_msg->crb1.q; + q_tgt_len = sizeof(tmp_type50_msg->crb1.q); + dp_tgt = tmp_type50_msg->crb1.dp; + dp_tgt_len = sizeof(tmp_type50_msg->crb1.dp); + dq_tgt = tmp_type50_msg->crb1.dq; + dq_tgt_len = sizeof(tmp_type50_msg->crb1.dq); + u_tgt = tmp_type50_msg->crb1.u; + u_tgt_len = sizeof(tmp_type50_msg->crb1.u); + inp_tgt = tmp_type50_msg->crb1.message; + inp_tgt_len = sizeof(tmp_type50_msg->crb1.message); + } else { + tmp_type50_msg->crb2.header.msg_len = TYPE50_CRB2_LEN; + tmp_type50_msg->crb2.keyblock_type = TYPE50_CRB2_FMT; + p_tgt = tmp_type50_msg->crb2.p; + p_tgt_len = sizeof(tmp_type50_msg->crb2.p); + q_tgt = tmp_type50_msg->crb2.q; + q_tgt_len = sizeof(tmp_type50_msg->crb2.q); + dp_tgt = tmp_type50_msg->crb2.dp; + dp_tgt_len = sizeof(tmp_type50_msg->crb2.dp); + dq_tgt = tmp_type50_msg->crb2.dq; + dq_tgt_len = sizeof(tmp_type50_msg->crb2.dq); + u_tgt = tmp_type50_msg->crb2.u; + u_tgt_len = sizeof(tmp_type50_msg->crb2.u); + inp_tgt = tmp_type50_msg->crb2.message; + inp_tgt_len = sizeof(tmp_type50_msg->crb2.message); + } + + p_tgt += (p_tgt_len - long_len); + if (copy_from_user(p_tgt, icaMsg_p->np_prime + long_offset, long_len)) + return SEN_RELEASED; + if (is_empty(p_tgt, long_len)) + return SEN_USER_ERROR; + q_tgt += (q_tgt_len - short_len); + if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len)) + return SEN_RELEASED; + if (is_empty(q_tgt, short_len)) + return SEN_USER_ERROR; + dp_tgt += (dp_tgt_len - long_len); + if (copy_from_user(dp_tgt, icaMsg_p->bp_key + long_offset, long_len)) + return SEN_RELEASED; + if (is_empty(dp_tgt, long_len)) + return SEN_USER_ERROR; + dq_tgt += (dq_tgt_len - short_len); + if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len)) + return SEN_RELEASED; + if (is_empty(dq_tgt, short_len)) + return SEN_USER_ERROR; + u_tgt += (u_tgt_len - long_len); + if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv + long_offset, long_len)) + return SEN_RELEASED; + if (is_empty(u_tgt, long_len)) + return SEN_USER_ERROR; + inp_tgt += (inp_tgt_len - mod_len); + if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len)) + return SEN_RELEASED; + if (is_empty(inp_tgt, mod_len)) + return 
SEN_USER_ERROR; + + *z90cMsg_l_p = tmp_size - CALLER_HEADER; + + return 0; +} + int convert_request(unsigned char *buffer, int func, unsigned short function, int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p) @@ -2071,6 +2339,16 @@ convert_request(unsigned char *buffer, int func, unsigned short function, cdx, msg_l_p, (struct type6_msg *) msg_p, dev_type); } + if (dev_type == CEX2A) { + if (func == ICARSACRT) + return ICACRT_msg_to_type50CRT_msg( + (struct ica_rsa_modexpo_crt *) buffer, + msg_l_p, (union type50_msg *) msg_p); + else + return ICAMEX_msg_to_type50MEX_msg( + (struct ica_rsa_modexpo *) buffer, + msg_l_p, (union type50_msg *) msg_p); + } return 0; } @@ -2081,8 +2359,8 @@ unset_ext_bitlens(void) { if (!ext_bitlens_msg_count) { PRINTK("Unable to use coprocessors for extended bitlengths. " - "Using PCICAs (if present) for extended bitlengths. " - "This is not an error.\n"); + "Using PCICAs/CEX2As (if present) for extended " + "bitlengths. This is not an error.\n"); ext_bitlens_msg_count++; } ext_bitlens = 0; @@ -2094,6 +2372,7 @@ convert_response(unsigned char *response, unsigned char *buffer, { struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer; struct error_hdr *errh_p = (struct error_hdr *) response; + struct type80_hdr *t80h_p = (struct type80_hdr *) response; struct type84_hdr *t84h_p = (struct type84_hdr *) response; struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response; int reply_code, service_rc, service_rs, src_l; @@ -2108,6 +2387,7 @@ convert_response(unsigned char *response, unsigned char *buffer, src_l = 0; switch (errh_p->type) { case TYPE82_RSP_CODE: + case TYPE88_RSP_CODE: reply_code = errh_p->reply_code; src_p = (unsigned char *)errh_p; PRINTK("Hardware error: Type %02X Message Header: " @@ -2116,6 +2396,10 @@ convert_response(unsigned char *response, unsigned char *buffer, src_p[0], src_p[1], src_p[2], src_p[3], src_p[4], src_p[5], src_p[6], src_p[7]); break; + case TYPE80_RSP_CODE: + src_l = icaMsg_p->outputdatalength; + src_p = response + (int)t80h_p->len - src_l; + break; case TYPE84_RSP_CODE: src_l = icaMsg_p->outputdatalength; src_p = response + (int)t84h_p->len - src_l; @@ -2202,6 +2486,7 @@ convert_response(unsigned char *response, unsigned char *buffer, if (reply_code) switch (reply_code) { case REP82_ERROR_OPERAND_INVALID: + case REP88_ERROR_MESSAGE_MALFORMD: return REC_OPERAND_INV; case REP82_ERROR_OPERAND_SIZE: return REC_OPERAND_SIZE; diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c index 790fcbb74b43..135ae04e6e75 100644 --- a/drivers/s390/crypto/z90main.c +++ b/drivers/s390/crypto/z90main.c @@ -228,7 +228,7 @@ struct device_x { */ struct device { int dev_type; // PCICA, PCICC, PCIXCC_MCL2, - // PCIXCC_MCL3, CEX2C + // PCIXCC_MCL3, CEX2C, CEX2A enum devstat dev_stat; // current device status int dev_self_x; // Index in array int disabled; // Set when device is in error @@ -295,26 +295,30 @@ struct caller { /** * Function prototypes from z90hardware.c */ -enum hdstat query_online(int, int, int, int *, int *); -enum devstat reset_device(int, int, int); -enum devstat send_to_AP(int, int, int, unsigned char *); -enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *); -int convert_request(unsigned char *, int, short, int, int, int *, - unsigned char *); -int convert_response(unsigned char *, unsigned char *, int *, unsigned char *); +enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth, + int *dev_type); +enum devstat reset_device(int deviceNr, 
int cdx, int resetNr); +enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext); +enum devstat receive_from_AP(int dev_nr, int cdx, int resplen, + unsigned char *resp, unsigned char *psmid); +int convert_request(unsigned char *buffer, int func, unsigned short function, + int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p); +int convert_response(unsigned char *response, unsigned char *buffer, + int *respbufflen_p, unsigned char *resp_buff); /** * Low level function prototypes */ -static int create_z90crypt(int *); -static int refresh_z90crypt(int *); -static int find_crypto_devices(struct status *); -static int create_crypto_device(int); -static int destroy_crypto_device(int); +static int create_z90crypt(int *cdx_p); +static int refresh_z90crypt(int *cdx_p); +static int find_crypto_devices(struct status *deviceMask); +static int create_crypto_device(int index); +static int destroy_crypto_device(int index); static void destroy_z90crypt(void); -static int refresh_index_array(struct status *, struct device_x *); -static int probe_device_type(struct device *); -static int probe_PCIXCC_type(struct device *); +static int refresh_index_array(struct status *status_str, + struct device_x *index_array); +static int probe_device_type(struct device *devPtr); +static int probe_PCIXCC_type(struct device *devPtr); /** * proc fs definitions @@ -425,7 +429,7 @@ static struct miscdevice z90crypt_misc_device = { MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman" "and Jochen Roehrig"); MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, " - "Copyright 2001, 2004 IBM Corporation"); + "Copyright 2001, 2005 IBM Corporation"); MODULE_LICENSE("GPL"); module_param(domain, int, 0); MODULE_PARM_DESC(domain, "domain index for device"); @@ -859,6 +863,12 @@ get_status_CEX2Ccount(void) return z90crypt.hdware_info->type_mask[CEX2C].st_count; } +static inline int +get_status_CEX2Acount(void) +{ + return z90crypt.hdware_info->type_mask[CEX2A].st_count; +} + static inline int get_status_requestq_count(void) { @@ -1008,11 +1018,13 @@ static inline int select_device_type(int *dev_type_p, int bytelength) { static int count = 0; - int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, index_to_use; + int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail, + index_to_use; struct status *stat; if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) && (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) && - (*dev_type_p != CEX2C) && (*dev_type_p != ANYDEV)) + (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) && + (*dev_type_p != ANYDEV)) return -1; if (*dev_type_p != ANYDEV) { stat = &z90crypt.hdware_info->type_mask[*dev_type_p]; @@ -1022,7 +1034,13 @@ select_device_type(int *dev_type_p, int bytelength) return -1; } - /* Assumption: PCICA, PCIXCC_MCL3, and CEX2C are all similar in speed */ + /** + * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in + * speed. + * + * PCICA and CEX2A do NOT co-exist, so it would be either one or the + * other present. 
+ */ stat = &z90crypt.hdware_info->type_mask[PCICA]; PCICA_avail = stat->st_count - (stat->disabled_count + stat->user_disabled_count); @@ -1032,29 +1050,38 @@ select_device_type(int *dev_type_p, int bytelength) stat = &z90crypt.hdware_info->type_mask[CEX2C]; CEX2C_avail = stat->st_count - (stat->disabled_count + stat->user_disabled_count); - if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) { + stat = &z90crypt.hdware_info->type_mask[CEX2A]; + CEX2A_avail = stat->st_count - + (stat->disabled_count + stat->user_disabled_count); + if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) { /** - * bitlength is a factor, PCICA is the most capable, even with - * the new MCL for PCIXCC. + * bitlength is a factor, PCICA or CEX2A are the most capable, + * even with the new MCL for PCIXCC. */ if ((bytelength < PCIXCC_MIN_MOD_SIZE) || (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) { - if (!PCICA_avail) - return -1; - else { + if (PCICA_avail) { *dev_type_p = PCICA; return 0; } + if (CEX2A_avail) { + *dev_type_p = CEX2A; + return 0; + } + return -1; } index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail + - CEX2C_avail); + CEX2C_avail + CEX2A_avail); if (index_to_use < PCICA_avail) *dev_type_p = PCICA; else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail)) *dev_type_p = PCIXCC_MCL3; - else + else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail + + CEX2C_avail)) *dev_type_p = CEX2C; + else + *dev_type_p = CEX2A; count++; return 0; } @@ -1359,7 +1386,7 @@ build_caller(struct work_element *we_p, short function) if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) && (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) && - (we_p->devtype != CEX2C)) + (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A)) return SEN_NOT_AVAIL; memcpy(caller_p->caller_id, we_p->caller_id, @@ -1428,7 +1455,8 @@ get_crypto_request_buffer(struct work_element *we_p) if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) && (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) && - (we_p->devtype != CEX2C) && (we_p->devtype != ANYDEV)) { + (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) && + (we_p->devtype != ANYDEV)) { PRINTK("invalid device type\n"); return SEN_USER_ERROR; } @@ -1503,8 +1531,9 @@ get_crypto_request_buffer(struct work_element *we_p) function = PCI_FUNC_KEY_ENCRYPT; switch (we_p->devtype) { - /* PCICA does everything with a simple RSA mod-expo operation */ + /* PCICA and CEX2A do everything with a simple RSA mod-expo operation */ case PCICA: + case CEX2A: function = PCI_FUNC_KEY_ENCRYPT; break; /** @@ -1662,7 +1691,8 @@ z90crypt_rsa(struct priv_data *private_data_p, pid_t pid, * trigger a fallback to software. 
*/ case -EINVAL: - if (we_p->devtype != PCICA) + if ((we_p->devtype != PCICA) && + (we_p->devtype != CEX2A)) rv = -EGETBUFF; break; case -ETIMEOUT: @@ -1779,6 +1809,12 @@ z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ret = -EFAULT; break; + case Z90STAT_CEX2ACOUNT: + tempstat = get_status_CEX2Acount(); + if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) + ret = -EFAULT; + break; + case Z90STAT_REQUESTQ_COUNT: tempstat = get_status_requestq_count(); if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) @@ -2019,6 +2055,8 @@ z90crypt_status(char *resp_buff, char **start, off_t offset, get_status_PCIXCCMCL3count()); len += sprintf(resp_buff+len, "CEX2C count: %d\n", get_status_CEX2Ccount()); + len += sprintf(resp_buff+len, "CEX2A count: %d\n", + get_status_CEX2Acount()); len += sprintf(resp_buff+len, "requestq count: %d\n", get_status_requestq_count()); len += sprintf(resp_buff+len, "pendingq count: %d\n", @@ -2026,8 +2064,8 @@ z90crypt_status(char *resp_buff, char **start, off_t offset, len += sprintf(resp_buff+len, "Total open handles: %d\n\n", get_status_totalopen_count()); len += sprinthx( - "Online devices: 1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), " - "4: PCIXCC (MCL3), 5: CEX2C", + "Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " + "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A", resp_buff+len, get_status_status_mask(workarea), Z90CRYPT_NUM_APS); @@ -2140,6 +2178,7 @@ z90crypt_status_write(struct file *file, const char __user *buffer, case '3': // PCIXCC_MCL2 case '4': // PCIXCC_MCL3 case '5': // CEX2C + case '6': // CEX2A j++; break; case 'd': @@ -3007,7 +3046,9 @@ create_crypto_device(int index) z90crypt.hdware_info->device_type_array[index] = 4; else if (deviceType == CEX2C) z90crypt.hdware_info->device_type_array[index] = 5; - else + else if (deviceType == CEX2A) + z90crypt.hdware_info->device_type_array[index] = 6; + else // No idea how this would happen. z90crypt.hdware_info->device_type_array[index] = -1; } -- cgit v1.2.3 From 3b793060e768197d525e892fd1f84dbc8767cada Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Fri, 6 Jan 2006 00:19:26 -0800 Subject: [PATCH] s390: Fix missing release function and cosmetic changes - Use kzalloc() in blacklist.c. - Kill unwanted casts in blacklist.c. - Provide release function for struct channel_subsystem. 
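
For illustration, the point of the release function is that the driver core frees a registered struct device only through its release() callback; without one, unregistering the channel subsystem would warn and the containing structure would never be freed. The generic pattern this fix follows looks roughly like the sketch below (my_object, my_release and my_object_add are placeholder names, not part of this patch):

	#include <linux/device.h>
	#include <linux/slab.h>

	struct my_object {
		struct device dev;
		/* driver-private fields */
	};

	static void my_release(struct device *dev)
	{
		/* called by the driver core when the last reference is dropped */
		kfree(container_of(dev, struct my_object, dev));
	}

	static int my_object_add(struct my_object *obj)
	{
		obj->dev.release = my_release;	/* must be set before registration */
		return device_register(&obj->dev);
	}
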
Signed-off-by: Cornelia Huck Cc: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/cio/blacklist.c | 7 +++---- drivers/s390/cio/css.c | 10 ++++++++++ 2 files changed, 13 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 2d444cb2fdf7..daf21e03b21d 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -299,10 +299,9 @@ cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset) if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) return NULL; - iter = kmalloc(sizeof(struct ccwdev_iter), GFP_KERNEL); + iter = kzalloc(sizeof(struct ccwdev_iter), GFP_KERNEL); if (!iter) return ERR_PTR(-ENOMEM); - memset(iter, 0, sizeof(struct ccwdev_iter)); iter->ssid = *offset / (__MAX_SUBCHANNEL + 1); iter->devno = *offset % (__MAX_SUBCHANNEL + 1); return iter; @@ -322,7 +321,7 @@ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset) if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) return NULL; - iter = (struct ccwdev_iter *)it; + iter = it; if (iter->devno == __MAX_SUBCHANNEL) { iter->devno = 0; iter->ssid++; @@ -339,7 +338,7 @@ cio_ignore_proc_seq_show(struct seq_file *s, void *it) { struct ccwdev_iter *iter; - iter = (struct ccwdev_iter *)it; + iter = it; if (!is_blacklisted(iter->ssid, iter->devno)) /* Not blacklisted, nothing to output. */ return 0; diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 9e9d4a157a4c..e565193650c7 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -444,6 +444,15 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high) } +static void +channel_subsystem_release(struct device *dev) +{ + struct channel_subsystem *css; + + css = to_css(dev); + kfree(css); +} + static inline void __init setup_css(int nr) { @@ -453,6 +462,7 @@ setup_css(int nr) css[nr]->valid = 1; css[nr]->cssid = nr; sprintf(css[nr]->device.bus_id, "css%x", nr); + css[nr]->device.release = channel_subsystem_release; tod_high = (u32) (get_clock() >> 32); css_generate_pgid(css[nr], tod_high); } -- cgit v1.2.3 From 9bbc8346fb21fad3f678220b067450e436e45dbf Mon Sep 17 00:00:00 2001 From: Peter Oberparleiter Date: Fri, 6 Jan 2006 00:19:27 -0800 Subject: [PATCH] s390: fix invalid return code in sclp_cpi When the sclp_cpi module is loaded on a system which does not support the required SCLP call (e.g. on z/VM), ENOSUPP is returned to user space. The correct return value is EOPNOTSUPP. Signed-off-by: Peter Oberparleiter Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/char/sclp_cpi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c index 5a6cef2dfa13..80f7f31310e6 100644 --- a/drivers/s390/char/sclp_cpi.c +++ b/drivers/s390/char/sclp_cpi.c @@ -204,7 +204,7 @@ cpi_module_init(void) printk(KERN_WARNING "cpi: no control program identification " "support\n"); sclp_unregister(&sclp_cpi_event); - return -ENOTSUPP; + return -EOPNOTSUPP; } req = cpi_prepare_req(); -- cgit v1.2.3 From 347a8dc3b815f0c0fa62a1df075184ffe4cbdcf1 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 6 Jan 2006 00:19:28 -0800 Subject: [PATCH] s390: cleanup Kconfig Sanitize some s390 Kconfig options. We have ARCH_S390, ARCH_S390X, ARCH_S390_31, 64BIT, S390_SUPPORT and COMPAT. Replace these 6 options by S390, 64BIT and COMPAT. 
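
The driver changes in this patch are mechanical renames: Kconfig dependencies move from ARCH_S390 to S390, and preprocessor guards on CONFIG_ARCH_S390X (or __s390x__) become CONFIG_64BIT, with no change in behaviour. A rough sketch of the conversion, patterned on the QDIO_GET_ADDR hunk further down (MY_GET_ADDR is a placeholder macro, not something this patch adds):

	#ifdef CONFIG_64BIT			/* was: CONFIG_ARCH_S390X */
	#define MY_GET_ADDR(x)	((__u32)(unsigned long)(x))
	#else	/* CONFIG_64BIT */
	#define MY_GET_ADDR(x)	((__u32)(long)(x))
	#endif
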
Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/Kconfig | 2 +- drivers/char/hangcheck-timer.c | 2 +- drivers/char/watchdog/Kconfig | 2 +- drivers/input/evdev.c | 2 +- drivers/net/phy/Kconfig | 2 +- drivers/s390/block/Kconfig | 8 ++++---- drivers/s390/block/dasd.c | 2 +- drivers/s390/block/dasd_diag.c | 2 +- drivers/s390/block/dasd_diag.h | 6 +++--- drivers/s390/block/dasd_eckd.c | 2 +- drivers/s390/block/dasd_fba.c | 2 +- drivers/s390/block/xpram.c | 4 ++-- drivers/s390/char/vmwatchdog.c | 2 +- drivers/s390/cio/cio.c | 2 +- drivers/s390/cio/device_id.c | 2 +- drivers/s390/cio/ioasm.h | 4 ++-- drivers/s390/cio/qdio.c | 2 +- drivers/s390/cio/qdio.h | 34 +++++++++++++++++----------------- drivers/s390/crypto/z90hardware.c | 8 ++++---- drivers/s390/net/Kconfig | 2 +- drivers/s390/net/claw.c | 6 +++--- drivers/s390/s390mach.c | 10 +++++----- drivers/s390/sysinfo.c | 2 +- drivers/scsi/Kconfig | 2 +- 24 files changed, 56 insertions(+), 56 deletions(-) (limited to 'drivers') diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 84e68cdd451b..5ebd06b1b4ca 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -985,7 +985,7 @@ config HPET_MMAP config HANGCHECK_TIMER tristate "Hangcheck timer" - depends on X86 || IA64 || PPC64 || ARCH_S390 + depends on X86 || IA64 || PPC64 || S390 help The hangcheck-timer module detects when the system has gone out to lunch past a certain margin. It can reboot the system diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c index 66e53dd450ff..40a67c86420c 100644 --- a/drivers/char/hangcheck-timer.c +++ b/drivers/char/hangcheck-timer.c @@ -120,7 +120,7 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks); #if defined(CONFIG_X86) # define HAVE_MONOTONIC # define TIMER_FREQ 1000000000ULL -#elif defined(CONFIG_ARCH_S390) +#elif defined(CONFIG_S390) /* FA240000 is 1 Second in the IBM time universe (Page 4-38 Principles of Op for zSeries */ # define TIMER_FREQ 0xFA240000ULL #elif defined(CONFIG_IA64) diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig index 344001b45af9..a6544790af60 100644 --- a/drivers/char/watchdog/Kconfig +++ b/drivers/char/watchdog/Kconfig @@ -438,7 +438,7 @@ config INDYDOG config ZVM_WATCHDOG tristate "z/VM Watchdog Timer" - depends on WATCHDOG && ARCH_S390 + depends on WATCHDOG && S390 help IBM s/390 and zSeries machines running under z/VM 5.1 or later provide a virtual watchdog timer to their guest that cause a diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 9f2352bd8348..a1e660e3531d 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -157,7 +157,7 @@ struct input_event_compat { # define COMPAT_TEST test_thread_flag(TIF_IA32) #elif defined(CONFIG_IA64) # define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current)) -#elif defined(CONFIG_ARCH_S390) +#elif defined(CONFIG_S390) # define COMPAT_TEST test_thread_flag(TIF_31BIT) #elif defined(CONFIG_MIPS) # define COMPAT_TEST (current->thread.mflags & MF_32BIT_ADDR) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index c782a6329805..fa39b944bc46 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -6,7 +6,7 @@ menu "PHY device support" config PHYLIB tristate "PHY Device support and infrastructure" - depends on NET_ETHERNET && (BROKEN || !ARCH_S390) + depends on NET_ETHERNET && (BROKEN || !S390) help Ethernet controllers are usually attached to PHY devices. 
This option provides infrastructure for diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig index 6e7d7b06421d..6f50cc9323d9 100644 --- a/drivers/s390/block/Kconfig +++ b/drivers/s390/block/Kconfig @@ -1,11 +1,11 @@ -if ARCH_S390 +if S390 comment "S/390 block device drivers" - depends on ARCH_S390 + depends on S390 config BLK_DEV_XPRAM tristate "XPRAM disk support" - depends on ARCH_S390 + depends on S390 help Select this option if you want to use your expanded storage on S/390 or zSeries as a disk. This is useful as a _fast_ swap device if you @@ -49,7 +49,7 @@ config DASD_FBA config DASD_DIAG tristate "Support for DIAG access to Disks" - depends on DASD && ( ARCH_S390X = 'n' || EXPERIMENTAL) + depends on DASD && ( 64BIT = 'n' || EXPERIMENTAL) help Select this option if you want to use Diagnose250 command to access Disks under VM. If you are not running under VM or unsure what it is, diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 1141a5963b67..041e1a621885 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -604,7 +604,7 @@ dasd_smalloc_request(char *magic, int cplength, int datasize, void dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device) { -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT struct ccw1 *ccw; /* Clear any idals used for the request. */ diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index a33d4064b537..ba80fdea7ebf 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -75,7 +75,7 @@ dia250(void *iob, int cmd) int rc; __asm__ __volatile__( -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT " lghi %0,3\n" " lgr 0,%3\n" " diag 0,%2,0x250\n" diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h index 37edf6e91715..a4f80bd735f1 100644 --- a/drivers/s390/block/dasd_diag.h +++ b/drivers/s390/block/dasd_diag.h @@ -45,7 +45,7 @@ struct dasd_diag_characteristics { } __attribute__ ((packed, aligned(4))); -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT #define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT typedef u64 blocknum_t; @@ -86,7 +86,7 @@ struct dasd_diag_rw_io { struct dasd_diag_bio *bio_list; u8 spare4[8]; } __attribute__ ((packed, aligned(8))); -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ #define DASD_DIAG_FLAGA_DEFAULT 0x0 typedef u32 blocknum_t; @@ -125,4 +125,4 @@ struct dasd_diag_rw_io { u32 interrupt_params; u8 spare3[20]; } __attribute__ ((packed, aligned(8))); -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index efc4cf62496e..96eb48258580 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1041,7 +1041,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) /* Eckd can only do full blocks. */ return ERR_PTR(-EINVAL); count += bv->bv_len >> (device->s2b_shift + 9); -#if defined(CONFIG_ARCH_S390X) +#if defined(CONFIG_64BIT) if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) cidaw += bv->bv_len >> (device->s2b_shift + 9); diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 9bac8d87a9cc..8ec75dc08e2c 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -271,7 +271,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req) /* Fba can only do full blocks. 
*/ return ERR_PTR(-EINVAL); count += bv->bv_len >> (device->s2b_shift + 9); -#if defined(CONFIG_ARCH_S390X) +#if defined(CONFIG_64BIT) if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) cidaw += bv->bv_len / blksize; diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index d428c909b8a0..bf3a67c3cc5e 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -160,7 +160,7 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index) "0: ipm %0\n" " srl %0,28\n" "1:\n" -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT ".section __ex_table,\"a\"\n" " .align 4\n" " .long 0b,1b\n" @@ -208,7 +208,7 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index) "0: ipm %0\n" " srl %0,28\n" "1:\n" -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT ".section __ex_table,\"a\"\n" " .align 4\n" " .long 0b,1b\n" diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c index 5473c23fcb52..5acc0ace3d7d 100644 --- a/drivers/s390/char/vmwatchdog.c +++ b/drivers/s390/char/vmwatchdog.c @@ -66,7 +66,7 @@ static int __diag288(enum vmwdt_func func, unsigned int timeout, __cmdl = len; err = 0; asm volatile ( -#ifdef __s390x__ +#ifdef CONFIG_64BIT "diag %2,%4,0x288\n" "1: \n" ".section .fixup,\"ax\"\n" diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 6f274f4f92eb..7376bc87206d 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -195,7 +195,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ sch->orb.spnd = sch->options.suspend; sch->orb.ssic = sch->options.suspend && sch->options.inter; sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm; -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT /* * for 64 bit we always support 64 bit IDAWs with 4k page size only */ diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index 3c77c3fd461d..04ceba343db8 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c @@ -27,7 +27,7 @@ /* * diag210 is used under VM to get information about a virtual device */ -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT int diag210(struct diag210 * addr) { diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index 62b0e2ad507f..95a9462f9a91 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h @@ -50,7 +50,7 @@ static inline int stsch_err(struct subchannel_id schid, "0: ipm %0\n" " srl %0,28\n" "1:\n" -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT ".section __ex_table,\"a\"\n" " .align 8\n" " .quad 0b,1b\n" @@ -95,7 +95,7 @@ static inline int msch_err(struct subchannel_id schid, "0: ipm %0\n" " srl %0,28\n" "1:\n" -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT ".section __ex_table,\"a\"\n" " .align 8\n" " .quad 0b,1b\n" diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 035c77af9cd3..30a836ffc31f 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c @@ -2394,7 +2394,7 @@ tiqdio_check_chsc_availability(void) sprintf(dbf_text,"hydrati%1x", hydra_thinints); QDIO_DBF_TEXT0(0,setup,dbf_text); -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT /* Check for QEBSM support in general (bit 58). 
*/ is_passthrough = css_general_characteristics.qebsm; #endif diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 43b840af5300..fa385e761fe1 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -271,7 +271,7 @@ static inline int do_sqbs(unsigned long sch, unsigned char state, int queue, unsigned int *start, unsigned int *count) { -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT register unsigned long _ccq asm ("0") = *count; register unsigned long _sch asm ("1") = sch; unsigned long _queuestart = ((unsigned long)queue << 32) | *start; @@ -295,7 +295,7 @@ static inline int do_eqbs(unsigned long sch, unsigned char *state, int queue, unsigned int *start, unsigned int *count) { -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT register unsigned long _ccq asm ("0") = *count; register unsigned long _sch asm ("1") = sch; unsigned long _queuestart = ((unsigned long)queue << 32) | *start; @@ -323,7 +323,7 @@ do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2) { int cc; -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT asm volatile ( "lhi 0,2 \n\t" "lr 1,%1 \n\t" @@ -336,7 +336,7 @@ do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2) : "d" (schid), "d" (mask1), "d" (mask2) : "cc", "0", "1", "2", "3" ); -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ asm volatile ( "lghi 0,2 \n\t" "llgfr 1,%1 \n\t" @@ -349,7 +349,7 @@ do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2) : "d" (schid), "d" (mask1), "d" (mask2) : "cc", "0", "1", "2", "3" ); -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ return cc; } @@ -358,7 +358,7 @@ do_siga_input(struct subchannel_id schid, unsigned int mask) { int cc; -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT asm volatile ( "lhi 0,1 \n\t" "lr 1,%1 \n\t" @@ -370,7 +370,7 @@ do_siga_input(struct subchannel_id schid, unsigned int mask) : "d" (schid), "d" (mask) : "cc", "0", "1", "2", "memory" ); -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ asm volatile ( "lghi 0,1 \n\t" "llgfr 1,%1 \n\t" @@ -382,7 +382,7 @@ do_siga_input(struct subchannel_id schid, unsigned int mask) : "d" (schid), "d" (mask) : "cc", "0", "1", "2", "memory" ); -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ return cc; } @@ -394,7 +394,7 @@ do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb, int cc; __u32 busy_bit; -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT asm volatile ( "lhi 0,0 \n\t" "lr 1,%2 \n\t" @@ -424,7 +424,7 @@ do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb, "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) : "cc", "0", "1", "2", "memory" ); -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ asm volatile ( "llgfr 0,%5 \n\t" "lgr 1,%2 \n\t" @@ -449,7 +449,7 @@ do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb, "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION), "d" (fc) : "cc", "0", "1", "2", "memory" ); -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ (*bb) = busy_bit; return cc; @@ -461,21 +461,21 @@ do_clear_global_summary(void) unsigned long time; -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT asm volatile ( "lhi 1,3 \n\t" ".insn rre,0xb2650000,2,0 \n\t" "lr %0,3 \n\t" : "=d" (time) : : "cc", "1", "2", "3" ); -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ asm volatile ( "lghi 1,3 \n\t" ".insn rre,0xb2650000,2,0 \n\t" "lgr %0,3 \n\t" : "=d" (time) : : "cc", "1", "2", "3" ); -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ return time; } @@ -542,11 +542,11 @@ struct 
qdio_perf_stats { #define MY_MODULE_STRING(x) #x -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT #define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x) -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ #define QDIO_GET_ADDR(x) ((__u32)(long)x) -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ struct qdio_q { volatile struct slsb slsb; diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c index 7c3ed52e03e1..d7f7494a0cbe 100644 --- a/drivers/s390/crypto/z90hardware.c +++ b/drivers/s390/crypto/z90hardware.c @@ -785,7 +785,7 @@ testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat) int ccode; asm volatile -#ifdef __s390x__ +#ifdef CONFIG_64BIT (" llgfr 0,%4 \n" " slgr 1,1 \n" " lgr 2,1 \n" @@ -855,7 +855,7 @@ resetq(int q_nr, struct ap_status_word *stat_p) int ccode; asm volatile -#ifdef __s390x__ +#ifdef CONFIG_64BIT (" llgfr 0,%2 \n" " lghi 1,1 \n" " sll 1,24 \n" @@ -921,7 +921,7 @@ sen(int msg_len, unsigned char *msg_ext, struct ap_status_word *stat) int ccode; asm volatile -#ifdef __s390x__ +#ifdef CONFIG_64BIT (" lgr 6,%3 \n" " llgfr 7,%2 \n" " llgt 0,0(6) \n" @@ -1000,7 +1000,7 @@ rec(int q_nr, int buff_l, unsigned char *rsp, unsigned char *id, int ccode; asm volatile -#ifdef __s390x__ +#ifdef CONFIG_64BIT (" llgfr 0,%2 \n" " lgr 3,%4 \n" " lgr 6,%3 \n" diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index a7efc394515e..548854754921 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -1,5 +1,5 @@ menu "S/390 network device drivers" - depends on NETDEVICES && ARCH_S390 + depends on NETDEVICES && S390 config LCS tristate "Lan Channel Station Interface" diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 6b63d21612ec..e70af7f39946 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c @@ -1603,7 +1603,7 @@ dumpit(char* buf, int len) __u32 ct, sw, rm, dup; char *ptr, *rptr; char tbuf[82], tdup[82]; -#if (CONFIG_ARCH_S390X) +#if (CONFIG_64BIT) char addr[22]; #else char addr[12]; @@ -1619,7 +1619,7 @@ dumpit(char* buf, int len) dup = 0; for ( ct=0; ct < len; ct++, ptr++, rptr++ ) { if (sw == 0) { -#if (CONFIG_ARCH_S390X) +#if (CONFIG_64BIT) sprintf(addr, "%16.16lX",(unsigned long)rptr); #else sprintf(addr, "%8.8X",(__u32)rptr); @@ -1634,7 +1634,7 @@ dumpit(char* buf, int len) if (sw == 8) { strcat(bhex, " "); } -#if (CONFIG_ARCH_S390X) +#if (CONFIG_64BIT) sprintf(tbuf,"%2.2lX", (unsigned long)*ptr); #else sprintf(tbuf,"%2.2X", (__u32)*ptr); diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index 7dad597ff86e..3bf466603512 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c @@ -246,7 +246,7 @@ s390_revalidate_registers(struct mci *mci) */ kill_task = 1; -#ifndef __s390x__ +#ifndef CONFIG_64BIT asm volatile("ld 0,0(%0)\n" "ld 2,8(%0)\n" "ld 4,16(%0)\n" @@ -255,7 +255,7 @@ s390_revalidate_registers(struct mci *mci) #endif if (MACHINE_HAS_IEEE) { -#ifdef __s390x__ +#ifdef CONFIG_64BIT fpt_save_area = &S390_lowcore.floating_pt_save_area; fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; #else @@ -314,7 +314,7 @@ s390_revalidate_registers(struct mci *mci) */ s390_handle_damage("invalid control registers."); else -#ifdef __s390x__ +#ifdef CONFIG_64BIT asm volatile("lctlg 0,15,0(%0)" : : "a" (&S390_lowcore.cregs_save_area)); #else @@ -327,7 +327,7 @@ s390_revalidate_registers(struct mci *mci) * can't write something sensible into that register. 
*/ -#ifdef __s390x__ +#ifdef CONFIG_64BIT /* * See if we can revalidate the TOD programmable register with its * old contents (should be zero) otherwise set it to zero. @@ -384,7 +384,7 @@ s390_do_machine_check(struct pt_regs *regs) if (mci->b) { /* Processing backup -> verify if we can survive this */ u64 z_mcic, o_mcic, t_mcic; -#ifdef __s390x__ +#ifdef CONFIG_64BIT z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index 87c2db1bd4f5..66da840c9316 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c @@ -106,7 +106,7 @@ static inline int stsi (void *sysinfo, { int cc, retv; -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT __asm__ __volatile__ ( "lr\t0,%2\n" "\tlr\t1,%3\n" "\tstsi\t0(%4)\n" diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 4c42065dea88..9e8254f0256c 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1815,7 +1815,7 @@ config SCSI_SUNESP config ZFCP tristate "FCP host bus adapter driver for IBM eServer zSeries" - depends on ARCH_S390 && QDIO && SCSI + depends on S390 && QDIO && SCSI select SCSI_FC_ATTRS help If you want to access SCSI devices attached to your IBM eServer -- cgit v1.2.3 From a1a5ea70a6e9db6332b27fe2d96666e17aa1436b Mon Sep 17 00:00:00 2001 From: Markus Lidel Date: Fri, 6 Jan 2006 00:19:29 -0800 Subject: [PATCH] I2O: changed I2O API to create I2O messages in kernel memory Changed the I2O API to create I2O messages first in kernel memory and then transfer it at once over the PCI bus instead of sending each quad-word over the PCI bus. Signed-off-by: Markus Lidel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/message/i2o/bus-osm.c | 21 +-- drivers/message/i2o/device.c | 51 +++---- drivers/message/i2o/exec-osm.c | 93 ++++++------ drivers/message/i2o/i2o_block.c | 157 ++++++++++----------- drivers/message/i2o/i2o_config.c | 169 +++++++++++----------- drivers/message/i2o/i2o_scsi.c | 50 ++++--- drivers/message/i2o/iop.c | 296 +++++++++++++++++++-------------------- drivers/message/i2o/pci.c | 1 + 8 files changed, 412 insertions(+), 426 deletions(-) (limited to 'drivers') diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c index 151b228e1cb3..ce039d322fd0 100644 --- a/drivers/message/i2o/bus-osm.c +++ b/drivers/message/i2o/bus-osm.c @@ -39,18 +39,18 @@ static struct i2o_class_id i2o_bus_class_id[] = { */ static int i2o_bus_scan(struct i2o_device *dev) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; - m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) + msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) return -ETIMEDOUT; - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.tid, - &msg->u.head[1]); + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data. + tid); - return i2o_msg_post_wait(dev->iop, m, 60); + return i2o_msg_post_wait(dev->iop, msg, 60); }; /** @@ -59,8 +59,9 @@ static int i2o_bus_scan(struct i2o_device *dev) * * Returns count. 
*/ -static ssize_t i2o_bus_store_scan(struct device *d, struct device_attribute *attr, const char *buf, - size_t count) +static ssize_t i2o_bus_store_scan(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) { struct i2o_device *i2o_dev = to_i2o_device(d); int rc; diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c index 8eb50cdb8ae1..002ae0ed8966 100644 --- a/drivers/message/i2o/device.c +++ b/drivers/message/i2o/device.c @@ -35,18 +35,18 @@ static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd, u32 type) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; - m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid, &msg->u.head[1]); - writel(type, &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid); + msg->body[0] = cpu_to_le32(type); - return i2o_msg_post_wait(dev->iop, m, 60); + return i2o_msg_post_wait(dev->iop, msg, 60); } /** @@ -419,10 +419,9 @@ int i2o_device_parse_lct(struct i2o_controller *c) * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. */ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, - int oplen, void *reslist, int reslen) + int oplen, void *reslist, int reslen) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; u32 *res32 = (u32 *) reslist; u32 *restmp = (u32 *) reslist; int len = 0; @@ -437,26 +436,28 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL)) return -ENOMEM; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) { + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) { i2o_dma_free(dev, &res); - return -ETIMEDOUT; + return PTR_ERR(msg); } i = 0; - writel(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid, - &msg->u.head[1]); - writel(0, &msg->body[i++]); - writel(0x4C000000 | oplen, &msg->body[i++]); /* OperationList */ - memcpy_toio(&msg->body[i], oplist, oplen); + msg->u.head[1] = + cpu_to_le32(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid); + msg->body[i++] = cpu_to_le32(0x00000000); + msg->body[i++] = cpu_to_le32(0x4C000000 | oplen); /* OperationList */ + memcpy(&msg->body[i], oplist, oplen); + i += (oplen / 4 + (oplen % 4 ? 1 : 0)); - writel(0xD0000000 | res.len, &msg->body[i++]); /* ResultList */ - writel(res.phys, &msg->body[i++]); + msg->body[i++] = cpu_to_le32(0xD0000000 | res.len); /* ResultList */ + msg->body[i++] = cpu_to_le32(res.phys); - writel(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) | - SGL_OFFSET_5, &msg->u.head[0]); + msg->u.head[0] = + cpu_to_le32(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) | + SGL_OFFSET_5); - rc = i2o_msg_post_wait_mem(c, m, 10, &res); + rc = i2o_msg_post_wait_mem(c, msg, 10, &res); /* This only looks like a memory leak - don't "fix" it. 
*/ if (rc == -ETIMEDOUT) diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c index 9c339a2505b0..71a09332e7c0 100644 --- a/drivers/message/i2o/exec-osm.c +++ b/drivers/message/i2o/exec-osm.c @@ -114,13 +114,12 @@ static void i2o_exec_wait_free(struct i2o_exec_wait *wait) * Returns 0 on success, negative error code on timeout or positive error * code from reply. */ -int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long - timeout, struct i2o_dma *dma) +int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg, + unsigned long timeout, struct i2o_dma *dma) { DECLARE_WAIT_QUEUE_HEAD(wq); struct i2o_exec_wait *wait; static u32 tcntxt = 0x80000000; - struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m); int rc = 0; wait = i2o_exec_wait_alloc(); @@ -138,15 +137,15 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long * We will only use transaction contexts >= 0x80000000 for POST WAIT, * so we could find a POST WAIT reply easier in the reply handler. */ - writel(i2o_exec_driver.context, &msg->u.s.icntxt); + msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); wait->tcntxt = tcntxt++; - writel(wait->tcntxt, &msg->u.s.tcntxt); + msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt); /* * Post the message to the controller. At some point later it will * return. If we time out before it returns then complete will be zero. */ - i2o_msg_post(c, m); + i2o_msg_post(c, msg); if (!wait->complete) { wait->wq = &wq; @@ -266,7 +265,8 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m, * * Returns number of bytes printed into buffer. */ -static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute *attr, char *buf) +static ssize_t i2o_exec_show_vendor_id(struct device *d, + struct device_attribute *attr, char *buf) { struct i2o_device *dev = to_i2o_device(d); u16 id; @@ -286,7 +286,9 @@ static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute * * Returns number of bytes printed into buffer. */ -static ssize_t i2o_exec_show_product_id(struct device *d, struct device_attribute *attr, char *buf) +static ssize_t i2o_exec_show_product_id(struct device *d, + struct device_attribute *attr, + char *buf) { struct i2o_device *dev = to_i2o_device(d); u16 id; @@ -385,23 +387,22 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m, u32 context; if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) { + struct i2o_message __iomem *pmsg; + u32 pm; + /* * If Fail bit is set we must take the transaction context of * the preserved message to find the right request again. 
*/ - struct i2o_message __iomem *pmsg; - u32 pm; pm = le32_to_cpu(msg->body[3]); - pmsg = i2o_msg_in_to_virt(c, pm); + context = readl(&pmsg->u.s.tcntxt); i2o_report_status(KERN_INFO, "i2o_core", msg); - context = readl(&pmsg->u.s.tcntxt); - /* Release the preserved msg */ - i2o_msg_nop(c, pm); + i2o_msg_nop_mfa(c, pm); } else context = le32_to_cpu(msg->u.s.tcntxt); @@ -462,25 +463,26 @@ static void i2o_exec_event(struct i2o_event *evt) */ int i2o_exec_lct_get(struct i2o_controller *c) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; int i = 0; int rc = -EAGAIN; for (i = 1; i <= I2O_LCT_GET_TRIES; i++) { - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; - - writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); - writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(0xffffffff, &msg->body[0]); - writel(0x00000000, &msg->body[1]); - writel(0xd0000000 | c->dlct.len, &msg->body[2]); - writel(c->dlct.phys, &msg->body[3]); - - rc = i2o_msg_post_wait(c, m, I2O_TIMEOUT_LCT_GET); + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); + + msg->u.head[0] = + cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | + ADAPTER_TID); + msg->body[0] = cpu_to_le32(0xffffffff); + msg->body[1] = cpu_to_le32(0x00000000); + msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len); + msg->body[3] = cpu_to_le32(c->dlct.phys); + + rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET); if (rc < 0) break; @@ -506,29 +508,28 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind) { i2o_status_block *sb = c->status_block.virt; struct device *dev; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; dev = &c->pdev->dev; if (i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL)) return -ENOMEM; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; - - writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); - writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(i2o_exec_driver.context, &msg->u.s.icntxt); - writel(0, &msg->u.s.tcntxt); /* FIXME */ - writel(0xffffffff, &msg->body[0]); - writel(change_ind, &msg->body[1]); - writel(0xd0000000 | c->dlct.len, &msg->body[2]); - writel(c->dlct.phys, &msg->body[3]); - - i2o_msg_post(c, m); + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); + + msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6); + msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | + ADAPTER_TID); + msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); + msg->u.s.tcntxt = cpu_to_le32(0x00000000); + msg->body[0] = cpu_to_le32(0xffffffff); + msg->body[1] = cpu_to_le32(change_ind); + msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len); + msg->body[3] = cpu_to_le32(c->dlct.phys); + + i2o_msg_post(c, msg); return 0; }; diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index f283b5bafdd3..2bd15c70773b 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -130,20 +130,20 @@ static int i2o_block_remove(struct device *dev) */ static int i2o_block_device_flush(struct i2o_device *dev) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; - m = i2o_msg_get_wait(dev->iop, &msg, 
I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid, - &msg->u.head[1]); - writel(60 << 16, &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev-> + lct_data.tid); + msg->body[0] = cpu_to_le32(60 << 16); osm_debug("Flushing...\n"); - return i2o_msg_post_wait(dev->iop, m, 60); + return i2o_msg_post_wait(dev->iop, msg, 60); }; /** @@ -181,21 +181,21 @@ static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk, */ static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id) { - struct i2o_message __iomem *msg; - u32 m; - - m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; - - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid, - &msg->u.head[1]); - writel(-1, &msg->body[0]); - writel(0, &msg->body[1]); + struct i2o_message *msg; + + msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); + + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev-> + lct_data.tid); + msg->body[0] = cpu_to_le32(-1); + msg->body[1] = cpu_to_le32(0x00000000); osm_debug("Mounting...\n"); - return i2o_msg_post_wait(dev->iop, m, 2); + return i2o_msg_post_wait(dev->iop, msg, 2); }; /** @@ -210,20 +210,20 @@ static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id) */ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; - m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg) == I2O_QUEUE_EMPTY) + return PTR_ERR(msg); - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, - &msg->u.head[1]); - writel(-1, &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev-> + lct_data.tid); + msg->body[0] = cpu_to_le32(-1); osm_debug("Locking...\n"); - return i2o_msg_post_wait(dev->iop, m, 2); + return i2o_msg_post_wait(dev->iop, msg, 2); }; /** @@ -238,20 +238,20 @@ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id) */ static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; - m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, - &msg->u.head[1]); - writel(media_id, &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev-> 
+ lct_data.tid); + msg->body[0] = cpu_to_le32(media_id); osm_debug("Unlocking...\n"); - return i2o_msg_post_wait(dev->iop, m, 2); + return i2o_msg_post_wait(dev->iop, msg, 2); }; /** @@ -267,21 +267,21 @@ static int i2o_block_device_power(struct i2o_block_device *dev, u8 op) { struct i2o_device *i2o_dev = dev->i2o_dev; struct i2o_controller *c = i2o_dev->iop; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; int rc; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->lct_data. - tid, &msg->u.head[1]); - writel(op << 24, &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev-> + lct_data.tid); + msg->body[0] = cpu_to_le32(op << 24); osm_debug("Power...\n"); - rc = i2o_msg_post_wait(c, m, 60); + rc = i2o_msg_post_wait(c, msg, 60); if (!rc) dev->power = op; @@ -331,7 +331,7 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq) */ static inline int i2o_block_sglist_alloc(struct i2o_controller *c, struct i2o_block_request *ireq, - u32 __iomem ** mptr) + u32 ** mptr) { int nents; enum dma_data_direction direction; @@ -745,10 +745,9 @@ static int i2o_block_transfer(struct request *req) struct i2o_block_device *dev = req->rq_disk->private_data; struct i2o_controller *c; int tid = dev->i2o_dev->lct_data.tid; - struct i2o_message __iomem *msg; - u32 __iomem *mptr; + struct i2o_message *msg; + u32 *mptr; struct i2o_block_request *ireq = req->special; - u32 m; u32 tcntxt; u32 sgl_offset = SGL_OFFSET_8; u32 ctl_flags = 0x00000000; @@ -763,9 +762,9 @@ static int i2o_block_transfer(struct request *req) c = dev->i2o_dev->iop; - m = i2o_msg_get(c, &msg); - if (m == I2O_QUEUE_EMPTY) { - rc = -EBUSY; + msg = i2o_msg_get(c); + if (IS_ERR(msg)) { + rc = PTR_ERR(msg); goto exit; } @@ -775,8 +774,8 @@ static int i2o_block_transfer(struct request *req) goto nop_msg; } - writel(i2o_block_driver.context, &msg->u.s.icntxt); - writel(tcntxt, &msg->u.s.tcntxt); + msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context); + msg->u.s.tcntxt = cpu_to_le32(tcntxt); mptr = &msg->body[0]; @@ -834,11 +833,11 @@ static int i2o_block_transfer(struct request *req) sgl_offset = SGL_OFFSET_12; - writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid, - &msg->u.head[1]); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid); - writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); - writel(tid, mptr++); + *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC); + *mptr++ = cpu_to_le32(tid); /* * ENABLE_DISCONNECT @@ -853,22 +852,24 @@ static int i2o_block_transfer(struct request *req) scsi_flags = 0xa0a0000a; } - writel(scsi_flags, mptr++); + *mptr++ = cpu_to_le32(scsi_flags); *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); - memcpy_toio(mptr, cmd, 10); + memcpy(mptr, cmd, 10); mptr += 4; - writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); + *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); } else #endif { - writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); - writel(ctl_flags, mptr++); - writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); - writel((u32) (req->sector << 
KERNEL_SECTOR_SHIFT), mptr++); - writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++); + msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); + *mptr++ = cpu_to_le32(ctl_flags); + *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); + *mptr++ = + cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT)); + *mptr++ = + cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT)); } if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { @@ -876,13 +877,13 @@ static int i2o_block_transfer(struct request *req) goto context_remove; } - writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | - sgl_offset, &msg->u.head[0]); + msg->u.head[0] = + cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset); list_add_tail(&ireq->queue, &dev->open_queue); dev->open_queue_depth++; - i2o_msg_post(c, m); + i2o_msg_post(c, msg); return 0; @@ -890,7 +891,7 @@ static int i2o_block_transfer(struct request *req) i2o_cntxt_list_remove(c, req); nop_msg: - i2o_msg_nop(c, m); + i2o_msg_nop(c, msg); exit: return rc; diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c index 3c3a7abebb1b..4fe73d628c5b 100644 --- a/drivers/message/i2o/i2o_config.c +++ b/drivers/message/i2o/i2o_config.c @@ -230,8 +230,7 @@ static int i2o_cfg_swdl(unsigned long arg) struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; unsigned char maxfrag = 0, curfrag = 1; struct i2o_dma buffer; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; unsigned int status = 0, swlen = 0, fragsize = 8192; struct i2o_controller *c; @@ -257,31 +256,34 @@ static int i2o_cfg_swdl(unsigned long arg) if (!c) return -ENXIO; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -EBUSY; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { - i2o_msg_nop(c, m); + i2o_msg_nop(c, msg); return -ENOMEM; } __copy_from_user(buffer.virt, kxfer.buf, fragsize); - writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); - writel(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(i2o_config_driver.context, &msg->u.head[2]); - writel(0, &msg->u.head[3]); - writel((((u32) kxfer.flags) << 24) | (((u32) kxfer.sw_type) << 16) | - (((u32) maxfrag) << 8) | (((u32) curfrag)), &msg->body[0]); - writel(swlen, &msg->body[1]); - writel(kxfer.sw_id, &msg->body[2]); - writel(0xD0000000 | fragsize, &msg->body[3]); - writel(buffer.phys, &msg->body[4]); + msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | + ADAPTER_TID); + msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); + msg->u.head[3] = cpu_to_le32(0); + msg->body[0] = + cpu_to_le32((((u32) kxfer.flags) << 24) | (((u32) kxfer. 
+ sw_type) << 16) | + (((u32) maxfrag) << 8) | (((u32) curfrag))); + msg->body[1] = cpu_to_le32(swlen); + msg->body[2] = cpu_to_le32(kxfer.sw_id); + msg->body[3] = cpu_to_le32(0xD0000000 | fragsize); + msg->body[4] = cpu_to_le32(buffer.phys); osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); - status = i2o_msg_post_wait_mem(c, m, 60, &buffer); + status = i2o_msg_post_wait_mem(c, msg, 60, &buffer); if (status != -ETIMEDOUT) i2o_dma_free(&c->pdev->dev, &buffer); @@ -302,8 +304,7 @@ static int i2o_cfg_swul(unsigned long arg) struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; unsigned char maxfrag = 0, curfrag = 1; struct i2o_dma buffer; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; unsigned int status = 0, swlen = 0, fragsize = 8192; struct i2o_controller *c; int ret = 0; @@ -330,30 +331,30 @@ static int i2o_cfg_swul(unsigned long arg) if (!c) return -ENXIO; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -EBUSY; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { - i2o_msg_nop(c, m); + i2o_msg_nop(c, msg); return -ENOMEM; } - writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); - writel(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(i2o_config_driver.context, &msg->u.head[2]); - writel(0, &msg->u.head[3]); - writel((u32) kxfer.flags << 24 | (u32) kxfer. - sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag, - &msg->body[0]); - writel(swlen, &msg->body[1]); - writel(kxfer.sw_id, &msg->body[2]); - writel(0xD0000000 | fragsize, &msg->body[3]); - writel(buffer.phys, &msg->body[4]); + msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID); + msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); + msg->u.head[3] = cpu_to_le32(0); + msg->body[0] = + cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer. 
+ sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag); + msg->body[1] = cpu_to_le32(swlen); + msg->body[2] = cpu_to_le32(kxfer.sw_id); + msg->body[3] = cpu_to_le32(0xD0000000 | fragsize); + msg->body[4] = cpu_to_le32(buffer.phys); osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); - status = i2o_msg_post_wait_mem(c, m, 60, &buffer); + status = i2o_msg_post_wait_mem(c, msg, 60, &buffer); if (status != I2O_POST_WAIT_OK) { if (status != -ETIMEDOUT) @@ -380,8 +381,7 @@ static int i2o_cfg_swdel(unsigned long arg) struct i2o_controller *c; struct i2o_sw_xfer kxfer; struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; unsigned int swlen; int token; @@ -395,21 +395,21 @@ static int i2o_cfg_swdel(unsigned long arg) if (!c) return -ENXIO; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -EBUSY; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(i2o_config_driver.context, &msg->u.head[2]); - writel(0, &msg->u.head[3]); - writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16, - &msg->body[0]); - writel(swlen, &msg->body[1]); - writel(kxfer.sw_id, &msg->body[2]); + msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID); + msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); + msg->u.head[3] = cpu_to_le32(0); + msg->body[0] = + cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16); + msg->body[1] = cpu_to_le32(swlen); + msg->body[2] = cpu_to_le32(kxfer.sw_id); - token = i2o_msg_post_wait(c, m, 10); + token = i2o_msg_post_wait(c, msg, 10); if (token != I2O_POST_WAIT_OK) { osm_info("swdel failed, DetailedStatus = %d\n", token); @@ -423,25 +423,24 @@ static int i2o_cfg_validate(unsigned long arg) { int token; int iop = (int)arg; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; struct i2o_controller *c; c = i2o_find_iop(iop); if (!c) return -ENXIO; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -EBUSY; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop, - &msg->u.head[1]); - writel(i2o_config_driver.context, &msg->u.head[2]); - writel(0, &msg->u.head[3]); + msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop); + msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); + msg->u.head[3] = cpu_to_le32(0); - token = i2o_msg_post_wait(c, m, 10); + token = i2o_msg_post_wait(c, msg, 10); if (token != I2O_POST_WAIT_OK) { osm_info("Can't validate configuration, ErrorStatus = %d\n", @@ -454,8 +453,7 @@ static int i2o_cfg_validate(unsigned long arg) static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg; struct i2o_evt_id kdesc; struct i2o_controller *c; @@ -474,18 +472,19 @@ static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp) if (!d) return -ENODEV; - m = 
i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -EBUSY; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid, - &msg->u.head[1]); - writel(i2o_config_driver.context, &msg->u.head[2]); - writel(i2o_cntxt_list_add(c, fp->private_data), &msg->u.head[3]); - writel(kdesc.evt_mask, &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | + kdesc.tid); + msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); + msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data)); + msg->body[0] = cpu_to_le32(kdesc.evt_mask); - i2o_msg_post(c, m); + i2o_msg_post(c, msg); return 0; } @@ -537,7 +536,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, u32 sg_index = 0; i2o_status_block *sb; struct i2o_message *msg; - u32 m; unsigned int iop; cmd = (struct i2o_cmd_passthru32 __user *)arg; @@ -553,7 +551,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, return -ENXIO; } - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); sb = c->status_block.virt; @@ -595,8 +593,8 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, sg_offset = (msg->u.head[0] >> 4) & 0x0f; - writel(i2o_config_driver.context, &msg->u.s.icntxt); - writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt); + msg->u.s.icntxt = cpu_to_le32(i2o_config_driver.context); + msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, reply)); memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); if (sg_offset) { @@ -662,7 +660,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, } } - rcode = i2o_msg_post_wait(c, m, 60); + rcode = i2o_msg_post_wait(c, msg, 60); if (rcode) goto sg_list_cleanup; @@ -780,8 +778,7 @@ static int i2o_cfg_passthru(unsigned long arg) u32 i = 0; void *p = NULL; i2o_status_block *sb; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; unsigned int iop; if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg)) @@ -793,7 +790,7 @@ static int i2o_cfg_passthru(unsigned long arg) return -ENXIO; } - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); sb = c->status_block.virt; @@ -830,8 +827,8 @@ static int i2o_cfg_passthru(unsigned long arg) sg_offset = (msg->u.head[0] >> 4) & 0x0f; - writel(i2o_config_driver.context, &msg->u.s.icntxt); - writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt); + msg->u.s.icntxt = cpu_to_le32(i2o_config_driver.context); + msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, reply)); memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); if (sg_offset) { @@ -894,7 +891,7 @@ static int i2o_cfg_passthru(unsigned long arg) } } - rcode = i2o_msg_post_wait(c, m, 60); + rcode = i2o_msg_post_wait(c, msg, 60); if (rcode) goto sg_list_cleanup; diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c index 9f1744c3933b..7a784fd60804 100644 --- a/drivers/message/i2o/i2o_scsi.c +++ b/drivers/message/i2o/i2o_scsi.c @@ -510,8 +510,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, struct i2o_controller *c; struct i2o_device *i2o_dev; int tid; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; /* * ENABLE_DISCONNECT * SIMPLE_TAG @@ -519,7 
+518,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, */ u32 scsi_flags = 0x20a00000; u32 sgl_offset; - u32 __iomem *mptr; + u32 *mptr; u32 cmd = I2O_CMD_SCSI_EXEC << 24; int rc = 0; @@ -576,8 +575,8 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, * throw it back to the scsi layer */ - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) { + msg = i2o_msg_get(c); + if (IS_ERR(msg)) { rc = SCSI_MLQUEUE_HOST_BUSY; goto exit; } @@ -617,16 +616,16 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, if (sgl_offset == SGL_OFFSET_10) sgl_offset = SGL_OFFSET_12; cmd = I2O_CMD_PRIVATE << 24; - writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); - writel(adpt_flags | tid, mptr++); + *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC); + *mptr++ = cpu_to_le32(adpt_flags | tid); } #endif - writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); - writel(i2o_scsi_driver.context, &msg->u.s.icntxt); + msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); + msg->u.s.icntxt = cpu_to_le32(i2o_scsi_driver.context); /* We want the SCSI control block back */ - writel(i2o_cntxt_list_add(c, SCpnt), &msg->u.s.tcntxt); + msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, SCpnt)); /* LSI_920_PCI_QUIRK * @@ -649,15 +648,15 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, } */ - writel(scsi_flags | SCpnt->cmd_len, mptr++); + *mptr++ = cpu_to_le32(scsi_flags | SCpnt->cmd_len); /* Write SCSI command into the message - always 16 byte block */ - memcpy_toio(mptr, SCpnt->cmnd, 16); + memcpy(mptr, SCpnt->cmnd, 16); mptr += 4; if (sgl_offset != SGL_OFFSET_0) { /* write size of data addressed by SGL */ - writel(SCpnt->request_bufflen, mptr++); + *mptr++ = cpu_to_le32(SCpnt->request_bufflen); /* Now fill in the SGList and command */ if (SCpnt->use_sg) { @@ -676,11 +675,11 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, } /* Stick the headers on */ - writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset, - &msg->u.head[0]); + msg->u.head[0] = + cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset); /* Queue the message */ - i2o_msg_post(c, m); + i2o_msg_post(c, msg); osm_debug("Issued %ld\n", SCpnt->serial_number); @@ -688,7 +687,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, nomem: rc = -ENOMEM; - i2o_msg_nop(c, m); + i2o_msg_nop(c, msg); exit: return rc; @@ -709,8 +708,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt) { struct i2o_device *i2o_dev; struct i2o_controller *c; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; int tid; int status = FAILED; @@ -720,16 +718,16 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt) c = i2o_dev->iop; tid = i2o_dev->lct_data.tid; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) return SCSI_MLQUEUE_HOST_BUSY; - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid, - &msg->u.head[1]); - writel(i2o_cntxt_list_get_ptr(c, SCpnt), &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid); + msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt)); - if (i2o_msg_post_wait(c, m, I2O_TIMEOUT_SCSI_SCB_ABORT)) + if (i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT)) status = SUCCESS; return status; diff --git a/drivers/message/i2o/iop.c 
b/drivers/message/i2o/iop.c index 4eb53258842e..f86abb42bf89 100644 --- a/drivers/message/i2o/iop.c +++ b/drivers/message/i2o/iop.c @@ -46,27 +46,6 @@ static struct i2o_dma i2o_systab; static int i2o_hrt_get(struct i2o_controller *c); -/** - * i2o_msg_nop - Returns a message which is not used - * @c: I2O controller from which the message was created - * @m: message which should be returned - * - * If you fetch a message via i2o_msg_get, and can't use it, you must - * return the message with this function. Otherwise the message frame - * is lost. - */ -void i2o_msg_nop(struct i2o_controller *c, u32 m) -{ - struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m); - - writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(0, &msg->u.head[2]); - writel(0, &msg->u.head[3]); - i2o_msg_post(c, m); -}; - /** * i2o_msg_get_wait - obtain an I2O message from the IOP * @c: I2O controller @@ -81,22 +60,21 @@ void i2o_msg_nop(struct i2o_controller *c, u32 m) * address from the read port (see the i2o spec). If no message is * available returns I2O_QUEUE_EMPTY and msg is leaved untouched. */ -u32 i2o_msg_get_wait(struct i2o_controller *c, - struct i2o_message __iomem ** msg, int wait) +struct i2o_message *i2o_msg_get_wait(struct i2o_controller *c, int wait) { unsigned long timeout = jiffies + wait * HZ; - u32 m; + struct i2o_message *msg; - while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) { + while (IS_ERR(msg = i2o_msg_get(c))) { if (time_after(jiffies, timeout)) { osm_debug("%s: Timeout waiting for message frame.\n", c->name); - return I2O_QUEUE_EMPTY; + return ERR_PTR(-ETIMEDOUT); } schedule_timeout_uninterruptible(1); } - return m; + return msg; }; #if BITS_PER_LONG == 64 @@ -301,8 +279,7 @@ struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid) */ static int i2o_iop_quiesce(struct i2o_controller *c) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; i2o_status_block *sb = c->status_block.virt; int rc; @@ -313,16 +290,17 @@ static int i2o_iop_quiesce(struct i2o_controller *c) (sb->iop_state != ADAPTER_STATE_OPERATIONAL)) return 0; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); + msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | + ADAPTER_TID); /* Long timeout needed for quiesce if lots of devices */ - if ((rc = i2o_msg_post_wait(c, m, 240))) + if ((rc = i2o_msg_post_wait(c, msg, 240))) osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc); else osm_debug("%s: Quiesced.\n", c->name); @@ -342,8 +320,7 @@ static int i2o_iop_quiesce(struct i2o_controller *c) */ static int i2o_iop_enable(struct i2o_controller *c) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; i2o_status_block *sb = c->status_block.virt; int rc; @@ -353,16 +330,17 @@ static int i2o_iop_enable(struct i2o_controller *c) if (sb->iop_state != ADAPTER_STATE_READY) return -EINVAL; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return 
PTR_ERR(msg); - writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); + msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | + ADAPTER_TID); /* How long of a timeout do we need? */ - if ((rc = i2o_msg_post_wait(c, m, 240))) + if ((rc = i2o_msg_post_wait(c, msg, 240))) osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc); else osm_debug("%s: Enabled.\n", c->name); @@ -413,22 +391,22 @@ static inline void i2o_iop_enable_all(void) */ static int i2o_iop_clear(struct i2o_controller *c) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; int rc; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); /* Quiesce all IOPs first */ i2o_iop_quiesce_all(); - writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); + msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | + ADAPTER_TID); - if ((rc = i2o_msg_post_wait(c, m, 30))) + if ((rc = i2o_msg_post_wait(c, msg, 30))) osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc); else osm_debug("%s: Cleared.\n", c->name); @@ -446,13 +424,13 @@ static int i2o_iop_clear(struct i2o_controller *c) * Clear and (re)initialize IOP's outbound queue and post the message * frames to the IOP. * - * Returns 0 on success or a negative errno code on failure. + * Returns 0 on success or negative error code on failure. 
*/ static int i2o_iop_init_outbound_queue(struct i2o_controller *c) { - volatile u8 *status = c->status.virt; u32 m; - struct i2o_message __iomem *msg; + volatile u8 *status = c->status.virt; + struct i2o_message *msg; ulong timeout; int i; @@ -460,23 +438,24 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c) memset(c->status.virt, 0, 4); - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; - - writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); - writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(i2o_exec_driver.context, &msg->u.s.icntxt); - writel(0x00000000, &msg->u.s.tcntxt); - writel(PAGE_SIZE, &msg->body[0]); + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); + + msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | + ADAPTER_TID); + msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); + msg->u.s.tcntxt = cpu_to_le32(0x00000000); + msg->body[0] = cpu_to_le32(PAGE_SIZE); /* Outbound msg frame size in words and Initcode */ - writel(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); - writel(0xd0000004, &msg->body[2]); - writel(i2o_dma_low(c->status.phys), &msg->body[3]); - writel(i2o_dma_high(c->status.phys), &msg->body[4]); + msg->body[1] = cpu_to_le32(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80); + msg->body[2] = cpu_to_le32(0xd0000004); + msg->body[3] = cpu_to_le32(i2o_dma_low(c->status.phys)); + msg->body[4] = cpu_to_le32(i2o_dma_high(c->status.phys)); - i2o_msg_post(c, m); + i2o_msg_post(c, msg); timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ; while (*status <= I2O_CMD_IN_PROGRESS) { @@ -511,34 +490,34 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c) static int i2o_iop_reset(struct i2o_controller *c) { volatile u8 *status = c->status.virt; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; unsigned long timeout; i2o_status_block *sb = c->status_block.virt; int rc = 0; osm_debug("%s: Resetting controller\n", c->name); - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); memset(c->status_block.virt, 0, 8); /* Quiesce all IOPs first */ i2o_iop_quiesce_all(); - writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(i2o_exec_driver.context, &msg->u.s.icntxt); - writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context - writel(0, &msg->body[0]); - writel(0, &msg->body[1]); - writel(i2o_dma_low(c->status.phys), &msg->body[2]); - writel(i2o_dma_high(c->status.phys), &msg->body[3]); + msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | + ADAPTER_TID); + msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); + msg->u.s.tcntxt = cpu_to_le32(0x00000000); + msg->body[0] = cpu_to_le32(0x00000000); + msg->body[1] = cpu_to_le32(0x00000000); + msg->body[2] = cpu_to_le32(i2o_dma_low(c->status.phys)); + msg->body[3] = cpu_to_le32(i2o_dma_high(c->status.phys)); - i2o_msg_post(c, m); + i2o_msg_post(c, msg); /* Wait for a reply */ timeout = jiffies + I2O_TIMEOUT_RESET * HZ; @@ -567,18 +546,15 @@ static int i2o_iop_reset(struct 
i2o_controller *c) osm_debug("%s: Reset in progress, waiting for reboot...\n", c->name); - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); - while (m == I2O_QUEUE_EMPTY) { + while (IS_ERR(msg = i2o_msg_get_wait(c, I2O_TIMEOUT_RESET))) { if (time_after(jiffies, timeout)) { osm_err("%s: IOP reset timeout.\n", c->name); - rc = -ETIMEDOUT; + rc = PTR_ERR(msg); goto exit; } schedule_timeout_uninterruptible(1); - - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); } - i2o_msg_nop(c, m); + i2o_msg_nop(c, msg); /* from here all quiesce commands are safe */ c->no_quiesce = 0; @@ -686,8 +662,7 @@ static int i2o_iop_activate(struct i2o_controller *c) */ static int i2o_iop_systab_set(struct i2o_controller *c) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; i2o_status_block *sb = c->status_block.virt; struct device *dev = &c->pdev->dev; struct resource *root; @@ -735,20 +710,21 @@ static int i2o_iop_systab_set(struct i2o_controller *c) } } - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len, PCI_DMA_TODEVICE); if (!i2o_systab.phys) { - i2o_msg_nop(c, m); + i2o_msg_nop(c, msg); return -ENOMEM; } - writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]); - writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); + msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | + ADAPTER_TID); /* * Provide three SGL-elements: @@ -760,16 +736,16 @@ static int i2o_iop_systab_set(struct i2o_controller *c) * same table to everyone. 
We have to go remap it for them all */ - writel(c->unit + 2, &msg->body[0]); - writel(0, &msg->body[1]); - writel(0x54000000 | i2o_systab.len, &msg->body[2]); - writel(i2o_systab.phys, &msg->body[3]); - writel(0x54000000 | sb->current_mem_size, &msg->body[4]); - writel(sb->current_mem_base, &msg->body[5]); - writel(0xd4000000 | sb->current_io_size, &msg->body[6]); - writel(sb->current_io_base, &msg->body[6]); + msg->body[0] = cpu_to_le32(c->unit + 2); + msg->body[1] = cpu_to_le32(0x00000000); + msg->body[2] = cpu_to_le32(0x54000000 | i2o_systab.len); + msg->body[3] = cpu_to_le32(i2o_systab.phys); + msg->body[4] = cpu_to_le32(0x54000000 | sb->current_mem_size); + msg->body[5] = cpu_to_le32(sb->current_mem_base); + msg->body[6] = cpu_to_le32(0xd4000000 | sb->current_io_size); + msg->body[6] = cpu_to_le32(sb->current_io_base); - rc = i2o_msg_post_wait(c, m, 120); + rc = i2o_msg_post_wait(c, msg, 120); dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len, PCI_DMA_TODEVICE); @@ -952,30 +928,30 @@ static int i2o_parse_hrt(struct i2o_controller *c) */ int i2o_status_get(struct i2o_controller *c) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; volatile u8 *status_block; unsigned long timeout; status_block = (u8 *) c->status_block.virt; memset(c->status_block.virt, 0, sizeof(i2o_status_block)); - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(i2o_exec_driver.context, &msg->u.s.icntxt); - writel(0, &msg->u.s.tcntxt); // FIXME: use resonable transaction context - writel(0, &msg->body[0]); - writel(0, &msg->body[1]); - writel(i2o_dma_low(c->status_block.phys), &msg->body[2]); - writel(i2o_dma_high(c->status_block.phys), &msg->body[3]); - writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */ + msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | + ADAPTER_TID); + msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); + msg->u.s.tcntxt = cpu_to_le32(0x00000000); + msg->body[0] = cpu_to_le32(0x00000000); + msg->body[1] = cpu_to_le32(0x00000000); + msg->body[2] = cpu_to_le32(i2o_dma_low(c->status_block.phys)); + msg->body[3] = cpu_to_le32(i2o_dma_high(c->status_block.phys)); + msg->body[4] = cpu_to_le32(sizeof(i2o_status_block)); /* always 88 bytes */ - i2o_msg_post(c, m); + i2o_msg_post(c, msg); /* Wait for a reply */ timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ; @@ -1013,20 +989,20 @@ static int i2o_hrt_get(struct i2o_controller *c) struct device *dev = &c->pdev->dev; for (i = 0; i < I2O_HRT_GET_TRIES; i++) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]); - writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(0xd0000000 | c->hrt.len, &msg->body[0]); - writel(c->hrt.phys, &msg->body[1]); + msg->u.head[0] = cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_4); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | + ADAPTER_TID); + 
msg->body[0] = cpu_to_le32(0xd0000000 | c->hrt.len); + msg->body[1] = cpu_to_le32(c->hrt.phys); - rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt); + rc = i2o_msg_post_wait_mem(c, msg, 20, &c->hrt); if (rc < 0) { osm_err("%s: Unable to get HRT (status=%#x)\n", c->name, @@ -1056,6 +1032,7 @@ static int i2o_hrt_get(struct i2o_controller *c) */ void i2o_iop_free(struct i2o_controller *c) { + i2o_pool_free(&c->in_msg); kfree(c); }; @@ -1080,7 +1057,7 @@ static struct class *i2o_controller_class; * i2o_iop_alloc - Allocate and initialize a i2o_controller struct * * Allocate the necessary memory for a i2o_controller struct and - * initialize the lists. + * initialize the lists and message mempool. * * Returns a pointer to the I2O controller or a negative error code on * failure. @@ -1089,6 +1066,7 @@ struct i2o_controller *i2o_iop_alloc(void) { static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */ struct i2o_controller *c; + char poolname[32]; c = kmalloc(sizeof(*c), GFP_KERNEL); if (!c) { @@ -1098,11 +1076,20 @@ struct i2o_controller *i2o_iop_alloc(void) } memset(c, 0, sizeof(*c)); + c->unit = unit++; + sprintf(c->name, "iop%d", c->unit); + + snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name); + if (i2o_pool_alloc + (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4, + I2O_MSG_INPOOL_MIN)) { + kfree(c); + return ERR_PTR(-ENOMEM); + }; + INIT_LIST_HEAD(&c->devices); spin_lock_init(&c->lock); init_MUTEX(&c->lct_lock); - c->unit = unit++; - sprintf(c->name, "iop%d", c->unit); device_initialize(&c->device); @@ -1199,28 +1186,27 @@ int i2o_iop_add(struct i2o_controller *c) * is waited for, or expected. If you do not want further notifications, * call the i2o_event_register again with a evt_mask of 0. * - * Returns 0 on success or -ETIMEDOUT if no message could be fetched for - * sending the request. + * Returns 0 on success or negative error code on failure. */ int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv, int tcntxt, u32 evt_mask) { struct i2o_controller *c = dev->iop; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->lct_data. - tid, &msg->u.head[1]); - writel(drv->context, &msg->u.s.icntxt); - writel(tcntxt, &msg->u.s.tcntxt); - writel(evt_mask, &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev-> + lct_data.tid); + msg->u.s.icntxt = cpu_to_le32(drv->context); + msg->u.s.tcntxt = cpu_to_le32(tcntxt); + msg->body[0] = cpu_to_le32(evt_mask); - i2o_msg_post(c, m); + i2o_msg_post(c, msg); return 0; }; diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c index ee7075fa1ec3..329d482eee81 100644 --- a/drivers/message/i2o/pci.c +++ b/drivers/message/i2o/pci.c @@ -483,4 +483,5 @@ void __exit i2o_pci_exit(void) { pci_unregister_driver(&i2o_pci_driver); }; + MODULE_DEVICE_TABLE(pci, i2o_pci_ids); -- cgit v1.2.3 From 793fd15d9fafe5b1c71e50d3c041f1463895dbde Mon Sep 17 00:00:00 2001 From: Markus Lidel Date: Fri, 6 Jan 2006 00:19:30 -0800 Subject: [PATCH] I2O: SPARC fixes Fix lot of BE <-> LE bugs which prevent it from working on SPARC. 
Signed-off-by: Markus Lidel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/message/i2o/Kconfig | 12 ++++ drivers/message/i2o/device.c | 130 +++++++++++++++++++--------------------- drivers/message/i2o/exec-osm.c | 15 +++-- drivers/message/i2o/i2o_block.c | 20 +++---- drivers/message/i2o/i2o_scsi.c | 35 ++++++----- 5 files changed, 111 insertions(+), 101 deletions(-) (limited to 'drivers') diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig index 43a942a29c2e..fef677103880 100644 --- a/drivers/message/i2o/Kconfig +++ b/drivers/message/i2o/Kconfig @@ -24,6 +24,18 @@ config I2O If unsure, say N. +config I2O_LCT_NOTIFY_ON_CHANGES + bool "Enable LCT notification" + depends on I2O + default y + ---help--- + Only say N here if you have a I2O controller from SUN. The SUN + firmware doesn't support LCT notification on changes. If this option + is enabled on such a controller the driver will hang up in a endless + loop. On all other controllers say Y. + + If unsure, say Y. + config I2O_EXT_ADAPTEC bool "Enable Adaptec extensions" depends on I2O diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c index 002ae0ed8966..1db26215937f 100644 --- a/drivers/message/i2o/device.c +++ b/drivers/message/i2o/device.c @@ -341,56 +341,83 @@ int i2o_device_parse_lct(struct i2o_controller *c) { struct i2o_device *dev, *tmp; i2o_lct *lct; - int i; - int max; + u32 *dlct = c->dlct.virt; + int max = 0, i = 0; + u16 table_size; + u32 buf; down(&c->lct_lock); kfree(c->lct); - lct = c->dlct.virt; + buf = le32_to_cpu(*dlct++); + table_size = buf & 0xffff; - c->lct = kmalloc(lct->table_size * 4, GFP_KERNEL); - if (!c->lct) { + lct = c->lct = kmalloc(table_size * 4, GFP_KERNEL); + if (!lct) { up(&c->lct_lock); return -ENOMEM; } - if (lct->table_size * 4 > c->dlct.len) { - memcpy(c->lct, c->dlct.virt, c->dlct.len); - up(&c->lct_lock); - return -EAGAIN; - } - - memcpy(c->lct, c->dlct.virt, lct->table_size * 4); + lct->lct_ver = buf >> 28; + lct->boot_tid = buf >> 16 & 0xfff; + lct->table_size = table_size; + lct->change_ind = le32_to_cpu(*dlct++); + lct->iop_flags = le32_to_cpu(*dlct++); - lct = c->lct; - - max = (lct->table_size - 3) / 9; + table_size -= 3; pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max, lct->table_size); - /* remove devices, which are not in the LCT anymore */ - list_for_each_entry_safe(dev, tmp, &c->devices, list) { + while (table_size > 0) { + i2o_lct_entry *entry = &lct->lct_entry[max]; int found = 0; - for (i = 0; i < max; i++) { - if (lct->lct_entry[i].tid == dev->lct_data.tid) { + buf = le32_to_cpu(*dlct++); + entry->entry_size = buf & 0xffff; + entry->tid = buf >> 16 & 0xfff; + + entry->change_ind = le32_to_cpu(*dlct++); + entry->device_flags = le32_to_cpu(*dlct++); + + buf = le32_to_cpu(*dlct++); + entry->class_id = buf & 0xfff; + entry->version = buf >> 12 & 0xf; + entry->vendor_id = buf >> 16; + + entry->sub_class = le32_to_cpu(*dlct++); + + buf = le32_to_cpu(*dlct++); + entry->user_tid = buf & 0xfff; + entry->parent_tid = buf >> 12 & 0xfff; + entry->bios_info = buf >> 24; + + memcpy(&entry->identity_tag, dlct, 8); + dlct += 2; + + entry->event_capabilities = le32_to_cpu(*dlct++); + + /* add new devices, which are new in the LCT */ + list_for_each_entry_safe(dev, tmp, &c->devices, list) { + if (entry->tid == dev->lct_data.tid) { found = 1; break; } } if (!found) - i2o_device_remove(dev); + i2o_device_add(c, entry); + + table_size -= 9; + max++; } - /* add new devices, which are new in the LCT */ - for (i = 0; i < max; 
i++) { + /* remove devices, which are not in the LCT anymore */ + list_for_each_entry_safe(dev, tmp, &c->devices, list) { int found = 0; - list_for_each_entry_safe(dev, tmp, &c->devices, list) { + for (i = 0; i < max; i++) { if (lct->lct_entry[i].tid == dev->lct_data.tid) { found = 1; break; @@ -398,8 +425,9 @@ int i2o_device_parse_lct(struct i2o_controller *c) } if (!found) - i2o_device_add(c, &lct->lct_entry[i]); + i2o_device_remove(dev); } + up(&c->lct_lock); return 0; @@ -422,9 +450,6 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, int oplen, void *reslist, int reslen) { struct i2o_message *msg; - u32 *res32 = (u32 *) reslist; - u32 *restmp = (u32 *) reslist; - int len = 0; int i = 0; int rc; struct i2o_dma res; @@ -448,7 +473,6 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, msg->body[i++] = cpu_to_le32(0x00000000); msg->body[i++] = cpu_to_le32(0x4C000000 | oplen); /* OperationList */ memcpy(&msg->body[i], oplist, oplen); - i += (oplen / 4 + (oplen % 4 ? 1 : 0)); msg->body[i++] = cpu_to_le32(0xD0000000 | res.len); /* ResultList */ msg->body[i++] = cpu_to_le32(res.phys); @@ -466,36 +490,7 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, memcpy(reslist, res.virt, res.len); i2o_dma_free(dev, &res); - /* Query failed */ - if (rc) - return rc; - /* - * Calculate number of bytes of Result LIST - * We need to loop through each Result BLOCK and grab the length - */ - restmp = res32 + 1; - len = 1; - for (i = 0; i < (res32[0] & 0X0000FFFF); i++) { - if (restmp[0] & 0x00FF0000) { /* BlockStatus != SUCCESS */ - printk(KERN_WARNING - "%s - Error:\n ErrorInfoSize = 0x%02x, " - "BlockStatus = 0x%02x, BlockSize = 0x%04x\n", - (cmd == - I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET" : - "PARAMS_GET", res32[1] >> 24, - (res32[1] >> 16) & 0xFF, res32[1] & 0xFFFF); - - /* - * If this is the only request,than we return an error - */ - if ((res32[0] & 0x0000FFFF) == 1) { - return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */ - } - } - len += restmp[0] & 0x0000FFFF; /* Length of res BLOCK */ - restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */ - } - return (len << 2); /* bytes used by result list */ + return rc; } /* @@ -504,28 +499,25 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field, void *buf, int buflen) { - u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; + u32 opblk[] = { cpu_to_le32(0x00000001), + cpu_to_le32((u16) group << 16 | I2O_PARAMS_FIELD_GET), + cpu_to_le32((s16) field << 16 | 0x00000001) + }; u8 *resblk; /* 8 bytes for header */ - int size; - - if (field == -1) /* whole group */ - opblk[4] = -1; + int rc; resblk = kmalloc(buflen + 8, GFP_KERNEL | GFP_ATOMIC); if (!resblk) return -ENOMEM; - size = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk, - sizeof(opblk), resblk, buflen + 8); + rc = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk, + sizeof(opblk), resblk, buflen + 8); memcpy(buf, resblk + 8, buflen); /* cut off header */ kfree(resblk); - if (size > buflen) - return buflen; - - return size; + return rc; } /* diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c index 71a09332e7c0..d24548f4c033 100644 --- a/drivers/message/i2o/exec-osm.c +++ b/drivers/message/i2o/exec-osm.c @@ -77,7 +77,7 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void) wait = kmalloc(sizeof(*wait), GFP_KERNEL); if (!wait) - return ERR_PTR(-ENOMEM); + return NULL; memset(wait, 0, 
sizeof(*wait)); @@ -271,8 +271,8 @@ static ssize_t i2o_exec_show_vendor_id(struct device *d, struct i2o_device *dev = to_i2o_device(d); u16 id; - if (i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) { - sprintf(buf, "0x%04x", id); + if (!i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) { + sprintf(buf, "0x%04x", le16_to_cpu(id)); return strlen(buf) + 1; } @@ -293,8 +293,8 @@ static ssize_t i2o_exec_show_product_id(struct device *d, struct i2o_device *dev = to_i2o_device(d); u16 id; - if (i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) { - sprintf(buf, "0x%04x", id); + if (!i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) { + sprintf(buf, "0x%04x", le16_to_cpu(id)); return strlen(buf) + 1; } @@ -364,7 +364,9 @@ static void i2o_exec_lct_modified(struct i2o_controller *c) if (i2o_device_parse_lct(c) != -EAGAIN) change_ind = c->lct->change_ind + 1; +#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES i2o_exec_lct_notify(c, change_ind); +#endif }; /** @@ -512,7 +514,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind) dev = &c->pdev->dev; - if (i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL)) + if (i2o_dma_realloc + (dev, &c->dlct, le32_to_cpu(sb->expected_lct_size), GFP_KERNEL)) return -ENOMEM; msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index 2bd15c70773b..ed2df54bf464 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -1050,8 +1050,8 @@ static int i2o_block_probe(struct device *dev) int rc; u64 size; u32 blocksize; - u32 flags, status; u16 body_size = 4; + u16 power; unsigned short max_sectors; #ifdef CONFIG_I2O_EXT_ADAPTEC @@ -1109,22 +1109,20 @@ static int i2o_block_probe(struct device *dev) * Ask for the current media data. 
If that isn't supported * then we ask for the device capacity data */ - if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || - i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { - blk_queue_hardsect_size(queue, blocksize); + if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || + !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { + blk_queue_hardsect_size(queue, le32_to_cpu(blocksize)); } else osm_warn("unable to get blocksize of %s\n", gd->disk_name); - if (i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) || - i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) { - set_capacity(gd, size >> KERNEL_SECTOR_SHIFT); + if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) || + !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) { + set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT); } else osm_warn("could not get size of %s\n", gd->disk_name); - if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2)) - i2o_blk_dev->power = 0; - i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4); - i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4); + if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2)) + i2o_blk_dev->power = power; i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff); diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c index 7a784fd60804..24061dfd46e4 100644 --- a/drivers/message/i2o/i2o_scsi.c +++ b/drivers/message/i2o/i2o_scsi.c @@ -113,7 +113,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c) list_for_each_entry(i2o_dev, &c->devices, list) if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { - if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) + if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) && (type == 0x01)) /* SCSI bus */ max_channel++; } @@ -146,7 +146,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c) i = 0; list_for_each_entry(i2o_dev, &c->devices, list) if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { - if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) + if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) && (type == 0x01)) /* only SCSI bus */ i2o_shost->channel[i++] = i2o_dev; @@ -238,13 +238,15 @@ static int i2o_scsi_probe(struct device *dev) u8 type; struct i2o_device *d = i2o_shost->channel[0]; - if (i2o_parm_field_get(d, 0x0000, 0, &type, 1) + if (!i2o_parm_field_get(d, 0x0000, 0, &type, 1) && (type == 0x01)) /* SCSI bus */ - if (i2o_parm_field_get(d, 0x0200, 4, &id, 4)) { + if (!i2o_parm_field_get(d, 0x0200, 4, &id, 4)) { channel = 0; if (i2o_dev->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE) - lun = i2o_shost->lun++; + lun = + cpu_to_le64(i2o_shost-> + lun++); else lun = 0; } @@ -253,10 +255,10 @@ static int i2o_scsi_probe(struct device *dev) break; case I2O_CLASS_SCSI_PERIPHERAL: - if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4) < 0) + if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4)) return -EFAULT; - if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8) < 0) + if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8)) return -EFAULT; parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid); @@ -281,20 +283,22 @@ static int i2o_scsi_probe(struct device *dev) return -EFAULT; } - if (id >= scsi_host->max_id) { - osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", id, - scsi_host->max_id); + if (le32_to_cpu(id) >= scsi_host->max_id) { + osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", + le32_to_cpu(id), scsi_host->max_id); return -EFAULT; } - if (lun >= scsi_host->max_lun) { - 
osm_warn("SCSI device id (%d) >= max_lun of I2O host (%d)", - (unsigned int)lun, scsi_host->max_lun); + if (le64_to_cpu(lun) >= scsi_host->max_lun) { + osm_warn("SCSI device lun (%lu) >= max_lun of I2O host (%d)", + (long unsigned int)le64_to_cpu(lun), + scsi_host->max_lun); return -EFAULT; } scsi_dev = - __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev); + __scsi_add_device(i2o_shost->scsi_host, channel, le32_to_cpu(id), + le64_to_cpu(lun), i2o_dev); if (IS_ERR(scsi_dev)) { osm_warn("can not add SCSI device %03x\n", @@ -306,7 +310,8 @@ static int i2o_scsi_probe(struct device *dev) "scsi"); osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %d\n", - i2o_dev->lct_data.tid, channel, id, (unsigned int)lun); + i2o_dev->lct_data.tid, channel, le32_to_cpu(id), + (unsigned int)le64_to_cpu(lun)); return 0; }; -- cgit v1.2.3 From 24791bd48f643194d806654b587251b0f92233e8 Mon Sep 17 00:00:00 2001 From: Markus Lidel Date: Fri, 6 Jan 2006 00:19:31 -0800 Subject: [PATCH] I2O: Remove wrong I2O device class Removed wrong I2O device class, which was only needed to add sysfs attributes. Signed-off-by: Markus Lidel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/message/i2o/core.h | 2 + drivers/message/i2o/device.c | 144 ++++++++++++++++++------------------------- drivers/message/i2o/driver.c | 2 - drivers/message/i2o/iop.c | 34 ++-------- 4 files changed, 68 insertions(+), 114 deletions(-) (limited to 'drivers') diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h index 9eefedb16211..9aa9b91170b2 100644 --- a/drivers/message/i2o/core.h +++ b/drivers/message/i2o/core.h @@ -33,6 +33,8 @@ extern int __init i2o_pci_init(void); extern void __exit i2o_pci_exit(void); /* device */ +extern struct device_attribute i2o_device_attrs[]; + extern void i2o_device_remove(struct i2o_device *); extern int i2o_device_parse_lct(struct i2o_controller *); diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c index 1db26215937f..a5e260b7a3b6 100644 --- a/drivers/message/i2o/device.c +++ b/drivers/message/i2o/device.c @@ -142,8 +142,9 @@ static void i2o_device_release(struct device *dev) /** - * i2o_device_class_show_class_id - Displays class id of I2O device - * @cd: class device of which the class id should be displayed + * i2o_device_show_class_id - Displays class id of I2O device + * @dev: device of which the class id should be displayed + * @attr: pointer to device attribute * @buf: buffer into which the class id should be printed * * Returns the number of bytes which are printed into the buffer. @@ -159,15 +160,15 @@ static ssize_t i2o_device_show_class_id(struct device *dev, } /** - * i2o_device_class_show_tid - Displays TID of I2O device - * @cd: class device of which the TID should be displayed - * @buf: buffer into which the class id should be printed + * i2o_device_show_tid - Displays TID of I2O device + * @dev: device of which the TID should be displayed + * @attr: pointer to device attribute + * @buf: buffer into which the TID should be printed * * Returns the number of bytes which are printed into the buffer. 
*/ static ssize_t i2o_device_show_tid(struct device *dev, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, char *buf) { struct i2o_device *i2o_dev = to_i2o_device(dev); @@ -208,66 +209,6 @@ static struct i2o_device *i2o_device_alloc(void) return dev; } -/** - * i2o_setup_sysfs_links - Adds attributes to the I2O device - * @cd: I2O class device which is added to the I2O device class - * - * This function get called when a I2O device is added to the class. It - * creates the attributes for each device and creates user/parent symlink - * if necessary. - * - * Returns 0 on success or negative error code on failure. - */ -static void i2o_setup_sysfs_links(struct i2o_device *i2o_dev) -{ - struct i2o_controller *c = i2o_dev->iop; - struct i2o_device *tmp; - - /* create user entries for this device */ - tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid); - if (tmp && tmp != i2o_dev) - sysfs_create_link(&i2o_dev->device.kobj, - &tmp->device.kobj, "user"); - - /* create user entries refering to this device */ - list_for_each_entry(tmp, &c->devices, list) - if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid && - tmp != i2o_dev) - sysfs_create_link(&tmp->device.kobj, - &i2o_dev->device.kobj, "user"); - - /* create parent entries for this device */ - tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid); - if (tmp && tmp != i2o_dev) - sysfs_create_link(&i2o_dev->device.kobj, - &tmp->device.kobj, "parent"); - - /* create parent entries refering to this device */ - list_for_each_entry(tmp, &c->devices, list) - if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid && - tmp != i2o_dev) - sysfs_create_link(&tmp->device.kobj, - &i2o_dev->device.kobj, "parent"); -} - -static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev) -{ - struct i2o_controller *c = i2o_dev->iop; - struct i2o_device *tmp; - - sysfs_remove_link(&i2o_dev->device.kobj, "parent"); - sysfs_remove_link(&i2o_dev->device.kobj, "user"); - - list_for_each_entry(tmp, &c->devices, list) { - if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) - sysfs_remove_link(&tmp->device.kobj, "parent"); - if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid) - sysfs_remove_link(&tmp->device.kobj, "user"); - } -} - - - /** * i2o_device_add - allocate a new I2O device and add it to the IOP * @iop: I2O controller where the device is on @@ -282,33 +223,57 @@ static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev) static struct i2o_device *i2o_device_add(struct i2o_controller *c, i2o_lct_entry * entry) { - struct i2o_device *dev; + struct i2o_device *i2o_dev, *tmp; - dev = i2o_device_alloc(); - if (IS_ERR(dev)) { + i2o_dev = i2o_device_alloc(); + if (IS_ERR(i2o_dev)) { printk(KERN_ERR "i2o: unable to allocate i2o device\n"); - return dev; + return i2o_dev; } - dev->lct_data = *entry; - dev->iop = c; + i2o_dev->lct_data = *entry; - snprintf(dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit, - dev->lct_data.tid); + snprintf(i2o_dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit, + i2o_dev->lct_data.tid); - dev->device.parent = &c->device; + i2o_dev->iop = c; + i2o_dev->device.parent = &c->device; - device_register(&dev->device); + device_register(&i2o_dev->device); - list_add_tail(&dev->list, &c->devices); + list_add_tail(&i2o_dev->list, &c->devices); - i2o_setup_sysfs_links(dev); + /* create user entries for this device */ + tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid); + if (tmp && (tmp != i2o_dev)) + sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, + 
"user"); - i2o_driver_notify_device_add_all(dev); + /* create user entries refering to this device */ + list_for_each_entry(tmp, &c->devices, list) + if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid) + && (tmp != i2o_dev)) + sysfs_create_link(&tmp->device.kobj, + &i2o_dev->device.kobj, "user"); - pr_debug("i2o: device %s added\n", dev->device.bus_id); + /* create parent entries for this device */ + tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid); + if (tmp && (tmp != i2o_dev)) + sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, + "parent"); - return dev; + /* create parent entries refering to this device */ + list_for_each_entry(tmp, &c->devices, list) + if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) + && (tmp != i2o_dev)) + sysfs_create_link(&tmp->device.kobj, + &i2o_dev->device.kobj, "parent"); + + i2o_driver_notify_device_add_all(i2o_dev); + + pr_debug("i2o: device %s added\n", i2o_dev->device.bus_id); + + return i2o_dev; } /** @@ -321,9 +286,22 @@ static struct i2o_device *i2o_device_add(struct i2o_controller *c, */ void i2o_device_remove(struct i2o_device *i2o_dev) { + struct i2o_device *tmp; + struct i2o_controller *c = i2o_dev->iop; + i2o_driver_notify_device_remove_all(i2o_dev); - i2o_remove_sysfs_links(i2o_dev); + + sysfs_remove_link(&i2o_dev->device.kobj, "parent"); + sysfs_remove_link(&i2o_dev->device.kobj, "user"); + + list_for_each_entry(tmp, &c->devices, list) { + if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) + sysfs_remove_link(&tmp->device.kobj, "parent"); + if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid) + sysfs_remove_link(&tmp->device.kobj, "user"); + } list_del(&i2o_dev->list); + device_unregister(&i2o_dev->device); } diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c index 0fb9c4e2ad4c..25292b36e2d9 100644 --- a/drivers/message/i2o/driver.c +++ b/drivers/message/i2o/driver.c @@ -61,8 +61,6 @@ static int i2o_bus_match(struct device *dev, struct device_driver *drv) }; /* I2O bus type */ -extern struct device_attribute i2o_device_attrs[]; - struct bus_type i2o_bus_type = { .name = "i2o", .match = i2o_bus_match, diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c index f86abb42bf89..7411a0504bf2 100644 --- a/drivers/message/i2o/iop.c +++ b/drivers/message/i2o/iop.c @@ -806,7 +806,6 @@ void i2o_iop_remove(struct i2o_controller *c) list_for_each_entry_safe(dev, tmp, &c->devices, list) i2o_device_remove(dev); - class_device_unregister(c->classdev); device_del(&c->device); /* Ask the IOP to switch to RESET state */ @@ -1050,9 +1049,6 @@ static void i2o_iop_release(struct device *dev) i2o_iop_free(c); }; -/* I2O controller class */ -static struct class *i2o_controller_class; - /** * i2o_iop_alloc - Allocate and initialize a i2o_controller struct * @@ -1124,36 +1120,29 @@ int i2o_iop_add(struct i2o_controller *c) goto iop_reset; } - c->classdev = class_device_create(i2o_controller_class, NULL, MKDEV(0,0), - &c->device, "iop%d", c->unit); - if (IS_ERR(c->classdev)) { - osm_err("%s: could not add controller class\n", c->name); - goto device_del; - } - osm_info("%s: Activating I2O controller...\n", c->name); osm_info("%s: This may take a few minutes if there are many devices\n", c->name); if ((rc = i2o_iop_activate(c))) { osm_err("%s: could not activate controller\n", c->name); - goto class_del; + goto device_del; } osm_debug("%s: building sys table...\n", c->name); if ((rc = i2o_systab_build())) - goto class_del; + goto device_del; osm_debug("%s: online controller...\n", c->name); if 
((rc = i2o_iop_online(c))) - goto class_del; + goto device_del; osm_debug("%s: getting LCT...\n", c->name); if ((rc = i2o_exec_lct_get(c))) - goto class_del; + goto device_del; list_add(&c->list, &i2o_controllers); @@ -1163,9 +1152,6 @@ int i2o_iop_add(struct i2o_controller *c) return 0; - class_del: - class_device_unregister(c->classdev); - device_del: device_del(&c->device); @@ -1225,14 +1211,8 @@ static int __init i2o_iop_init(void) printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); - i2o_controller_class = class_create(THIS_MODULE, "i2o_controller"); - if (IS_ERR(i2o_controller_class)) { - osm_err("can't register class i2o_controller\n"); - goto exit; - } - if ((rc = i2o_driver_init())) - goto class_exit; + goto exit; if ((rc = i2o_exec_init())) goto driver_exit; @@ -1248,9 +1228,6 @@ static int __init i2o_iop_init(void) driver_exit: i2o_driver_exit(); - class_exit: - class_destroy(i2o_controller_class); - exit: return rc; } @@ -1265,7 +1242,6 @@ static void __exit i2o_iop_exit(void) i2o_pci_exit(); i2o_exec_exit(); i2o_driver_exit(); - class_destroy(i2o_controller_class); }; module_init(i2o_iop_init); -- cgit v1.2.3 From dcceafe25a5f47cf69e5b46b4da6f15186ec8386 Mon Sep 17 00:00:00 2001 From: Markus Lidel Date: Fri, 6 Jan 2006 00:19:32 -0800 Subject: [PATCH] I2O: Bugfixes - Removed some kmalloc's with __GFP_ZERO and replace it with memset() because it didn't work properly. - Fixed returned message frame in i2o_cfg_passthru() which caused raidutils to display wrong error message in case a disk was missing. - Fixed size of printk() in i2o_scsi.c. - Fixed get_device() and put_device() in probing of the I2O controller. Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/message/i2o/driver.c | 5 +++-- drivers/message/i2o/i2o_config.c | 29 ++++++++++++++--------------- drivers/message/i2o/i2o_scsi.c | 4 ++-- drivers/message/i2o/pci.c | 6 +----- 4 files changed, 20 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c index 25292b36e2d9..9c631c873dc6 100644 --- a/drivers/message/i2o/driver.c +++ b/drivers/message/i2o/driver.c @@ -217,14 +217,15 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m) /* cut of header from message size (in 32-bit words) */ size = (le32_to_cpu(msg->u.head[0]) >> 16) - 5; - evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC | __GFP_ZERO); + evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC); if (!evt) return -ENOMEM; + memset(evt, 0, size * 4 + sizeof(*evt)); evt->size = size; evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt); evt->event_indicator = le32_to_cpu(msg->body[0]); - memcpy(&evt->tcntxt, &msg->u.s.tcntxt, size * 4); + memcpy(&evt->data, &msg->body[1], size * 4); list_for_each_entry_safe(dev, tmp, &c->devices, list) if (dev->lct_data.tid == tid) { diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c index 4fe73d628c5b..286fef3240c4 100644 --- a/drivers/message/i2o/i2o_config.c +++ b/drivers/message/i2o/i2o_config.c @@ -36,12 +36,12 @@ #include -#include "core.h" - #define SG_TABLESIZE 30 -static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, - unsigned long arg); +extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int); + +static int i2o_cfg_ioctl(struct inode *, struct file *, unsigned int, + unsigned long); static spinlock_t i2o_config_lock; @@ -593,9 +593,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, sg_offset = (msg->u.head[0] >> 4) & 0x0f; - msg->u.s.icntxt 
= cpu_to_le32(i2o_config_driver.context); - msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, reply)); - memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); if (sg_offset) { struct sg_simple_element *sg; @@ -629,7 +626,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, goto cleanup; } sg_size = sg[i].flag_count & 0xffffff; - p = &(sg_list[sg_index++]); + p = &(sg_list[sg_index]); /* Allocate memory for the transfer */ if (i2o_dma_alloc (&c->pdev->dev, p, sg_size, @@ -640,6 +637,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, rcode = -ENOMEM; goto sg_list_cleanup; } + sg_index++; /* Copy in the user's SG buffer if necessary */ if (sg[i]. flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) { @@ -661,8 +659,10 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, } rcode = i2o_msg_post_wait(c, msg, 60); - if (rcode) + if (rcode) { + reply[4] = ((u32) rcode) << 24; goto sg_list_cleanup; + } if (sg_offset) { u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE]; @@ -712,6 +712,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, } } + sg_list_cleanup: /* Copy back the reply to user space */ if (reply_size) { // we wrote our own values for context - now restore the user supplied ones @@ -729,7 +730,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, } } - sg_list_cleanup: for (i = 0; i < sg_index; i++) i2o_dma_free(&c->pdev->dev, &sg_list[i]); @@ -827,9 +827,6 @@ static int i2o_cfg_passthru(unsigned long arg) sg_offset = (msg->u.head[0] >> 4) & 0x0f; - msg->u.s.icntxt = cpu_to_le32(i2o_config_driver.context); - msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, reply)); - memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); if (sg_offset) { struct sg_simple_element *sg; @@ -892,8 +889,10 @@ static int i2o_cfg_passthru(unsigned long arg) } rcode = i2o_msg_post_wait(c, msg, 60); - if (rcode) + if (rcode) { + reply[4] = ((u32) rcode) << 24; goto sg_list_cleanup; + } if (sg_offset) { u32 msg[128]; @@ -943,6 +942,7 @@ static int i2o_cfg_passthru(unsigned long arg) } } + sg_list_cleanup: /* Copy back the reply to user space */ if (reply_size) { // we wrote our own values for context - now restore the user supplied ones @@ -959,7 +959,6 @@ static int i2o_cfg_passthru(unsigned long arg) } } - sg_list_cleanup: for (i = 0; i < sg_index; i++) kfree(sg_list[i]); diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c index 24061dfd46e4..76b9516b1934 100644 --- a/drivers/message/i2o/i2o_scsi.c +++ b/drivers/message/i2o/i2o_scsi.c @@ -309,9 +309,9 @@ static int i2o_scsi_probe(struct device *dev) sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj, "scsi"); - osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %d\n", + osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %ld\n", i2o_dev->lct_data.tid, channel, le32_to_cpu(id), - (unsigned int)le64_to_cpu(lun)); + (long unsigned int)le64_to_cpu(lun)); return 0; }; diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c index 329d482eee81..c5b656cdea7c 100644 --- a/drivers/message/i2o/pci.c +++ b/drivers/message/i2o/pci.c @@ -339,7 +339,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, pci_name(pdev)); c->pdev = pdev; - c->device.parent = get_device(&pdev->dev); + c->device.parent = &pdev->dev; /* Cards that fall apart if you hit them with large I/O loads... 
*/ if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) { @@ -410,8 +410,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, if ((rc = i2o_iop_add(c))) goto uninstall; - get_device(&c->device); - if (i960) pci_write_config_word(i960, 0x42, 0x03ff); @@ -424,7 +422,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, i2o_pci_free(c); free_controller: - put_device(c->device.parent); i2o_iop_free(c); disable: @@ -454,7 +451,6 @@ static void __devexit i2o_pci_remove(struct pci_dev *pdev) printk(KERN_INFO "%s: Controller removed.\n", c->name); - put_device(c->device.parent); put_device(&c->device); }; -- cgit v1.2.3 From 2e1973a3cd0b9fe31469be62df3583bdc5a34f51 Mon Sep 17 00:00:00 2001 From: Markus Lidel Date: Fri, 6 Jan 2006 00:19:32 -0800 Subject: [PATCH] I2O: Beautifying Fix some typos and minor code beautifying. Signed-off-by: Markus Lidel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/message/i2o/bus-osm.c | 2 +- drivers/message/i2o/config-osm.c | 2 +- drivers/message/i2o/core.h | 7 ++----- drivers/message/i2o/device.c | 9 +++++---- drivers/message/i2o/driver.c | 2 +- drivers/message/i2o/i2o_block.c | 8 +++++--- drivers/message/i2o/i2o_proc.c | 2 +- drivers/message/i2o/i2o_scsi.c | 2 +- drivers/message/i2o/iop.c | 12 +++--------- 9 files changed, 20 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c index ce039d322fd0..ac06f10c54ec 100644 --- a/drivers/message/i2o/bus-osm.c +++ b/drivers/message/i2o/bus-osm.c @@ -17,7 +17,7 @@ #include #define OSM_NAME "bus-osm" -#define OSM_VERSION "$Rev$" +#define OSM_VERSION "1.317" #define OSM_DESCRIPTION "I2O Bus Adapter OSM" static struct i2o_driver i2o_bus_driver; diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c index 10432f665201..b613890026be 100644 --- a/drivers/message/i2o/config-osm.c +++ b/drivers/message/i2o/config-osm.c @@ -22,7 +22,7 @@ #include #define OSM_NAME "config-osm" -#define OSM_VERSION "1.248" +#define OSM_VERSION "1.317" #define OSM_DESCRIPTION "I2O Configuration OSM" /* access mode user rw */ diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h index 9aa9b91170b2..edab686657f3 100644 --- a/drivers/message/i2o/core.h +++ b/drivers/message/i2o/core.h @@ -14,8 +14,6 @@ */ /* Exec-OSM */ -extern struct bus_type i2o_bus_type; - extern struct i2o_driver i2o_exec_driver; extern int i2o_exec_lct_get(struct i2o_controller *); @@ -23,6 +21,8 @@ extern int __init i2o_exec_init(void); extern void __exit i2o_exec_exit(void); /* driver */ +extern struct bus_type i2o_bus_type; + extern int i2o_driver_dispatch(struct i2o_controller *, u32); extern int __init i2o_driver_init(void); @@ -45,9 +45,6 @@ extern void i2o_iop_free(struct i2o_controller *); extern int i2o_iop_add(struct i2o_controller *); extern void i2o_iop_remove(struct i2o_controller *); -/* config */ -extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int); - /* control registers relative to c->base */ #define I2O_IRQ_STATUS 0x30 #define I2O_IRQ_MASK 0x34 diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c index a5e260b7a3b6..773b0a4cfe21 100644 --- a/drivers/message/i2o/device.c +++ b/drivers/message/i2o/device.c @@ -176,6 +176,7 @@ static ssize_t i2o_device_show_tid(struct device *dev, return strlen(buf) + 1; } +/* I2O device attributes */ struct device_attribute i2o_device_attrs[] = { __ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL), __ATTR(tid, 
S_IRUGO, i2o_device_show_tid, NULL), @@ -505,12 +506,12 @@ int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field, * else return specific fields * ibuf contains fieldindexes * - * if oper == I2O_PARAMS_LIST_GET, get from specific rows - * if fieldcount == -1 return all fields + * if oper == I2O_PARAMS_LIST_GET, get from specific rows + * if fieldcount == -1 return all fields * ibuf contains rowcount, keyvalues - * else return specific fields + * else return specific fields * fieldcount is # of fieldindexes - * ibuf contains fieldindexes, rowcount, keyvalues + * ibuf contains fieldindexes, rowcount, keyvalues * * You could also use directly function i2o_issue_params(). */ diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c index 9c631c873dc6..45f4119a75f6 100644 --- a/drivers/message/i2o/driver.c +++ b/drivers/message/i2o/driver.c @@ -64,7 +64,7 @@ static int i2o_bus_match(struct device *dev, struct device_driver *drv) struct bus_type i2o_bus_type = { .name = "i2o", .match = i2o_bus_match, - .dev_attrs = i2o_device_attrs, + .dev_attrs = i2o_device_attrs }; /** diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index ed2df54bf464..3e865b793f2c 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -59,10 +59,12 @@ #include #include +#include + #include "i2o_block.h" #define OSM_NAME "block-osm" -#define OSM_VERSION "1.287" +#define OSM_VERSION "1.316" #define OSM_DESCRIPTION "I2O Block Device OSM" static struct i2o_driver i2o_block_driver; @@ -845,10 +847,10 @@ static int i2o_block_transfer(struct request *req) * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME */ if (rq_data_dir(req) == READ) { - cmd[0] = 0x28; + cmd[0] = READ_10; scsi_flags = 0x60a0000a; } else { - cmd[0] = 0x2A; + cmd[0] = WRITE_10; scsi_flags = 0xa0a0000a; } diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c index d559a1758363..2a0c42b8cda5 100644 --- a/drivers/message/i2o/i2o_proc.c +++ b/drivers/message/i2o/i2o_proc.c @@ -28,7 +28,7 @@ */ #define OSM_NAME "proc-osm" -#define OSM_VERSION "1.145" +#define OSM_VERSION "1.316" #define OSM_DESCRIPTION "I2O ProcFS OSM" #define I2O_MAX_MODULES 4 diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c index 76b9516b1934..f9e5a23697a1 100644 --- a/drivers/message/i2o/i2o_scsi.c +++ b/drivers/message/i2o/i2o_scsi.c @@ -70,7 +70,7 @@ #include #define OSM_NAME "scsi-osm" -#define OSM_VERSION "1.282" +#define OSM_VERSION "1.316" #define OSM_DESCRIPTION "I2O SCSI Peripheral OSM" static struct i2o_driver i2o_scsi_driver; diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c index 7411a0504bf2..0e465186196c 100644 --- a/drivers/message/i2o/iop.c +++ b/drivers/message/i2o/iop.c @@ -32,7 +32,7 @@ #include "core.h" #define OSM_NAME "i2o" -#define OSM_VERSION "1.288" +#define OSM_VERSION "1.316" #define OSM_DESCRIPTION "I2O subsystem" /* global I2O controller list */ @@ -730,10 +730,6 @@ static int i2o_iop_systab_set(struct i2o_controller *c) * Provide three SGL-elements: * System table (SysTab), Private memory space declaration and * Private i/o space declaration - * - * FIXME: is this still true? - * Nasty one here. We can't use dma_alloc_coherent to send the - * same table to everyone. 
We have to go remap it for them all */ msg->body[0] = cpu_to_le32(c->unit + 2); @@ -756,8 +752,6 @@ static int i2o_iop_systab_set(struct i2o_controller *c) else osm_debug("%s: SysTab set.\n", c->name); - i2o_status_get(c); // Entered READY state - return rc; } @@ -767,7 +761,7 @@ static int i2o_iop_systab_set(struct i2o_controller *c) * * Send the system table and enable the I2O controller. * - * Returns 0 on success or negativer error code on failure. + * Returns 0 on success or negative error code on failure. */ static int i2o_iop_online(struct i2o_controller *c) { @@ -977,7 +971,7 @@ int i2o_status_get(struct i2o_controller *c) * The HRT contains information about possible hidden devices but is * mostly useless to us. * - * Returns 0 on success or negativer error code on failure. + * Returns 0 on success or negative error code on failure. */ static int i2o_hrt_get(struct i2o_controller *c) { -- cgit v1.2.3 From f6ed39a6e1a88240eec629a3da17c3a47ada3b89 Mon Sep 17 00:00:00 2001 From: Markus Lidel Date: Fri, 6 Jan 2006 00:19:33 -0800 Subject: [PATCH] I2O: Optimizing - make i2o_iop_free() static inline (from Adrian Bunk) - changed kmalloc() + memset(0) into kzalloc() Signed-off-by: Markus Lidel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/message/i2o/config-osm.c | 2 +- drivers/message/i2o/core.h | 11 ++++++++++- drivers/message/i2o/device.c | 4 +--- drivers/message/i2o/driver.c | 7 ++----- drivers/message/i2o/exec-osm.c | 4 +--- drivers/message/i2o/i2o_block.c | 5 ++--- drivers/message/i2o/i2o_config.c | 6 ++---- drivers/message/i2o/iop.c | 18 +++--------------- 8 files changed, 22 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c index b613890026be..3bba7aa82e58 100644 --- a/drivers/message/i2o/config-osm.c +++ b/drivers/message/i2o/config-osm.c @@ -22,7 +22,7 @@ #include #define OSM_NAME "config-osm" -#define OSM_VERSION "1.317" +#define OSM_VERSION "1.323" #define OSM_DESCRIPTION "I2O Configuration OSM" /* access mode user rw */ diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h index edab686657f3..90628562851e 100644 --- a/drivers/message/i2o/core.h +++ b/drivers/message/i2o/core.h @@ -40,7 +40,16 @@ extern int i2o_device_parse_lct(struct i2o_controller *); /* IOP */ extern struct i2o_controller *i2o_iop_alloc(void); -extern void i2o_iop_free(struct i2o_controller *); + +/** + * i2o_iop_free - Free the i2o_controller struct + * @c: I2O controller to free + */ +static inline void i2o_iop_free(struct i2o_controller *c) +{ + i2o_pool_free(&c->in_msg); + kfree(c); +} extern int i2o_iop_add(struct i2o_controller *); extern void i2o_iop_remove(struct i2o_controller *); diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c index 773b0a4cfe21..34976b26b265 100644 --- a/drivers/message/i2o/device.c +++ b/drivers/message/i2o/device.c @@ -195,12 +195,10 @@ static struct i2o_device *i2o_device_alloc(void) { struct i2o_device *dev; - dev = kmalloc(sizeof(*dev), GFP_KERNEL); + dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); - memset(dev, 0, sizeof(*dev)); - INIT_LIST_HEAD(&dev->list); init_MUTEX(&dev->lock); diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c index 45f4119a75f6..64130227574f 100644 --- a/drivers/message/i2o/driver.c +++ b/drivers/message/i2o/driver.c @@ -217,10 +217,9 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m) /* cut of header from message size (in 32-bit words) */ size = 
(le32_to_cpu(msg->u.head[0]) >> 16) - 5; - evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC); + evt = kzalloc(size * 4 + sizeof(*evt), GFP_ATOMIC); if (!evt) return -ENOMEM; - memset(evt, 0, size * 4 + sizeof(*evt)); evt->size = size; evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt); @@ -348,12 +347,10 @@ int __init i2o_driver_init(void) osm_info("max drivers = %d\n", i2o_max_drivers); i2o_drivers = - kmalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL); + kzalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL); if (!i2o_drivers) return -ENOMEM; - memset(i2o_drivers, 0, i2o_max_drivers * sizeof(*i2o_drivers)); - rc = bus_register(&i2o_bus_type); if (rc < 0) diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c index d24548f4c033..d418ad35dcd0 100644 --- a/drivers/message/i2o/exec-osm.c +++ b/drivers/message/i2o/exec-osm.c @@ -75,12 +75,10 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void) { struct i2o_exec_wait *wait; - wait = kmalloc(sizeof(*wait), GFP_KERNEL); + wait = kzalloc(sizeof(*wait), GFP_KERNEL); if (!wait) return NULL; - memset(wait, 0, sizeof(*wait)); - INIT_LIST_HEAD(&wait->list); return wait; diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index 3e865b793f2c..c5807d6e85cc 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -64,7 +64,7 @@ #include "i2o_block.h" #define OSM_NAME "block-osm" -#define OSM_VERSION "1.316" +#define OSM_VERSION "1.325" #define OSM_DESCRIPTION "I2O Block Device OSM" static struct i2o_driver i2o_block_driver; @@ -981,13 +981,12 @@ static struct i2o_block_device *i2o_block_device_alloc(void) struct request_queue *queue; int rc; - dev = kmalloc(sizeof(*dev), GFP_KERNEL); + dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { osm_err("Insufficient memory to allocate I2O Block disk.\n"); rc = -ENOMEM; goto exit; } - memset(dev, 0, sizeof(*dev)); INIT_LIST_HEAD(&dev->open_queue); spin_lock_init(&dev->lock); diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c index 286fef3240c4..89daf67b764d 100644 --- a/drivers/message/i2o/i2o_config.c +++ b/drivers/message/i2o/i2o_config.c @@ -583,13 +583,12 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, reply_size >>= 16; reply_size <<= 2; - reply = kmalloc(reply_size, GFP_KERNEL); + reply = kzalloc(reply_size, GFP_KERNEL); if (!reply) { printk(KERN_WARNING "%s: Could not allocate reply buffer\n", c->name); return -ENOMEM; } - memset(reply, 0, reply_size); sg_offset = (msg->u.head[0] >> 4) & 0x0f; @@ -817,13 +816,12 @@ static int i2o_cfg_passthru(unsigned long arg) reply_size >>= 16; reply_size <<= 2; - reply = kmalloc(reply_size, GFP_KERNEL); + reply = kzalloc(reply_size, GFP_KERNEL); if (!reply) { printk(KERN_WARNING "%s: Could not allocate reply buffer\n", c->name); return -ENOMEM; } - memset(reply, 0, reply_size); sg_offset = (msg->u.head[0] >> 4) & 0x0f; diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c index 0e465186196c..492167446936 100644 --- a/drivers/message/i2o/iop.c +++ b/drivers/message/i2o/iop.c @@ -32,7 +32,7 @@ #include "core.h" #define OSM_NAME "i2o" -#define OSM_VERSION "1.316" +#define OSM_VERSION "1.325" #define OSM_DESCRIPTION "I2O subsystem" /* global I2O controller list */ @@ -838,12 +838,11 @@ static int i2o_systab_build(void) i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers * sizeof(struct i2o_sys_tbl_entry); - systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL); + systab = i2o_systab.virt = 
kzalloc(i2o_systab.len, GFP_KERNEL); if (!systab) { osm_err("unable to allocate memory for System Table\n"); return -ENOMEM; } - memset(systab, 0, i2o_systab.len); systab->version = I2OVERSION; systab->change_ind = change_ind + 1; @@ -1019,16 +1018,6 @@ static int i2o_hrt_get(struct i2o_controller *c) return -EBUSY; } -/** - * i2o_iop_free - Free the i2o_controller struct - * @c: I2O controller to free - */ -void i2o_iop_free(struct i2o_controller *c) -{ - i2o_pool_free(&c->in_msg); - kfree(c); -}; - /** * i2o_iop_release - release the memory for a I2O controller * @dev: I2O controller which should be released @@ -1058,13 +1047,12 @@ struct i2o_controller *i2o_iop_alloc(void) struct i2o_controller *c; char poolname[32]; - c = kmalloc(sizeof(*c), GFP_KERNEL); + c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) { osm_err("i2o: Insufficient memory to allocate a I2O controller." "\n"); return ERR_PTR(-ENOMEM); } - memset(c, 0, sizeof(*c)); c->unit = unit++; sprintf(c->name, "iop%d", c->unit); -- cgit v1.2.3 From 524e3b623a9228efbdb70484b5214f27a1ca985d Mon Sep 17 00:00:00 2001 From: Markus Lidel Date: Fri, 6 Jan 2006 00:19:34 -0800 Subject: [PATCH] I2O: Lindent run Signed-off-by: Markus Lidel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/message/i2o/device.c | 21 +++++++++------------ drivers/message/i2o/exec-osm.c | 2 +- drivers/message/i2o/i2o_lan.h | 38 +++++++++++++++++++------------------- 3 files changed, 29 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c index 34976b26b265..ee183053fa23 100644 --- a/drivers/message/i2o/device.c +++ b/drivers/message/i2o/device.c @@ -43,7 +43,7 @@ static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd, msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = - cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid); + cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid); msg->body[0] = cpu_to_le32(type); return i2o_msg_post_wait(dev->iop, msg, 60); @@ -123,7 +123,6 @@ int i2o_device_claim_release(struct i2o_device *dev) return rc; } - /** * i2o_device_release - release the memory for a I2O device * @dev: I2O device which should be released @@ -140,7 +139,6 @@ static void i2o_device_release(struct device *dev) kfree(i2o_dev); } - /** * i2o_device_show_class_id - Displays class id of I2O device * @dev: device of which the class id should be displayed @@ -250,10 +248,10 @@ static struct i2o_device *i2o_device_add(struct i2o_controller *c, /* create user entries refering to this device */ list_for_each_entry(tmp, &c->devices, list) - if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid) - && (tmp != i2o_dev)) - sysfs_create_link(&tmp->device.kobj, - &i2o_dev->device.kobj, "user"); + if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid) + && (tmp != i2o_dev)) + sysfs_create_link(&tmp->device.kobj, + &i2o_dev->device.kobj, "user"); /* create parent entries for this device */ tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid); @@ -263,10 +261,10 @@ static struct i2o_device *i2o_device_add(struct i2o_controller *c, /* create parent entries refering to this device */ list_for_each_entry(tmp, &c->devices, list) - if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) - && (tmp != i2o_dev)) - sysfs_create_link(&tmp->device.kobj, - &i2o_dev->device.kobj, "parent"); + if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) + && (tmp != i2o_dev)) + sysfs_create_link(&tmp->device.kobj, + 
&i2o_dev->device.kobj, "parent"); i2o_driver_notify_device_add_all(i2o_dev); @@ -410,7 +408,6 @@ int i2o_device_parse_lct(struct i2o_controller *c) return 0; } - /* * Run time support routines */ diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c index d418ad35dcd0..9bb9859f6dfe 100644 --- a/drivers/message/i2o/exec-osm.c +++ b/drivers/message/i2o/exec-osm.c @@ -33,7 +33,7 @@ #include #include #include -#include /* wait_event_interruptible_timeout() needs this */ +#include /* wait_event_interruptible_timeout() needs this */ #include /* HZ */ #include "core.h" diff --git a/drivers/message/i2o/i2o_lan.h b/drivers/message/i2o/i2o_lan.h index 561d63304d7e..6502b817df58 100644 --- a/drivers/message/i2o/i2o_lan.h +++ b/drivers/message/i2o/i2o_lan.h @@ -103,14 +103,14 @@ #define I2O_LAN_DSC_SUSPENDED 0x11 struct i2o_packet_info { - u32 offset : 24; - u32 flags : 8; - u32 len : 24; - u32 status : 8; + u32 offset:24; + u32 flags:8; + u32 len:24; + u32 status:8; }; struct i2o_bucket_descriptor { - u32 context; /* FIXME: 64bit support */ + u32 context; /* FIXME: 64bit support */ struct i2o_packet_info packet_info[1]; }; @@ -127,14 +127,14 @@ struct i2o_lan_local { u8 unit; struct i2o_device *i2o_dev; - struct fddi_statistics stats; /* see also struct net_device_stats */ - unsigned short (*type_trans)(struct sk_buff *, struct net_device *); - atomic_t buckets_out; /* nbr of unused buckets on DDM */ - atomic_t tx_out; /* outstanding TXes */ - u8 tx_count; /* packets in one TX message frame */ - u16 tx_max_out; /* DDM's Tx queue len */ - u8 sgl_max; /* max SGLs in one message frame */ - u32 m; /* IOP address of the batch msg frame */ + struct fddi_statistics stats; /* see also struct net_device_stats */ + unsigned short (*type_trans) (struct sk_buff *, struct net_device *); + atomic_t buckets_out; /* nbr of unused buckets on DDM */ + atomic_t tx_out; /* outstanding TXes */ + u8 tx_count; /* packets in one TX message frame */ + u16 tx_max_out; /* DDM's Tx queue len */ + u8 sgl_max; /* max SGLs in one message frame */ + u32 m; /* IOP address of the batch msg frame */ struct work_struct i2o_batch_send_task; int send_active; @@ -144,16 +144,16 @@ struct i2o_lan_local { spinlock_t tx_lock; - u32 max_size_mc_table; /* max number of multicast addresses */ + u32 max_size_mc_table; /* max number of multicast addresses */ /* LAN OSM configurable parameters are here: */ - u16 max_buckets_out; /* max nbr of buckets to send to DDM */ - u16 bucket_thresh; /* send more when this many used */ + u16 max_buckets_out; /* max nbr of buckets to send to DDM */ + u16 bucket_thresh; /* send more when this many used */ u16 rx_copybreak; - u8 tx_batch_mode; /* Set when using batch mode sends */ - u32 i2o_event_mask; /* To turn on interesting event flags */ + u8 tx_batch_mode; /* Set when using batch mode sends */ + u32 i2o_event_mask; /* To turn on interesting event flags */ }; -#endif /* _I2O_LAN_H */ +#endif /* _I2O_LAN_H */ -- cgit v1.2.3 From c660629059abbbd0eb56e12f9bb4494f01800bbc Mon Sep 17 00:00:00 2001 From: Marko Kohtala Date: Fri, 6 Jan 2006 00:19:43 -0800 Subject: [PATCH] parport: buffer overflow fix Fix potential buffer overflow in case the device ID did not end in semicolon. Also might fail to negotiate back to IEEE1284_MODE_COMPAT in case of failure. parport_device_id did not return what Documentation/parport-lowlevel.txt said, so I changed it to match it. 
Determining device ID length is overly complicated, but Tim Waugh recalled on linux-parport seeing some buggy device that might need it. Signed-off-by: Marko Kohtala Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/parport/probe.c | 193 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 130 insertions(+), 63 deletions(-) (limited to 'drivers') diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c index 4b48b31ec235..5c29e8222211 100644 --- a/drivers/parport/probe.c +++ b/drivers/parport/probe.c @@ -128,8 +128,131 @@ static void parse_data(struct parport *port, int device, char *str) kfree(txt); } +/* Read up to count-1 bytes of device id. Terminate buffer with + * '\0'. Buffer begins with two Device ID length bytes as given by + * device. */ +static ssize_t parport_read_device_id (struct parport *port, char *buffer, + size_t count) +{ + unsigned char length[2]; + unsigned lelen, belen; + size_t idlens[4]; + unsigned numidlens; + unsigned current_idlen; + ssize_t retval; + size_t len; + + /* First two bytes are MSB,LSB of inclusive length. */ + retval = parport_read (port, length, 2); + + if (retval < 0) + return retval; + if (retval != 2) + return -EIO; + + if (count < 2) + return 0; + memcpy(buffer, length, 2); + len = 2; + + /* Some devices wrongly send LE length, and some send it two + * bytes short. Construct a sorted array of lengths to try. */ + belen = (length[0] << 8) + length[1]; + lelen = (length[1] << 8) + length[0]; + idlens[0] = min(belen, lelen); + idlens[1] = idlens[0]+2; + if (belen != lelen) { + int off = 2; + /* Don't try lenghts of 0x100 and 0x200 as 1 and 2 */ + if (idlens[0] <= 2) + off = 0; + idlens[off] = max(belen, lelen); + idlens[off+1] = idlens[off]+2; + numidlens = off+2; + } + else { + /* Some devices don't truly implement Device ID, but + * just return constant nibble forever. This catches + * also those cases. */ + if (idlens[0] == 0 || idlens[0] > 0xFFF) { + printk (KERN_DEBUG "%s: reported broken Device ID" + " length of %#zX bytes\n", + port->name, idlens[0]); + return -EIO; + } + numidlens = 2; + } + + /* Try to respect the given ID length despite all the bugs in + * the ID length. Read according to shortest possible ID + * first. */ + for (current_idlen = 0; current_idlen < numidlens; ++current_idlen) { + size_t idlen = idlens[current_idlen]; + if (idlen+1 >= count) + break; + + retval = parport_read (port, buffer+len, idlen-len); + + if (retval < 0) + return retval; + len += retval; + + if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) { + if (belen != len) { + printk (KERN_DEBUG "%s: Device ID was %d bytes" + " while device told it would be %d" + " bytes\n", + port->name, len, belen); + } + goto done; + } + + /* This might end reading the Device ID too + * soon. Hopefully the needed fields were already in + * the first 256 bytes or so that we must have read so + * far. */ + if (buffer[len-1] == ';') { + printk (KERN_DEBUG "%s: Device ID reading stopped" + " before device told data not available. " + "Current idlen %d of %d, len bytes %02X %02X\n", + port->name, current_idlen, numidlens, + length[0], length[1]); + goto done; + } + } + if (current_idlen < numidlens) { + /* Buffer not large enough, read to end of buffer. 
*/ + size_t idlen, len2; + if (len+1 < count) { + retval = parport_read (port, buffer+len, count-len-1); + if (retval < 0) + return retval; + len += retval; + } + /* Read the whole ID since some devices would not + * otherwise give back the Device ID from beginning + * next time when asked. */ + idlen = idlens[current_idlen]; + len2 = len; + while(len2 < idlen && retval > 0) { + char tmp[4]; + retval = parport_read (port, tmp, + min(sizeof tmp, idlen-len2)); + if (retval < 0) + return retval; + len2 += retval; + } + } + /* In addition, there are broken devices out there that don't + even finish off with a semi-colon. We do not need to care + about those at this time. */ + done: + buffer[len] = '\0'; + return len; +} + /* Get Std 1284 Device ID. */ -ssize_t parport_device_id (int devnum, char *buffer, size_t len) +ssize_t parport_device_id (int devnum, char *buffer, size_t count) { ssize_t retval = -ENXIO; struct pardevice *dev = parport_open (devnum, "Device ID probe", @@ -139,76 +262,20 @@ ssize_t parport_device_id (int devnum, char *buffer, size_t len) parport_claim_or_block (dev); - /* Negotiate to compatibility mode, and then to device ID mode. - * (This is in case we are already in device ID mode.) */ + /* Negotiate to compatibility mode, and then to device ID + * mode. (This so that we start form beginning of device ID if + * already in device ID mode.) */ parport_negotiate (dev->port, IEEE1284_MODE_COMPAT); retval = parport_negotiate (dev->port, IEEE1284_MODE_NIBBLE | IEEE1284_DEVICEID); if (!retval) { - int idlen; - unsigned char length[2]; - - /* First two bytes are MSB,LSB of inclusive length. */ - retval = parport_read (dev->port, length, 2); - - if (retval != 2) goto end_id; - - idlen = (length[0] << 8) + length[1] - 2; - /* - * Check if the caller-allocated buffer is large enough - * otherwise bail out or there will be an at least off by one. - */ - if (idlen + 1 < len) - len = idlen; - else { - retval = -EINVAL; - goto out; - } - retval = parport_read (dev->port, buffer, len); - - if (retval != len) - printk (KERN_DEBUG "%s: only read %Zd of %Zd ID bytes\n", - dev->port->name, retval, - len); - - /* Some printer manufacturers mistakenly believe that - the length field is supposed to be _exclusive_. - In addition, there are broken devices out there - that don't even finish off with a semi-colon. */ - if (buffer[len - 1] != ';') { - ssize_t diff; - diff = parport_read (dev->port, buffer + len, 2); - retval += diff; - - if (diff) - printk (KERN_DEBUG - "%s: device reported incorrect " - "length field (%d, should be %Zd)\n", - dev->port->name, idlen, retval); - else { - /* One semi-colon short of a device ID. */ - buffer[len++] = ';'; - printk (KERN_DEBUG "%s: faking semi-colon\n", - dev->port->name); - - /* If we get here, I don't think we - need to worry about the possible - standard violation of having read - more than we were told to. The - device is non-compliant anyhow. 
*/ - } - } - - end_id: - buffer[len] = '\0'; + retval = parport_read_device_id (dev->port, buffer, count); parport_negotiate (dev->port, IEEE1284_MODE_COMPAT); + if (retval > 2) + parse_data (dev->port, dev->daisy, buffer+2); } - if (retval > 2) - parse_data (dev->port, dev->daisy, buffer); - -out: parport_release (dev); parport_close (dev); return retval; -- cgit v1.2.3 From 742ec650e9b63ea61891455bb6f76bac37025c78 Mon Sep 17 00:00:00 2001 From: Marko Kohtala Date: Fri, 6 Jan 2006 00:19:44 -0800 Subject: [PATCH] parport: phase fixes Did not move the parport interface properly into IEEE1284_PH_REV_IDLE phase at end of data due to comparing bytes with nibbles. Internal phase IEEE1284_PH_HBUSY_DNA became unused, so remove it. Signed-off-by: Marko Kohtala Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/media/video/cpia_pp.c | 30 +++++++++----------- drivers/parport/ieee1284_ops.c | 62 ++++++++++++++++++++---------------------- 2 files changed, 42 insertions(+), 50 deletions(-) (limited to 'drivers') diff --git a/drivers/media/video/cpia_pp.c b/drivers/media/video/cpia_pp.c index ddf184f95d80..6861d408f1b3 100644 --- a/drivers/media/video/cpia_pp.c +++ b/drivers/media/video/cpia_pp.c @@ -170,16 +170,9 @@ static size_t cpia_read_nibble (struct parport *port, /* Does the error line indicate end of data? */ if (((i /*& 1*/) == 0) && (parport_read_status(port) & PARPORT_STATUS_ERROR)) { - port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; - DBG("%s: No more nibble data (%d bytes)\n", - port->name, i/2); - - /* Go to reverse idle phase. */ - parport_frob_control (port, - PARPORT_CONTROL_AUTOFD, - PARPORT_CONTROL_AUTOFD); - port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE; - break; + DBG("%s: No more nibble data (%d bytes)\n", + port->name, i/2); + goto end_of_data; } /* Event 7: Set nAutoFd low. */ @@ -227,18 +220,21 @@ static size_t cpia_read_nibble (struct parport *port, byte = nibble; } - i /= 2; /* i is now in bytes */ - if (i == len) { /* Read the last nibble without checking data avail. */ - port = port->physport; - if (parport_read_status (port) & PARPORT_STATUS_ERROR) - port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; + if (parport_read_status (port) & PARPORT_STATUS_ERROR) { + end_of_data: + /* Go to reverse idle phase. */ + parport_frob_control (port, + PARPORT_CONTROL_AUTOFD, + PARPORT_CONTROL_AUTOFD); + port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE; + } else - port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; + port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; } - return i; + return i/2; } /* CPiA nonstandard "Nibble Stream" mode (2 nibbles per cycle, instead of 1) diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c index ce1e2aad8b10..d6c77658231e 100644 --- a/drivers/parport/ieee1284_ops.c +++ b/drivers/parport/ieee1284_ops.c @@ -165,17 +165,7 @@ size_t parport_ieee1284_read_nibble (struct parport *port, /* Does the error line indicate end of data? */ if (((i & 1) == 0) && (parport_read_status(port) & PARPORT_STATUS_ERROR)) { - port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; - DPRINTK (KERN_DEBUG - "%s: No more nibble data (%d bytes)\n", - port->name, i/2); - - /* Go to reverse idle phase. */ - parport_frob_control (port, - PARPORT_CONTROL_AUTOFD, - PARPORT_CONTROL_AUTOFD); - port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE; - break; + goto end_of_data; } /* Event 7: Set nAutoFd low. 
*/ @@ -225,18 +215,25 @@ size_t parport_ieee1284_read_nibble (struct parport *port, byte = nibble; } - i /= 2; /* i is now in bytes */ - if (i == len) { /* Read the last nibble without checking data avail. */ - port = port->physport; - if (parport_read_status (port) & PARPORT_STATUS_ERROR) - port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; + if (parport_read_status (port) & PARPORT_STATUS_ERROR) { + end_of_data: + DPRINTK (KERN_DEBUG + "%s: No more nibble data (%d bytes)\n", + port->name, i/2); + + /* Go to reverse idle phase. */ + parport_frob_control (port, + PARPORT_CONTROL_AUTOFD, + PARPORT_CONTROL_AUTOFD); + port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE; + } else - port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; + port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; } - return i; + return i/2; #endif /* IEEE1284 support */ } @@ -256,17 +253,7 @@ size_t parport_ieee1284_read_byte (struct parport *port, /* Data available? */ if (parport_read_status (port) & PARPORT_STATUS_ERROR) { - port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; - DPRINTK (KERN_DEBUG - "%s: No more byte data (%Zd bytes)\n", - port->name, count); - - /* Go to reverse idle phase. */ - parport_frob_control (port, - PARPORT_CONTROL_AUTOFD, - PARPORT_CONTROL_AUTOFD); - port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE; - break; + goto end_of_data; } /* Event 14: Place data bus in high impedance state. */ @@ -318,11 +305,20 @@ size_t parport_ieee1284_read_byte (struct parport *port, if (count == len) { /* Read the last byte without checking data avail. */ - port = port->physport; - if (parport_read_status (port) & PARPORT_STATUS_ERROR) - port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; + if (parport_read_status (port) & PARPORT_STATUS_ERROR) { + end_of_data: + DPRINTK (KERN_DEBUG + "%s: No more byte data (%Zd bytes)\n", + port->name, count); + + /* Go to reverse idle phase. */ + parport_frob_control (port, + PARPORT_CONTROL_AUTOFD, + PARPORT_CONTROL_AUTOFD); + port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE; + } else - port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; + port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; } return count; -- cgit v1.2.3 From 310c8c324f988625a2880deab67607bf4e5aeb8a Mon Sep 17 00:00:00 2001 From: Marko Kohtala Date: Fri, 6 Jan 2006 00:19:45 -0800 Subject: [PATCH] parport: daisy chain end detection fix Daisy chain end detection failed at least with older daisy chain devices that do not implement the last device signal. Signed-off-by: Marko Kohtala Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/parport/daisy.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c index 075c7eb5c85d..6915114b9536 100644 --- a/drivers/parport/daisy.c +++ b/drivers/parport/daisy.c @@ -436,7 +436,7 @@ static int select_port (struct parport *port) static int assign_addrs (struct parport *port) { - unsigned char s, last_dev; + unsigned char s; unsigned char daisy; int thisdev = numdevs; int detected; @@ -472,10 +472,13 @@ static int assign_addrs (struct parport *port) } parport_write_data (port, 0x78); udelay (2); - last_dev = 0; /* We've just been speaking to a device, so we - know there must be at least _one_ out there. 
*/ + s = parport_read_status (port); - for (daisy = 0; daisy < 4; daisy++) { + for (daisy = 0; + (s & (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT)) + == (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT) + && daisy < 4; + ++daisy) { parport_write_data (port, daisy); udelay (2); parport_frob_control (port, @@ -485,14 +488,18 @@ static int assign_addrs (struct parport *port) parport_frob_control (port, PARPORT_CONTROL_STROBE, 0); udelay (1); - if (last_dev) - /* No more devices. */ - break; + add_dev (numdevs++, port, daisy); - last_dev = !(parport_read_status (port) - & PARPORT_STATUS_BUSY); + /* See if this device thought it was the last in the + * chain. */ + if (!(s & PARPORT_STATUS_BUSY)) + break; - add_dev (numdevs++, port, daisy); + /* We are seeing pass through status now. We see + last_dev from next device or if last_dev does not + work status lines from some non-daisy chain + device. */ + s = parport_read_status (port); } parport_write_data (port, 0xff); udelay (2); -- cgit v1.2.3 From c29a75ed0d94fae64b59345ea96e52424ae9c6a2 Mon Sep 17 00:00:00 2001 From: Marko Kohtala Date: Fri, 6 Jan 2006 00:19:46 -0800 Subject: [PATCH] parport: daisy chain device id reading fix Device ID reading from daisy chain devices failed because the daisy device could not be opened. Signed-off-by: Marko Kohtala Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/parport/daisy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c index 6915114b9536..37dc17933518 100644 --- a/drivers/parport/daisy.c +++ b/drivers/parport/daisy.c @@ -252,7 +252,7 @@ struct pardevice *parport_open (int devnum, const char *name, selected = port->daisy; parport_release (dev); - if (selected != port->daisy) { + if (selected != daisy) { /* No corresponding device. */ parport_unregister_device (dev); return NULL; -- cgit v1.2.3 From 7c9cc3be1094b267a2da2e0016cbd6ced663da6d Mon Sep 17 00:00:00 2001 From: Marko Kohtala Date: Fri, 6 Jan 2006 00:19:46 -0800 Subject: [PATCH] parport: parport_daisy_select return value fix parport_daisy_select returned wrong status that is read at wrong time during daisy command execution. Signed-off-by: Marko Kohtala Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/parport/daisy.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c index 37dc17933518..9109a40fc8c0 100644 --- a/drivers/parport/daisy.c +++ b/drivers/parport/daisy.c @@ -344,9 +344,9 @@ static int cpp_daisy (struct parport *port, int cmd) PARPORT_CONTROL_STROBE, PARPORT_CONTROL_STROBE); udelay (1); + s = parport_read_status (port); parport_frob_control (port, PARPORT_CONTROL_STROBE, 0); udelay (1); - s = parport_read_status (port); parport_write_data (port, 0xff); udelay (2); return s; @@ -395,15 +395,15 @@ int parport_daisy_select (struct parport *port, int daisy, int mode) case IEEE1284_MODE_EPP: case IEEE1284_MODE_EPPSL: case IEEE1284_MODE_EPPSWE: - return (cpp_daisy (port, 0x20 + daisy) & - PARPORT_STATUS_ERROR); + return !(cpp_daisy (port, 0x20 + daisy) & + PARPORT_STATUS_ERROR); // For these modes we should switch to ECP mode: case IEEE1284_MODE_ECP: case IEEE1284_MODE_ECPRLE: case IEEE1284_MODE_ECPSWE: - return (cpp_daisy (port, 0xd0 + daisy) & - PARPORT_STATUS_ERROR); + return !(cpp_daisy (port, 0xd0 + daisy) & + PARPORT_STATUS_ERROR); // Nothing was told for BECP in Daisy chain specification. 
// May be it's wise to use ECP? @@ -413,8 +413,8 @@ int parport_daisy_select (struct parport *port, int daisy, int mode) case IEEE1284_MODE_BYTE: case IEEE1284_MODE_COMPAT: default: - return (cpp_daisy (port, 0xe0 + daisy) & - PARPORT_STATUS_ERROR); + return !(cpp_daisy (port, 0xe0 + daisy) & + PARPORT_STATUS_ERROR); } } -- cgit v1.2.3 From b44d3bdd6fcf6233b381bf5bd0893ed235f497a9 Mon Sep 17 00:00:00 2001 From: Marko Kohtala Date: Fri, 6 Jan 2006 00:19:47 -0800 Subject: [PATCH] parport: use complete slab buffer Use the complete slab buffer that is allocated by kmalloc. Signed-off-by: Marko Kohtala Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/parport/daisy.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c index 9109a40fc8c0..9ee67321b630 100644 --- a/drivers/parport/daisy.c +++ b/drivers/parport/daisy.c @@ -144,9 +144,9 @@ again: add_dev (numdevs++, port, -1); /* Find out the legacy device's IEEE 1284 device ID. */ - deviceid = kmalloc (1000, GFP_KERNEL); + deviceid = kmalloc (1024, GFP_KERNEL); if (deviceid) { - if (parport_device_id (numdevs - 1, deviceid, 1000) > 2) + if (parport_device_id (numdevs - 1, deviceid, 1024) > 2) detected++; kfree (deviceid); @@ -508,11 +508,11 @@ static int assign_addrs (struct parport *port) detected); /* Ask the new devices to introduce themselves. */ - deviceid = kmalloc (1000, GFP_KERNEL); + deviceid = kmalloc (1024, GFP_KERNEL); if (!deviceid) return 0; for (daisy = 0; thisdev < numdevs; thisdev++, daisy++) - parport_device_id (thisdev, deviceid, 1000); + parport_device_id (thisdev, deviceid, 1024); kfree (deviceid); return detected; -- cgit v1.2.3 From a6767b7cc674ee39635db75ed2f6f65ed0012239 Mon Sep 17 00:00:00 2001 From: Marko Kohtala Date: Fri, 6 Jan 2006 00:19:48 -0800 Subject: [PATCH] parport: constification Trivial "const" additions to places in parport that truly are const. Signed-off-by: Marko Kohtala Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/parport/parport_pc.c | 30 ++++++++++++++++++------------ drivers/parport/probe.c | 6 +++--- 2 files changed, 21 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index c6493ad7c0c8..18e85ccdae67 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -1169,7 +1169,7 @@ dump_parport_state ("fwd idle", port); /* GCC is not inlining extern inline function later overwriten to non-inline, so we use outlined_ variants here. 
*/ -static struct parport_operations parport_pc_ops = +static const struct parport_operations parport_pc_ops = { .write_data = parport_pc_write_data, .read_data = parport_pc_read_data, @@ -1211,10 +1211,11 @@ static struct parport_operations parport_pc_ops = static void __devinit show_parconfig_smsc37c669(int io, int key) { int cr1,cr4,cra,cr23,cr26,cr27,i=0; - static const char *modes[]={ "SPP and Bidirectional (PS/2)", - "EPP and SPP", - "ECP", - "ECP and EPP" }; + static const char *const modes[]={ + "SPP and Bidirectional (PS/2)", + "EPP and SPP", + "ECP", + "ECP and EPP" }; outb(key,io); outb(key,io); @@ -1288,7 +1289,7 @@ static void __devinit show_parconfig_smsc37c669(int io, int key) static void __devinit show_parconfig_winbond(int io, int key) { int cr30,cr60,cr61,cr70,cr74,crf0,i=0; - static const char *modes[] = { + static const char *const modes[] = { "Standard (SPP) and Bidirectional(PS/2)", /* 0 */ "EPP-1.9 and SPP", "ECP", @@ -1297,7 +1298,9 @@ static void __devinit show_parconfig_winbond(int io, int key) "EPP-1.7 and SPP", /* 5 */ "undefined!", "ECP and EPP-1.7" }; - static char *irqtypes[] = { "pulsed low, high-Z", "follows nACK" }; + static char *const irqtypes[] = { + "pulsed low, high-Z", + "follows nACK" }; /* The registers are called compatible-PnP because the register layout is modelled after ISA-PnP, the access @@ -2396,7 +2399,8 @@ EXPORT_SYMBOL (parport_pc_unregister_port); /* ITE support maintained by Rich Liu */ static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq, - int autodma, struct parport_pc_via_data *via) + int autodma, + const struct parport_pc_via_data *via) { short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 }; struct resource *base_res; @@ -2524,7 +2528,8 @@ static struct parport_pc_via_data via_8231_data __devinitdata = { }; static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq, - int autodma, struct parport_pc_via_data *via) + int autodma, + const struct parport_pc_via_data *via) { u8 tmp, tmp2, siofunc; u8 ppcontrol = 0; @@ -2694,8 +2699,9 @@ enum parport_pc_sio_types { /* each element directly indexed from enum list, above */ static struct parport_pc_superio { - int (*probe) (struct pci_dev *pdev, int autoirq, int autodma, struct parport_pc_via_data *via); - struct parport_pc_via_data *via; + int (*probe) (struct pci_dev *pdev, int autoirq, int autodma, + const struct parport_pc_via_data *via); + const struct parport_pc_via_data *via; } parport_pc_superio_info[] __devinitdata = { { sio_via_probe, &via_686a_data, }, { sio_via_probe, &via_8231_data, }, @@ -2828,7 +2834,7 @@ static struct parport_pc_pci { /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, /* untested */ }; -static struct pci_device_id parport_pc_pci_tbl[] = { +static const struct pci_device_id parport_pc_pci_tbl[] = { /* Super-IO onboard chips */ { 0x1106, 0x0686, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_686a }, { 0x1106, 0x8231, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_8231 }, diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c index 5c29e8222211..b62aee8de3cb 100644 --- a/drivers/parport/probe.c +++ b/drivers/parport/probe.c @@ -11,9 +11,9 @@ #include #include -static struct { - char *token; - char *descr; +static const struct { + const char *token; + const char *descr; } classes[] = { { "", "Legacy device" }, { "PRINTER", "Printer" }, -- cgit v1.2.3 From 6a19b41b35bf45fc27a46dccf26005b3f44c1aa1 Mon Sep 17 00:00:00 2001 From: Marko Kohtala Date: Fri, 6 Jan 2006 00:19:49 -0800 Subject: [PATCH] parport: Kconfig dependency fixes Make 
drivers that use directly PC parport HW depend on PARPORT_PC rather than HW independent PARPORT. Signed-off-by: Marko Kohtala Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/Kconfig | 2 +- drivers/block/paride/Kconfig | 5 +++-- drivers/scsi/Kconfig | 8 ++++---- 3 files changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index c4b9d2adfc08..139cbba76180 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -117,7 +117,7 @@ config BLK_DEV_XD config PARIDE tristate "Parallel port IDE device support" - depends on PARPORT + depends on PARPORT_PC ---help--- There are many external CD-ROM and disk devices that connect through your computer's parallel port. Most of them are actually IDE devices diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig index 17ff40561257..c0d2854dd097 100644 --- a/drivers/block/paride/Kconfig +++ b/drivers/block/paride/Kconfig @@ -4,11 +4,12 @@ # PARIDE doesn't need PARPORT, but if PARPORT is configured as a module, # PARIDE must also be a module. The bogus CONFIG_PARIDE_PARPORT option # controls the choices given to the user ... +# PARIDE only supports PC style parports. Tough for USB or other parports... config PARIDE_PARPORT tristate depends on PARIDE!=n - default m if PARPORT=m - default y if PARPORT!=m + default m if PARPORT_PC=m + default y if PARPORT_PC!=m comment "Parallel IDE high-level drivers" depends on PARIDE diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 9e8254f0256c..3c606cf8c8ca 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -914,7 +914,7 @@ config SCSI_INIA100 config SCSI_PPA tristate "IOMEGA parallel port (ppa - older drives)" - depends on SCSI && PARPORT + depends on SCSI && PARPORT_PC ---help--- This driver supports older versions of IOMEGA's parallel port ZIP drive (a 100 MB removable media device). @@ -941,7 +941,7 @@ config SCSI_PPA config SCSI_IMM tristate "IOMEGA parallel port (imm - newer drives)" - depends on SCSI && PARPORT + depends on SCSI && PARPORT_PC ---help--- This driver supports newer versions of IOMEGA's parallel port ZIP drive (a 100 MB removable media device). @@ -968,7 +968,7 @@ config SCSI_IMM config SCSI_IZIP_EPP16 bool "ppa/imm option - Use slow (but safe) EPP-16" - depends on PARPORT && (SCSI_PPA || SCSI_IMM) + depends on SCSI_PPA || SCSI_IMM ---help--- EPP (Enhanced Parallel Port) is a standard for parallel ports which allows them to act as expansion buses that can handle up to 64 @@ -983,7 +983,7 @@ config SCSI_IZIP_EPP16 config SCSI_IZIP_SLOW_CTR bool "ppa/imm option - Assume slow parport control register" - depends on PARPORT && (SCSI_PPA || SCSI_IMM) + depends on SCSI_PPA || SCSI_IMM help Some parallel ports are known to have excessive delays between changing the parallel port control register and good data being -- cgit v1.2.3 From 6a85081d1c3ab7935c3ade8f4b2700a860d6fb2e Mon Sep 17 00:00:00 2001 From: Marko Kohtala Date: Fri, 6 Jan 2006 00:19:51 -0800 Subject: [PATCH] parport: include fixes Small cleanup of includes meant for older implementation. 
Signed-off-by: Marko Kohtala Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/net/plip.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/plip.c b/drivers/net/plip.c index 1bd22cd40c75..87ee3271b17d 100644 --- a/drivers/net/plip.c +++ b/drivers/net/plip.c @@ -98,7 +98,6 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n" #include #include #include -#include #include #include #include @@ -106,7 +105,6 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n" #include #include #include -#include #include #include #include -- cgit v1.2.3 From 94b82095d0f5d6a72a0c619f54645727ebf66642 Mon Sep 17 00:00:00 2001 From: Marko Kohtala Date: Fri, 6 Jan 2006 00:19:51 -0800 Subject: [PATCH] parport: export parport_get_port() Help external ppSCSI driver by exporting parport_get_port to match the parport_put_port. Signed-off-by: Marko Kohtala Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/parport/share.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 9cb3ab156b09..ea62bed6bc83 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -1002,6 +1002,7 @@ EXPORT_SYMBOL(parport_register_driver); EXPORT_SYMBOL(parport_unregister_driver); EXPORT_SYMBOL(parport_register_device); EXPORT_SYMBOL(parport_unregister_device); +EXPORT_SYMBOL(parport_get_port); EXPORT_SYMBOL(parport_put_port); EXPORT_SYMBOL(parport_find_number); EXPORT_SYMBOL(parport_find_base); -- cgit v1.2.3 From a1b9168d83962fbb05859c1ecaa57fd4f53cf38e Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Fri, 6 Jan 2006 00:19:52 -0800 Subject: [PATCH] simplify PARPORT_PC_PCMCIA dependencies Unless I miss something, this should be the simplest way to express the intended dependencies. Signed-off-by: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/parport/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig index 725a14119f2a..b8241561da45 100644 --- a/drivers/parport/Kconfig +++ b/drivers/parport/Kconfig @@ -77,7 +77,7 @@ config PARPORT_PC_SUPERIO config PARPORT_PC_PCMCIA tristate "Support for PCMCIA management for PC-style ports" - depends on PARPORT!=n && (PCMCIA!=n && PARPORT_PC=m && PARPORT_PC || PARPORT_PC=y && PCMCIA) + depends on PCMCIA && PARPORT_PC help Say Y here if you need PCMCIA support for your PC-style parallel ports. If unsure, say N. -- cgit v1.2.3 From 066bb8d03b6e52e4844d37145573d6a2bedaa339 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 6 Jan 2006 00:19:53 -0800 Subject: [PATCH] fix remaining list_for_each_safe_rcu in -mm (take 2) I missed a use of list_for_each_rcu_safe() in -mm tree. Here is an updated patch to fix it. This time tested on a machine that actually uses IPMI... (Thanks to Serge Hallyn for spotting this.) Signed-off-by: "Paul E. 
McKenney" Cc: Corey Minyard Cc: Matt Domsch Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/ipmi/ipmi_msghandler.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 1f56b4cf0f58..561430ed94af 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -787,7 +787,6 @@ int ipmi_destroy_user(ipmi_user_t user) int i; unsigned long flags; struct cmd_rcvr *rcvr; - struct list_head *entry1, *entry2; struct cmd_rcvr *rcvrs = NULL; user->valid = 1; @@ -812,8 +811,7 @@ int ipmi_destroy_user(ipmi_user_t user) * synchronize_rcu()) then free everything in that list. */ down(&intf->cmd_rcvrs_lock); - list_for_each_safe_rcu(entry1, entry2, &intf->cmd_rcvrs) { - rcvr = list_entry(entry1, struct cmd_rcvr, link); + list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { if (rcvr->user == user) { list_del_rcu(&rcvr->link); rcvr->next = rcvrs; -- cgit v1.2.3 From 637842cfdbe2b981f7088f7633e630570f58efaf Mon Sep 17 00:00:00 2001 From: David Teigland Date: Fri, 6 Jan 2006 00:20:00 -0800 Subject: [PATCH] device-mapper: add dm_find_md Abstract dm_find_md() from dm_get_mdptr() to allow use elsewhere. Signed-off-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 930b9fc27953..27cd234cf682 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -902,10 +902,9 @@ int dm_create_with_minor(unsigned int minor, struct mapped_device **result) return create_aux(minor, 1, result); } -void *dm_get_mdptr(dev_t dev) +static struct mapped_device *dm_find_md(dev_t dev) { struct mapped_device *md; - void *mdptr = NULL; unsigned minor = MINOR(dev); if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) @@ -914,12 +913,22 @@ void *dm_get_mdptr(dev_t dev) down(&_minor_lock); md = idr_find(&_minor_idr, minor); - - if (md && (dm_disk(md)->first_minor == minor)) - mdptr = md->interface_ptr; + if (!md || (dm_disk(md)->first_minor != minor)) + md = NULL; up(&_minor_lock); + return md; +} + +void *dm_get_mdptr(dev_t dev) +{ + struct mapped_device *md; + void *mdptr = NULL; + + md = dm_find_md(dev); + if (md) + mdptr = md->interface_ptr; return mdptr; } -- cgit v1.2.3 From d229a9589ff3b988d3f999cdcfa350f97a372673 Mon Sep 17 00:00:00 2001 From: David Teigland Date: Fri, 6 Jan 2006 00:20:01 -0800 Subject: [PATCH] device-mapper: add dm_get_md Add dm_get_dev() to get a mapped device given its dev_t. 
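For context, the new helper pairs with the existing dm_put() reference drop. A minimal usage sketch (illustrative only, not part of the patch; the hypothetical caller name is invented):

#include "dm.h"			/* dm_get_md()/dm_put() prototypes per the diff below */
#include <linux/errno.h>

/* illustrative only: look up a mapped device by dev_t and pin it */
static int do_something_with(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);	/* takes a reference */

	if (!md)
		return -ENXIO;
	/* ... operate on md while holding the reference ... */
	dm_put(md);					/* drop it when done */
	return 0;
}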
Signed-off-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm.c | 10 ++++++++++ drivers/md/dm.h | 1 + 2 files changed, 11 insertions(+) (limited to 'drivers') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 27cd234cf682..9e8c1edd89dd 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -921,6 +921,16 @@ static struct mapped_device *dm_find_md(dev_t dev) return md; } +struct mapped_device *dm_get_md(dev_t dev) +{ + struct mapped_device *md = dm_find_md(dev); + + if (md) + dm_get(md); + + return md; +} + void *dm_get_mdptr(dev_t dev) { struct mapped_device *md; diff --git a/drivers/md/dm.h b/drivers/md/dm.h index e38c3fc1a1db..ab078a249396 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -58,6 +58,7 @@ int dm_create(struct mapped_device **md); int dm_create_with_minor(unsigned int minor, struct mapped_device **md); void dm_set_mdptr(struct mapped_device *md, void *ptr); void *dm_get_mdptr(dev_t dev); +struct mapped_device *dm_get_md(dev_t dev); /* * Reference counting for md. -- cgit v1.2.3 From 81f1777a55e8c631b61e5fa5980fb7a2004287af Mon Sep 17 00:00:00 2001 From: "goggin, edward" Date: Fri, 6 Jan 2006 00:20:01 -0800 Subject: [PATCH] device-mapper ioctl: event on rename After changing the name of a mapped device, trigger a dm event. (For userspace multipath tools.) Signed-off-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm-ioctl.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers') diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 07d44e19536e..3e327db57310 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -270,6 +270,7 @@ static int dm_hash_rename(const char *old, const char *new) { char *new_name, *old_name; struct hash_cell *hc; + struct dm_table *table; /* * duplicate new. @@ -317,6 +318,15 @@ static int dm_hash_rename(const char *old, const char *new) /* rename the device node in devfs */ register_with_devfs(hc); + /* + * Wake up any dm event waiters. + */ + table = dm_get_table(hc->md); + if (table) { + dm_table_event(table); + dm_table_put(table); + } + up_write(&_hash_lock); kfree(old_name); return 0; -- cgit v1.2.3 From 2d38fe204461dc542bb38f2b01a9cd115b367b36 Mon Sep 17 00:00:00 2001 From: Alasdair G Kergon Date: Fri, 6 Jan 2006 00:20:02 -0800 Subject: [PATCH] device-mapper snapshot: metadata reading separation More snapshot metadata reading into separate function, to prepare for changing the place it gets called from. Signed-off-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm-snap.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index ab54f99b7c3b..4b9dd8fb1e5c 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -371,6 +371,20 @@ static inline ulong round_up(ulong n, ulong size) return (n + size) & ~size; } +static void read_snapshot_metadata(struct dm_snapshot *s) +{ + if (s->have_metadata) + return; + + if (s->store.read_metadata(&s->store)) { + down_write(&s->lock); + s->valid = 0; + up_write(&s->lock); + } + + s->have_metadata = 1; +} + /* * Construct a snapshot mapping:
* <origin_dev> <COW-dev> <p/n> <chunk-size>
*/ @@ -848,16 +862,7 @@ static void snapshot_resume(struct dm_target *ti) { struct dm_snapshot *s = (struct dm_snapshot *) ti->private; - if (s->have_metadata) - return; - - if (s->store.read_metadata(&s->store)) { - down_write(&s->lock); - s->valid = 0; - up_write(&s->lock); - } - - s->have_metadata = 1; + read_snapshot_metadata(s); } static int snapshot_status(struct dm_target *ti, status_type_t type, -- cgit v1.2.3 From e6c276159c812ab25f47b7c7b683a5c97c442dd5 Mon Sep 17 00:00:00 2001 From: Andrew Stribblehill Date: Fri, 6 Jan 2006 00:20:03 -0800 Subject: [PATCH] device-mapper: remove unused definition This patch removes an unused #define. Signed-off-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm-io.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/md/dm-io.h b/drivers/md/dm-io.h index 1a77f3265706..f9035bfd1a9f 100644 --- a/drivers/md/dm-io.h +++ b/drivers/md/dm-io.h @@ -9,9 +9,6 @@ #include "dm.h" -/* FIXME make this configurable */ -#define DM_MAX_IO_REGIONS 8 - struct io_region { struct block_device *bdev; sector_t sector; -- cgit v1.2.3 From 2d5fe68987341a59a3fd97c71695efcabb0c6fd5 Mon Sep 17 00:00:00 2001 From: Alasdair G Kergon Date: Fri, 6 Jan 2006 00:20:04 -0800 Subject: [PATCH] device-mapper: scanf sector format change Use %llu not %Lu in sscanf/printf format strings. Signed-off-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/dm.h b/drivers/md/dm.h index ab078a249396..95a0cfbe5b4d 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -28,7 +28,7 @@ * in types.h. */ #ifdef CONFIG_LBD -#define SECTOR_FORMAT "%Lu" +#define SECTOR_FORMAT "%llu" #else #define SECTOR_FORMAT "%lu" #endif -- cgit v1.2.3 From a1a190807074bd6ad8771e00b00752771ae586cb Mon Sep 17 00:00:00 2001 From: Jonathan E Brassow Date: Fri, 6 Jan 2006 00:20:05 -0800 Subject: [PATCH] device-mapper raid1: add default mirror This patch introduces a new field to the mirror_set (default_mirror) to store the default mirror. (A subsequent patch will allow us to change the default mirror in the event of a failure.) 
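For illustration, such a fail-over can be as simple as reassigning the pointer. The helper below is hypothetical (not the actual follow-up patch) and assumes it sits in dm-raid1.c next to the mirror_set fields added in the diff below:

/* hypothetical helper: with a pointer instead of the fixed DEFAULT_MIRROR
 * index, switching the default mirror is just a reassignment */
static void choose_new_default(struct mirror_set *ms, struct mirror *failed)
{
	unsigned int i;

	if (failed != ms->default_mirror)
		return;

	for (i = 0; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] != failed) {
			ms->default_mirror = &ms->mirror[i];
			return;
		}
	}
	/* a real implementation would also check mirror health here */
}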
Signed-off-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm-raid1.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 6b0fc1670929..6cfa8d435d55 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -562,6 +562,8 @@ struct mirror_set { region_t nr_regions; int in_sync; + struct mirror *default_mirror; /* Default mirror */ + unsigned int nr_mirrors; struct mirror mirror[0]; }; @@ -611,7 +613,7 @@ static int recover(struct mirror_set *ms, struct region *reg) unsigned long flags = 0; /* fill in the source */ - m = ms->mirror + DEFAULT_MIRROR; + m = ms->default_mirror; from.bdev = m->dev->bdev; from.sector = m->offset + region_to_sector(reg->rh, reg->key); if (reg->key == (ms->nr_regions - 1)) { @@ -627,7 +629,7 @@ static int recover(struct mirror_set *ms, struct region *reg) /* fill in the destinations */ for (i = 0, dest = to; i < ms->nr_mirrors; i++) { - if (i == DEFAULT_MIRROR) + if (&ms->mirror[i] == ms->default_mirror) continue; m = ms->mirror + i; @@ -682,7 +684,7 @@ static void do_recovery(struct mirror_set *ms) static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) { /* FIXME: add read balancing */ - return ms->mirror + DEFAULT_MIRROR; + return ms->default_mirror; } /* @@ -709,7 +711,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) if (rh_in_sync(&ms->rh, region, 0)) m = choose_mirror(ms, bio->bi_sector); else - m = ms->mirror + DEFAULT_MIRROR; + m = ms->default_mirror; map_bio(ms, m, bio); generic_make_request(bio); @@ -833,7 +835,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) rh_delay(&ms->rh, bio); while ((bio = bio_list_pop(&nosync))) { - map_bio(ms, ms->mirror + DEFAULT_MIRROR, bio); + map_bio(ms, ms->default_mirror, bio); generic_make_request(bio); } } @@ -900,6 +902,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors, ms->nr_mirrors = nr_mirrors; ms->nr_regions = dm_sector_div_up(ti->len, region_size); ms->in_sync = 0; + ms->default_mirror = &ms->mirror[DEFAULT_MIRROR]; if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { ti->error = "dm-mirror: Error creating dirty region hash"; -- cgit v1.2.3 From e39e2e95eb8bd536b61654e8fda1516d0a6a3cd1 Mon Sep 17 00:00:00 2001 From: Alasdair G Kergon Date: Fri, 6 Jan 2006 00:20:05 -0800 Subject: [PATCH] device-mapper: rename frozen_bdev Rename frozen_bdev to suspended_bdev and move the bdget outside lockfs. (This prepares for making lockfs optional.) 
Signed-off-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm.c | 51 +++++++++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 9e8c1edd89dd..fc335e09d072 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -97,7 +97,7 @@ struct mapped_device { * freeze/thaw support require holding onto a super block */ struct super_block *frozen_sb; - struct block_device *frozen_bdev; + struct block_device *suspended_bdev; }; #define MIN_IOS 256 @@ -836,9 +836,9 @@ static void __set_size(struct mapped_device *md, sector_t size) { set_capacity(md->disk, size); - down(&md->frozen_bdev->bd_inode->i_sem); - i_size_write(md->frozen_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); - up(&md->frozen_bdev->bd_inode->i_sem); + down(&md->suspended_bdev->bd_inode->i_sem); + i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); + up(&md->suspended_bdev->bd_inode->i_sem); } static int __bind(struct mapped_device *md, struct dm_table *t) @@ -1010,43 +1010,27 @@ out: */ static int lock_fs(struct mapped_device *md) { - int r = -ENOMEM; - - md->frozen_bdev = bdget_disk(md->disk, 0); - if (!md->frozen_bdev) { - DMWARN("bdget failed in lock_fs"); - goto out; - } + int r; WARN_ON(md->frozen_sb); - md->frozen_sb = freeze_bdev(md->frozen_bdev); + md->frozen_sb = freeze_bdev(md->suspended_bdev); if (IS_ERR(md->frozen_sb)) { r = PTR_ERR(md->frozen_sb); - goto out_bdput; + md->frozen_sb = NULL; + return r; } /* don't bdput right now, we don't want the bdev - * to go away while it is locked. We'll bdput - * in unlock_fs + * to go away while it is locked. */ return 0; - -out_bdput: - bdput(md->frozen_bdev); - md->frozen_sb = NULL; - md->frozen_bdev = NULL; -out: - return r; } static void unlock_fs(struct mapped_device *md) { - thaw_bdev(md->frozen_bdev, md->frozen_sb); - bdput(md->frozen_bdev); - + thaw_bdev(md->suspended_bdev, md->frozen_sb); md->frozen_sb = NULL; - md->frozen_bdev = NULL; } /* @@ -1072,6 +1056,13 @@ int dm_suspend(struct mapped_device *md) /* This does not get reverted if there's an error later. */ dm_table_presuspend_targets(map); + md->suspended_bdev = bdget_disk(md->disk, 0); + if (!md->suspended_bdev) { + DMWARN("bdget failed in dm_suspend"); + r = -ENOMEM; + goto out; + } + /* Flush I/O to the device. */ r = lock_fs(md); if (r) @@ -1124,6 +1115,11 @@ int dm_suspend(struct mapped_device *md) r = 0; out: + if (r && md->suspended_bdev) { + bdput(md->suspended_bdev); + md->suspended_bdev = NULL; + } + dm_table_put(map); up(&md->suspend_lock); return r; @@ -1154,6 +1150,9 @@ int dm_resume(struct mapped_device *md) unlock_fs(md); + bdput(md->suspended_bdev); + md->suspended_bdev = NULL; + clear_bit(DMF_SUSPENDED, &md->flags); dm_table_unplug_all(map); -- cgit v1.2.3 From aa8d7c2fbe619d8c0837296d2eaf4c14cebac198 Mon Sep 17 00:00:00 2001 From: Alasdair G Kergon Date: Fri, 6 Jan 2006 00:20:06 -0800 Subject: [PATCH] device-mapper: make lock_fs optional Devices only needs syncing when creating snapshots, so make this optional when suspending a device. 
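With the extra argument, each caller states whether the filesystem must be frozen. A rough sketch of the two cases, assuming the updated dm_suspend() prototype from the diff below (the wrapper names are invented):

#include "dm.h"		/* int dm_suspend(struct mapped_device *, int with_lockfs); */

/* illustrative only: a snapshot will be taken, so sync + freeze first */
static int suspend_for_snapshot(struct mapped_device *md)
{
	return dm_suspend(md, 1);
}

/* illustrative only: e.g. a multipath table reload, no snapshot, skip lock_fs */
static int suspend_for_reload(struct mapped_device *md)
{
	return dm_suspend(md, 0);
}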
Signed-off-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm-ioctl.c | 4 ++-- drivers/md/dm.c | 17 +++++++++++++---- drivers/md/dm.h | 2 +- 3 files changed, 16 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 3e327db57310..dbc07afd4462 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -700,7 +700,7 @@ static int do_suspend(struct dm_ioctl *param) return -ENXIO; if (!dm_suspended(md)) - r = dm_suspend(md); + r = dm_suspend(md, 1); if (!r) r = __dev_status(md, param); @@ -738,7 +738,7 @@ static int do_resume(struct dm_ioctl *param) if (new_map) { /* Suspend if it isn't already suspended */ if (!dm_suspended(md)) - dm_suspend(md); + dm_suspend(md, 1); r = dm_swap_table(md, new_map); if (r) { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index fc335e09d072..0e481512f918 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -55,6 +55,7 @@ union map_info *dm_get_mapinfo(struct bio *bio) */ #define DMF_BLOCK_IO 0 #define DMF_SUSPENDED 1 +#define DMF_FROZEN 2 struct mapped_device { struct rw_semaphore io_lock; @@ -1021,6 +1022,8 @@ static int lock_fs(struct mapped_device *md) return r; } + set_bit(DMF_FROZEN, &md->flags); + /* don't bdput right now, we don't want the bdev * to go away while it is locked. */ @@ -1029,8 +1032,12 @@ static int lock_fs(struct mapped_device *md) static void unlock_fs(struct mapped_device *md) { + if (!test_bit(DMF_FROZEN, &md->flags)) + return; + thaw_bdev(md->suspended_bdev, md->frozen_sb); md->frozen_sb = NULL; + clear_bit(DMF_FROZEN, &md->flags); } /* @@ -1040,7 +1047,7 @@ static void unlock_fs(struct mapped_device *md) * dm_bind_table, dm_suspend must be called to flush any in * flight bios and ensure that any further io gets deferred. */ -int dm_suspend(struct mapped_device *md) +int dm_suspend(struct mapped_device *md, int do_lockfs) { struct dm_table *map = NULL; DECLARE_WAITQUEUE(wait, current); @@ -1064,9 +1071,11 @@ int dm_suspend(struct mapped_device *md) } /* Flush I/O to the device. */ - r = lock_fs(md); - if (r) - goto out; + if (do_lockfs) { + r = lock_fs(md); + if (r) + goto out; + } /* * First we set the BLOCK_IO flag so no more ios will be mapped. diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 95a0cfbe5b4d..4eaf075da217 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -69,7 +69,7 @@ void dm_put(struct mapped_device *md); /* * A device can still be used while suspended, but I/O is deferred. */ -int dm_suspend(struct mapped_device *md); +int dm_suspend(struct mapped_device *md, int with_lockfs); int dm_resume(struct mapped_device *md); /* -- cgit v1.2.3 From 6da487dcc0c6f4c827779687a20016efeffc4d60 Mon Sep 17 00:00:00 2001 From: Alasdair G Kergon Date: Fri, 6 Jan 2006 00:20:07 -0800 Subject: [PATCH] device-mapper ioctl: add skip lock_fs flag Add ioctl DM_SKIP_LOCKFS_FLAG for userspace to request that lock_fs is bypassed when suspending a device. There's no change to the behaviour of existing code that doesn't know about the new flag. 
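From userspace the flag is simply OR-ed into dm_ioctl.flags before issuing DM_DEV_SUSPEND. A rough sketch, assuming DM_SKIP_LOCKFS_FLAG is exported via the dm-ioctl.h interface header updated alongside this patch (that hunk is outside the drivers/ diff shown here); version negotiation and error handling are minimal:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

/* ctl_fd: open file descriptor on /dev/mapper/control */
static int suspend_no_lockfs(int ctl_fd, const char *name)
{
	struct dm_ioctl io;

	memset(&io, 0, sizeof(io));
	io.version[0] = DM_VERSION_MAJOR;
	io.version[1] = DM_VERSION_MINOR;
	io.version[2] = DM_VERSION_PATCHLEVEL;
	io.data_size = sizeof(io);
	strncpy(io.name, name, sizeof(io.name) - 1);
	/* DM_SKIP_LOCKFS_FLAG assumed from the companion dm-ioctl.h change */
	io.flags = DM_SUSPEND_FLAG | DM_SKIP_LOCKFS_FLAG;

	return ioctl(ctl_fd, DM_DEV_SUSPEND, &io);
}

Existing tools that never set the flag keep the old sync-and-freeze behaviour.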
Signed-off-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm-ioctl.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index dbc07afd4462..561bda5011e0 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -693,14 +693,18 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size) static int do_suspend(struct dm_ioctl *param) { int r = 0; + int do_lockfs = 1; struct mapped_device *md; md = find_device(param); if (!md) return -ENXIO; + if (param->flags & DM_SKIP_LOCKFS_FLAG) + do_lockfs = 0; + if (!dm_suspended(md)) - r = dm_suspend(md, 1); + r = dm_suspend(md, do_lockfs); if (!r) r = __dev_status(md, param); @@ -712,6 +716,7 @@ static int do_suspend(struct dm_ioctl *param) static int do_resume(struct dm_ioctl *param) { int r = 0; + int do_lockfs = 1; struct hash_cell *hc; struct mapped_device *md; struct dm_table *new_map; @@ -737,8 +742,10 @@ static int do_resume(struct dm_ioctl *param) /* Do we need to load a new map ? */ if (new_map) { /* Suspend if it isn't already suspended */ + if (param->flags & DM_SKIP_LOCKFS_FLAG) + do_lockfs = 0; if (!dm_suspended(md)) - dm_suspend(md, 1); + dm_suspend(md, do_lockfs); r = dm_swap_table(md, new_map); if (r) { -- cgit v1.2.3 From 0b56306e56784d0513e1193d58c05a6bd97bd1a9 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Fri, 6 Jan 2006 00:20:08 -0800 Subject: [PATCH] drivers/md/kcopyd.c: #if 0 kcopyd_cancel() This patch #if 0's the not yet implemented global function kcopyd_cancel(). Signed-off-by: Adrian Bunk Acked-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/kcopyd.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c index eb7036485975..ca99979c868a 100644 --- a/drivers/md/kcopyd.c +++ b/drivers/md/kcopyd.c @@ -561,11 +561,13 @@ int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from, * Cancels a kcopyd job, eg. someone might be deactivating a * mirror. */ +#if 0 int kcopyd_cancel(struct kcopyd_job *job, int block) { /* FIXME: finish */ return -1; } +#endif /* 0 */ /*----------------------------------------------------------------- * Unit setup @@ -684,4 +686,3 @@ void kcopyd_client_destroy(struct kcopyd_client *kc) EXPORT_SYMBOL(kcopyd_client_create); EXPORT_SYMBOL(kcopyd_client_destroy); EXPORT_SYMBOL(kcopyd_copy); -EXPORT_SYMBOL(kcopyd_cancel); -- cgit v1.2.3 From 9d3520a339d62f942085e9888f66905eb8b350bd Mon Sep 17 00:00:00 2001 From: Stefan Rompf Date: Fri, 6 Jan 2006 00:20:08 -0800 Subject: [PATCH] dm-crypt: zero key before freeing it Zap the memory before freeing it so we don't leave crypto information around in memory. 
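The idiom generalizes to any buffer holding key material: clear it in place before handing it back to the allocator, since kfree() alone leaves the old contents in memory. A minimal sketch of the pattern (illustrative only; the patch below applies it directly to the crypt_config that embeds the key):

#include <linux/slab.h>
#include <linux/string.h>

/* illustrative helper: wipe sensitive data before freeing it */
static void secure_kfree(void *data, size_t len)
{
	memset(data, 0, len);	/* zero key material first */
	kfree(data);
}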
Signed-off-by: Stefan Rompf Acked-by: Clemens Fruhwirth Acked-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm-crypt.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index cf6631056683..a601a427885c 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -690,6 +690,8 @@ bad3: bad2: crypto_free_tfm(tfm); bad1: + /* Must zero key material before freeing */ + memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); kfree(cc); return -EINVAL; } @@ -706,6 +708,9 @@ static void crypt_dtr(struct dm_target *ti) cc->iv_gen_ops->dtr(cc); crypto_free_tfm(cc->tfm); dm_put_device(ti, cc->dev); + + /* Must zero key material before freeing */ + memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); kfree(cc); } -- cgit v1.2.3 From ac81b2ee45eb811fdb0aa1cfb71d468d944d00ce Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Fri, 6 Jan 2006 00:20:11 -0800 Subject: [PATCH] make dm-mirror not issue invalid resync requests I've been attempting to set up a (Host)RAID mirror with dm_mirror on 2.6.14.3, and I've been having a strange little problem. The configuration in question is a set of 9GB SCSI disks that have 17942584 sectors. I set up the dm_mirror table as such: 0 17942528 mirror core 2 2048 nosync 2 8:48 0 8:64 0 If I'm not mistaken, this sets up a 9GB RAID1 mriror with 1MB stripes across both SCSI disks. The sector count of the dm device is less than the size of the disks, so we shouldn't fall off the end. However, I always get the messages like this in dmesg when I set up the dm table: attempt to access beyond end of device sdd: rw=0, want=17958656, limit=17942584 Clearly, something is trying to read sectors past the end of the drive. I traced it down to the __rh_recovery_prepare function in dm-raid1.c, which gets called when we're putting the mirror set together. This function calls the dirty region log's get_resync_work function to see if there's any resync that needs to be done, and queues up any areas that are out of sync. The log's get_resync_work function is actually a pointer to the core_get_resync_work function in dm-log.c. The core_get_resync_work function queries a bitset lc->sync_bits to find out if there are any regions that are out of date (i.e. the bit is 0), which is where the problem occurs. If every bit in lc->sync_bits is 1 (which is the case when we've just configured a new RAID1 with the nosync option), the find_next_zero_bit does NOT return the size parameter (lc->region_count in this case), it returns the size parameter rounded up to the nearest multiple of 32! I don't know if this is intentional, but i386 and x86_64 both exhibit this behavior. In any case, the statement "if (*region == lc->region_count)" looks like it's supposed to catch the case where are no regions to resync and return 0. Since find_next_zero_bit apparently has a habit of returning a value that's larger than lc->region_count, the enclosed patch changes the equality test to a greater-than test so that we don't try to resync areas outside of the RAID1 region. Seeing as the HostRAID metadata lives just past the end of the RAID1 data, mucking around in that area is not a good idea. I suppose another way to fix this would be to amend find_next_zero_bit so that it doesn't return values larger than "size", but I don't know if there's a reason for the current behavior. Signed-Off-By: Darrick J. 
Wong Acked-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm-log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index a76349cb10a5..efe4adf78530 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -573,7 +573,7 @@ static int core_get_resync_work(struct dirty_log *log, region_t *region) lc->sync_search); lc->sync_search = *region + 1; - if (*region == lc->region_count) + if (*region >= lc->region_count) return 0; } while (log_test_bit(lc->recovering_bits, *region)); -- cgit v1.2.3 From 17999be4aa408e7ff3b9d32c735649676567a3cd Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:12 -0800 Subject: [PATCH] md: improve raid1 "IO Barrier" concept raid1 needs to put up a barrier to new requests while it does resync or other background recovery. The code for this is currently open-coded, slighty obscure by its use of two waitqueues, and not documented. This patch gathers all the related code into 4 functions, and includes a comment which (hopefully) explains what is happening. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid1.c | 167 ++++++++++++++++++++++++++++------------------------- 1 file changed, 89 insertions(+), 78 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 229d7b204297..f5204149ab65 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -51,6 +51,8 @@ static mdk_personality_t raid1_personality; static void unplug_slaves(mddev_t *mddev); +static void allow_barrier(conf_t *conf); +static void lower_barrier(conf_t *conf); static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) { @@ -160,20 +162,13 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio) static inline void free_r1bio(r1bio_t *r1_bio) { - unsigned long flags; - conf_t *conf = mddev_to_conf(r1_bio->mddev); /* * Wake up any possible resync thread that waits for the device * to go idle. */ - spin_lock_irqsave(&conf->resync_lock, flags); - if (!--conf->nr_pending) { - wake_up(&conf->wait_idle); - wake_up(&conf->wait_resume); - } - spin_unlock_irqrestore(&conf->resync_lock, flags); + allow_barrier(conf); put_all_bios(conf, r1_bio); mempool_free(r1_bio, conf->r1bio_pool); @@ -182,22 +177,10 @@ static inline void free_r1bio(r1bio_t *r1_bio) static inline void put_buf(r1bio_t *r1_bio) { conf_t *conf = mddev_to_conf(r1_bio->mddev); - unsigned long flags; mempool_free(r1_bio, conf->r1buf_pool); - spin_lock_irqsave(&conf->resync_lock, flags); - if (!conf->barrier) - BUG(); - --conf->barrier; - wake_up(&conf->wait_resume); - wake_up(&conf->wait_idle); - - if (!--conf->nr_pending) { - wake_up(&conf->wait_idle); - wake_up(&conf->wait_resume); - } - spin_unlock_irqrestore(&conf->resync_lock, flags); + lower_barrier(conf); } static void reschedule_retry(r1bio_t *r1_bio) @@ -210,6 +193,7 @@ static void reschedule_retry(r1bio_t *r1_bio) list_add(&r1_bio->retry_list, &conf->retry_list); spin_unlock_irqrestore(&conf->device_lock, flags); + wake_up(&conf->wait_barrier); md_wakeup_thread(mddev->thread); } @@ -593,30 +577,83 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk, return ret; } -/* - * Throttle resync depth, so that we can both get proper overlapping of - * requests, but are still able to handle normal requests quickly. +/* Barriers.... 
+ * Sometimes we need to suspend IO while we do something else, + * either some resync/recovery, or reconfigure the array. + * To do this we raise a 'barrier'. + * The 'barrier' is a counter that can be raised multiple times + * to count how many activities are happening which preclude + * normal IO. + * We can only raise the barrier if there is no pending IO. + * i.e. if nr_pending == 0. + * We choose only to raise the barrier if no-one is waiting for the + * barrier to go down. This means that as soon as an IO request + * is ready, no other operations which require a barrier will start + * until the IO request has had a chance. + * + * So: regular IO calls 'wait_barrier'. When that returns there + * is no backgroup IO happening, It must arrange to call + * allow_barrier when it has finished its IO. + * backgroup IO calls must call raise_barrier. Once that returns + * there is no normal IO happeing. It must arrange to call + * lower_barrier when the particular background IO completes. */ #define RESYNC_DEPTH 32 -static void device_barrier(conf_t *conf, sector_t sect) +static void raise_barrier(conf_t *conf) { spin_lock_irq(&conf->resync_lock); - wait_event_lock_irq(conf->wait_idle, !waitqueue_active(&conf->wait_resume), - conf->resync_lock, raid1_unplug(conf->mddev->queue)); - - if (!conf->barrier++) { - wait_event_lock_irq(conf->wait_idle, !conf->nr_pending, - conf->resync_lock, raid1_unplug(conf->mddev->queue)); - if (conf->nr_pending) - BUG(); + + /* Wait until no block IO is waiting */ + wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, + conf->resync_lock, + raid1_unplug(conf->mddev->queue)); + + /* block any new IO from starting */ + conf->barrier++; + + /* No wait for all pending IO to complete */ + wait_event_lock_irq(conf->wait_barrier, + !conf->nr_pending && conf->barrier < RESYNC_DEPTH, + conf->resync_lock, + raid1_unplug(conf->mddev->queue)); + + spin_unlock_irq(&conf->resync_lock); +} + +static void lower_barrier(conf_t *conf) +{ + unsigned long flags; + spin_lock_irqsave(&conf->resync_lock, flags); + conf->barrier--; + spin_unlock_irqrestore(&conf->resync_lock, flags); + wake_up(&conf->wait_barrier); +} + +static void wait_barrier(conf_t *conf) +{ + spin_lock_irq(&conf->resync_lock); + if (conf->barrier) { + conf->nr_waiting++; + wait_event_lock_irq(conf->wait_barrier, !conf->barrier, + conf->resync_lock, + raid1_unplug(conf->mddev->queue)); + conf->nr_waiting--; } - wait_event_lock_irq(conf->wait_resume, conf->barrier < RESYNC_DEPTH, - conf->resync_lock, raid1_unplug(conf->mddev->queue)); - conf->next_resync = sect; + conf->nr_pending++; spin_unlock_irq(&conf->resync_lock); } +static void allow_barrier(conf_t *conf) +{ + unsigned long flags; + spin_lock_irqsave(&conf->resync_lock, flags); + conf->nr_pending--; + spin_unlock_irqrestore(&conf->resync_lock, flags); + wake_up(&conf->wait_barrier); +} + + /* duplicate the data pages for behind I/O */ static struct page **alloc_behind_pages(struct bio *bio) { @@ -678,10 +715,7 @@ static int make_request(request_queue_t *q, struct bio * bio) */ md_write_start(mddev, bio); /* wait on superblock update early */ - spin_lock_irq(&conf->resync_lock); - wait_event_lock_irq(conf->wait_resume, !conf->barrier, conf->resync_lock, ); - conf->nr_pending++; - spin_unlock_irq(&conf->resync_lock); + wait_barrier(conf); disk_stat_inc(mddev->gendisk, ios[rw]); disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); @@ -909,13 +943,8 @@ static void print_conf(conf_t *conf) static void close_sync(conf_t *conf) { - 
spin_lock_irq(&conf->resync_lock); - wait_event_lock_irq(conf->wait_resume, !conf->barrier, - conf->resync_lock, raid1_unplug(conf->mddev->queue)); - spin_unlock_irq(&conf->resync_lock); - - if (conf->barrier) BUG(); - if (waitqueue_active(&conf->wait_idle)) BUG(); + wait_barrier(conf); + allow_barrier(conf); mempool_destroy(conf->r1buf_pool); conf->r1buf_pool = NULL; @@ -1317,12 +1346,16 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i return sync_blocks; } /* - * If there is non-resync activity waiting for us then - * put in a delay to throttle resync. + * If there is non-resync activity waiting for a turn, + * and resync is going fast enough, + * then let it though before starting on this new sync request. */ - if (!go_faster && waitqueue_active(&conf->wait_resume)) + if (!go_faster && conf->nr_waiting) msleep_interruptible(1000); - device_barrier(conf, sector_nr + RESYNC_SECTORS); + + raise_barrier(conf); + + conf->next_resync = sector_nr; /* * If reconstructing, and >1 working disc, @@ -1355,10 +1388,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); - spin_lock_irq(&conf->resync_lock); - conf->nr_pending++; - spin_unlock_irq(&conf->resync_lock); - r1_bio->mddev = mddev; r1_bio->sector = sector_nr; r1_bio->state = 0; @@ -1542,8 +1571,7 @@ static int run(mddev_t *mddev) mddev->recovery_cp = MaxSector; spin_lock_init(&conf->resync_lock); - init_waitqueue_head(&conf->wait_idle); - init_waitqueue_head(&conf->wait_resume); + init_waitqueue_head(&conf->wait_barrier); bio_list_init(&conf->pending_bio_list); bio_list_init(&conf->flushing_bio_list); @@ -1714,11 +1742,7 @@ static int raid1_reshape(mddev_t *mddev, int raid_disks) } memset(newmirrors, 0, sizeof(struct mirror_info)*raid_disks); - spin_lock_irq(&conf->resync_lock); - conf->barrier++; - wait_event_lock_irq(conf->wait_idle, !conf->nr_pending, - conf->resync_lock, raid1_unplug(mddev->queue)); - spin_unlock_irq(&conf->resync_lock); + raise_barrier(conf); /* ok, everything is stopped */ oldpool = conf->r1bio_pool; @@ -1738,12 +1762,7 @@ static int raid1_reshape(mddev_t *mddev, int raid_disks) conf->raid_disks = mddev->raid_disks = raid_disks; conf->last_used = 0; /* just make sure it is in-range */ - spin_lock_irq(&conf->resync_lock); - conf->barrier--; - spin_unlock_irq(&conf->resync_lock); - wake_up(&conf->wait_resume); - wake_up(&conf->wait_idle); - + lower_barrier(conf); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); @@ -1758,18 +1777,10 @@ static void raid1_quiesce(mddev_t *mddev, int state) switch(state) { case 1: - spin_lock_irq(&conf->resync_lock); - conf->barrier++; - wait_event_lock_irq(conf->wait_idle, !conf->nr_pending, - conf->resync_lock, raid1_unplug(mddev->queue)); - spin_unlock_irq(&conf->resync_lock); + raise_barrier(conf); break; case 0: - spin_lock_irq(&conf->resync_lock); - conf->barrier--; - spin_unlock_irq(&conf->resync_lock); - wake_up(&conf->wait_resume); - wake_up(&conf->wait_idle); + lower_barrier(conf); break; } if (mddev->thread) { -- cgit v1.2.3 From 0a27ec96b6fb1abf867e36d7b0b681d67588767a Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:13 -0800 Subject: [PATCH] md: improve raid10 "IO Barrier" concept raid10 needs to put up a barrier to new requests while it does resync or other background recovery. The code for this is currently open-coded, slighty obscure by its use of two waitqueues, and not documented. 
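In outline, the replacement scheme is the same raise_barrier/lower_barrier/wait_barrier/allow_barrier set just introduced for raid1 above. As a rough userspace analogue (sketch only, not kernel code: pthreads stand in for resync_lock plus wait_event_lock_irq, and the RESYNC_DEPTH throttle is omitted):

#include <pthread.h>

struct barrier_ctx {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int barrier;		/* active background (resync/recovery) operations */
	int nr_pending;		/* regular I/O currently in flight */
	int nr_waiting;		/* regular I/O queued behind a barrier */
};

/* background I/O brackets its work with raise_barrier()/lower_barrier() */
static void raise_barrier(struct barrier_ctx *c)
{
	pthread_mutex_lock(&c->lock);
	while (c->nr_waiting)		/* be fair to queued regular I/O */
		pthread_cond_wait(&c->cond, &c->lock);
	c->barrier++;			/* block new regular I/O */
	while (c->nr_pending)		/* drain in-flight regular I/O */
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void lower_barrier(struct barrier_ctx *c)
{
	pthread_mutex_lock(&c->lock);
	c->barrier--;
	pthread_cond_broadcast(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

/* regular I/O brackets its work with wait_barrier()/allow_barrier() */
static void wait_barrier(struct barrier_ctx *c)
{
	pthread_mutex_lock(&c->lock);
	c->nr_waiting++;
	while (c->barrier)
		pthread_cond_wait(&c->cond, &c->lock);
	c->nr_waiting--;
	c->nr_pending++;
	pthread_cond_broadcast(&c->cond);	/* a raiser may be waiting on nr_waiting */
	pthread_mutex_unlock(&c->lock);
}

static void allow_barrier(struct barrier_ctx *c)
{
	pthread_mutex_lock(&c->lock);
	c->nr_pending--;
	pthread_cond_broadcast(&c->cond);
	pthread_mutex_unlock(&c->lock);
}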
This patch gathers all the related code into 4 functions, and includes a comment which (hopefully) explains what is happening. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid10.c | 135 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 79 insertions(+), 56 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 713dc9c2c730..50bd7b152f28 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -47,6 +47,9 @@ static void unplug_slaves(mddev_t *mddev); +static void allow_barrier(conf_t *conf); +static void lower_barrier(conf_t *conf); + static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) { conf_t *conf = data; @@ -175,20 +178,13 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio) static inline void free_r10bio(r10bio_t *r10_bio) { - unsigned long flags; - conf_t *conf = mddev_to_conf(r10_bio->mddev); /* * Wake up any possible resync thread that waits for the device * to go idle. */ - spin_lock_irqsave(&conf->resync_lock, flags); - if (!--conf->nr_pending) { - wake_up(&conf->wait_idle); - wake_up(&conf->wait_resume); - } - spin_unlock_irqrestore(&conf->resync_lock, flags); + allow_barrier(conf); put_all_bios(conf, r10_bio); mempool_free(r10_bio, conf->r10bio_pool); @@ -197,22 +193,10 @@ static inline void free_r10bio(r10bio_t *r10_bio) static inline void put_buf(r10bio_t *r10_bio) { conf_t *conf = mddev_to_conf(r10_bio->mddev); - unsigned long flags; mempool_free(r10_bio, conf->r10buf_pool); - spin_lock_irqsave(&conf->resync_lock, flags); - if (!conf->barrier) - BUG(); - --conf->barrier; - wake_up(&conf->wait_resume); - wake_up(&conf->wait_idle); - - if (!--conf->nr_pending) { - wake_up(&conf->wait_idle); - wake_up(&conf->wait_resume); - } - spin_unlock_irqrestore(&conf->resync_lock, flags); + lower_barrier(conf); } static void reschedule_retry(r10bio_t *r10_bio) @@ -640,30 +624,82 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk, return ret; } -/* - * Throttle resync depth, so that we can both get proper overlapping of - * requests, but are still able to handle normal requests quickly. +/* Barriers.... + * Sometimes we need to suspend IO while we do something else, + * either some resync/recovery, or reconfigure the array. + * To do this we raise a 'barrier'. + * The 'barrier' is a counter that can be raised multiple times + * to count how many activities are happening which preclude + * normal IO. + * We can only raise the barrier if there is no pending IO. + * i.e. if nr_pending == 0. + * We choose only to raise the barrier if no-one is waiting for the + * barrier to go down. This means that as soon as an IO request + * is ready, no other operations which require a barrier will start + * until the IO request has had a chance. + * + * So: regular IO calls 'wait_barrier'. When that returns there + * is no backgroup IO happening, It must arrange to call + * allow_barrier when it has finished its IO. + * backgroup IO calls must call raise_barrier. Once that returns + * there is no normal IO happeing. It must arrange to call + * lower_barrier when the particular background IO completes. 
*/ #define RESYNC_DEPTH 32 -static void device_barrier(conf_t *conf, sector_t sect) +static void raise_barrier(conf_t *conf) { spin_lock_irq(&conf->resync_lock); - wait_event_lock_irq(conf->wait_idle, !waitqueue_active(&conf->wait_resume), - conf->resync_lock, unplug_slaves(conf->mddev)); - - if (!conf->barrier++) { - wait_event_lock_irq(conf->wait_idle, !conf->nr_pending, - conf->resync_lock, unplug_slaves(conf->mddev)); - if (conf->nr_pending) - BUG(); + + /* Wait until no block IO is waiting */ + wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, + conf->resync_lock, + raid10_unplug(conf->mddev->queue)); + + /* block any new IO from starting */ + conf->barrier++; + + /* No wait for all pending IO to complete */ + wait_event_lock_irq(conf->wait_barrier, + !conf->nr_pending && conf->barrier < RESYNC_DEPTH, + conf->resync_lock, + raid10_unplug(conf->mddev->queue)); + + spin_unlock_irq(&conf->resync_lock); +} + +static void lower_barrier(conf_t *conf) +{ + unsigned long flags; + spin_lock_irqsave(&conf->resync_lock, flags); + conf->barrier--; + spin_unlock_irqrestore(&conf->resync_lock, flags); + wake_up(&conf->wait_barrier); +} + +static void wait_barrier(conf_t *conf) +{ + spin_lock_irq(&conf->resync_lock); + if (conf->barrier) { + conf->nr_waiting++; + wait_event_lock_irq(conf->wait_barrier, !conf->barrier, + conf->resync_lock, + raid10_unplug(conf->mddev->queue)); + conf->nr_waiting--; } - wait_event_lock_irq(conf->wait_resume, conf->barrier < RESYNC_DEPTH, - conf->resync_lock, unplug_slaves(conf->mddev)); - conf->next_resync = sect; + conf->nr_pending++; spin_unlock_irq(&conf->resync_lock); } +static void allow_barrier(conf_t *conf) +{ + unsigned long flags; + spin_lock_irqsave(&conf->resync_lock, flags); + conf->nr_pending--; + spin_unlock_irqrestore(&conf->resync_lock, flags); + wake_up(&conf->wait_barrier); +} + static int make_request(request_queue_t *q, struct bio * bio) { mddev_t *mddev = q->queuedata; @@ -719,10 +755,7 @@ static int make_request(request_queue_t *q, struct bio * bio) * thread has put up a bar for new requests. * Continue immediately if no resync is active currently. */ - spin_lock_irq(&conf->resync_lock); - wait_event_lock_irq(conf->wait_resume, !conf->barrier, conf->resync_lock, ); - conf->nr_pending++; - spin_unlock_irq(&conf->resync_lock); + wait_barrier(conf); disk_stat_inc(mddev->gendisk, ios[rw]); disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); @@ -897,13 +930,8 @@ static void print_conf(conf_t *conf) static void close_sync(conf_t *conf) { - spin_lock_irq(&conf->resync_lock); - wait_event_lock_irq(conf->wait_resume, !conf->barrier, - conf->resync_lock, unplug_slaves(conf->mddev)); - spin_unlock_irq(&conf->resync_lock); - - if (conf->barrier) BUG(); - if (waitqueue_active(&conf->wait_idle)) BUG(); + wait_barrier(conf); + allow_barrier(conf); mempool_destroy(conf->r10buf_pool); conf->r10buf_pool = NULL; @@ -1395,9 +1423,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i * If there is non-resync activity waiting for us then * put in a delay to throttle resync. */ - if (!go_faster && waitqueue_active(&conf->wait_resume)) + if (!go_faster && conf->nr_waiting) msleep_interruptible(1000); - device_barrier(conf, sector_nr + RESYNC_SECTORS); + raise_barrier(conf); + conf->next_resync = sector_nr; /* Again, very different code for resync and recovery. 
* Both must result in an r10bio with a list of bios that @@ -1427,7 +1456,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); spin_lock_irq(&conf->resync_lock); - conf->nr_pending++; if (rb2) conf->barrier++; spin_unlock_irq(&conf->resync_lock); atomic_set(&r10_bio->remaining, 0); @@ -1500,10 +1528,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i int count = 0; r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); - spin_lock_irq(&conf->resync_lock); - conf->nr_pending++; - spin_unlock_irq(&conf->resync_lock); - r10_bio->mddev = mddev; atomic_set(&r10_bio->remaining, 0); @@ -1713,8 +1737,7 @@ static int run(mddev_t *mddev) INIT_LIST_HEAD(&conf->retry_list); spin_lock_init(&conf->resync_lock); - init_waitqueue_head(&conf->wait_idle); - init_waitqueue_head(&conf->wait_resume); + init_waitqueue_head(&conf->wait_barrier); /* need to check that every block has at least one working mirror */ if (!enough(conf)) { -- cgit v1.2.3 From 14f8d26b8ea3413b28f2cac208c9a93600fe3a80 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:14 -0800 Subject: [PATCH] md: small cleanups for raid5 Resync code: A test that isn't needed, a 'compute_block' that makes more sense elsewhere (And then doesn't need a test), a couple of BUG_ONs to confirm the change makes sense. Printks: A few were missing KERN_* Also fix a typo in a comment.. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid5.c | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index fafc4bc045f7..334ff7a07283 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -417,7 +417,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done, set_bit(R5_UPTODATE, &sh->dev[i].flags); #endif if (test_bit(R5_ReadError, &sh->dev[i].flags)) { - printk("R5: read error corrected!!\n"); + printk(KERN_INFO "raid5: read error corrected!!\n"); clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); } @@ -428,13 +428,14 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done, clear_bit(R5_UPTODATE, &sh->dev[i].flags); atomic_inc(&conf->disks[i].rdev->read_errors); if (conf->mddev->degraded) - printk("R5: read error not correctable.\n"); + printk(KERN_WARNING "raid5: read error not correctable.\n"); else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) /* Oh, no!!! 
*/ - printk("R5: read error NOT corrected!!\n"); + printk(KERN_WARNING "raid5: read error NOT corrected!!\n"); else if (atomic_read(&conf->disks[i].rdev->read_errors) > conf->max_nr_stripes) - printk("raid5: Too many read errors, failing device.\n"); + printk(KERN_WARNING + "raid5: Too many read errors, failing device.\n"); else retry = 1; if (retry) @@ -604,7 +605,7 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks, *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; break; default: - printk("raid5: unsupported algorithm %d\n", + printk(KERN_ERR "raid5: unsupported algorithm %d\n", conf->algorithm); } @@ -645,7 +646,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i) i -= (sh->pd_idx + 1); break; default: - printk("raid5: unsupported algorithm %d\n", + printk(KERN_ERR "raid5: unsupported algorithm %d\n", conf->algorithm); } @@ -654,7 +655,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i) check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf); if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) { - printk("compute_blocknr: map not correct\n"); + printk(KERN_ERR "compute_blocknr: map not correct\n"); return 0; } return r_sector; @@ -737,7 +738,7 @@ static void compute_block(struct stripe_head *sh, int dd_idx) if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) ptr[count++] = p; else - printk("compute_block() %d, stripe %llu, %d" + printk(KERN_ERR "compute_block() %d, stripe %llu, %d" " not present\n", dd_idx, (unsigned long long)sh->sector, i); @@ -1005,7 +1006,7 @@ static void handle_stripe(struct stripe_head *sh) if (dev->written) written++; rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */ if (!rdev || !test_bit(In_sync, &rdev->flags)) { - /* The ReadError flag wil just be confusing now */ + /* The ReadError flag will just be confusing now */ clear_bit(R5_ReadError, &dev->flags); clear_bit(R5_ReWrite, &dev->flags); } @@ -1288,7 +1289,7 @@ static void handle_stripe(struct stripe_head *sh) * is available */ if (syncing && locked == 0 && - !test_bit(STRIPE_INSYNC, &sh->state) && failed <= 1) { + !test_bit(STRIPE_INSYNC, &sh->state)) { set_bit(STRIPE_HANDLE, &sh->state); if (failed == 0) { char *pagea; @@ -1306,21 +1307,20 @@ static void handle_stripe(struct stripe_head *sh) if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) /* don't try to repair!! 
*/ set_bit(STRIPE_INSYNC, &sh->state); + else { + compute_block(sh, sh->pd_idx); + uptodate++; + } } } if (!test_bit(STRIPE_INSYNC, &sh->state)) { + /* either failed parity check, or recovery is happening */ if (failed==0) failed_num = sh->pd_idx; - /* should be able to compute the missing block and write it to spare */ - if (!test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)) { - if (uptodate+1 != disks) - BUG(); - compute_block(sh, failed_num); - uptodate++; - } - if (uptodate != disks) - BUG(); dev = &sh->dev[failed_num]; + BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); + BUG_ON(uptodate != disks); + set_bit(R5_LOCKED, &dev->flags); set_bit(R5_Wantwrite, &dev->flags); clear_bit(STRIPE_DEGRADED, &sh->state); @@ -1822,7 +1822,8 @@ static int run(mddev_t *mddev) struct list_head *tmp; if (mddev->level != 5 && mddev->level != 4) { - printk("raid5: %s: raid level not set to 4/5 (%d)\n", mdname(mddev), mddev->level); + printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n", + mdname(mddev), mddev->level); return -EIO; } -- cgit v1.2.3 From 6ff8d8ec06690f4011a6c3ad9e0759b9094f0601 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:15 -0800 Subject: [PATCH] md: allow dirty raid[456] arrays to be started at boot See patch to md.txt for more details Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 4 ++++ drivers/md/raid5.c | 15 +++++++++++---- drivers/md/raid6main.c | 13 +++++++++---- 3 files changed, 24 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 8175a2a222da..b4fb7247b3ed 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1937,6 +1937,7 @@ static void md_safemode_timeout(unsigned long data) md_wakeup_thread(mddev->thread); } +static int start_dirty_degraded; static int do_md_run(mddev_t * mddev) { @@ -2048,6 +2049,7 @@ static int do_md_run(mddev_t * mddev) mddev->recovery = 0; mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ mddev->barriers_work = 1; + mddev->ok_start_degraded = start_dirty_degraded; if (start_readonly) mddev->ro = 2; /* read-only, but switch on first write */ @@ -4509,6 +4511,8 @@ static int set_ro(const char *val, struct kernel_param *kp) } module_param_call(start_ro, set_ro, get_ro, NULL, 0600); +module_param(start_dirty_degraded, int, 0644); + EXPORT_SYMBOL(register_md_personality); EXPORT_SYMBOL(unregister_md_personality); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 334ff7a07283..53a0f2ce76c8 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1904,10 +1904,17 @@ static int run(mddev_t *mddev) if (mddev->degraded == 1 && mddev->recovery_cp != MaxSector) { - printk(KERN_ERR - "raid5: cannot start dirty degraded array for %s\n", - mdname(mddev)); - goto abort; + if (mddev->ok_start_degraded) + printk(KERN_WARNING + "raid5: starting dirty degraded array: %s" + "- data corruption possible.\n", + mdname(mddev)); + else { + printk(KERN_ERR + "raid5: cannot start dirty degraded array for %s\n", + mdname(mddev)); + goto abort; + } } { diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index 0000d162d198..9ac6dcd55127 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -1929,13 +1929,18 @@ static int run(mddev_t *mddev) goto abort; } -#if 0 /* FIX: For now */ if (mddev->degraded > 0 && mddev->recovery_cp != MaxSector) { - printk(KERN_ERR "raid6: cannot start dirty degraded array for %s\n", mdname(mddev)); - goto abort; + if (mddev->ok_start_degraded) + 
printk(KERN_WARNING "raid6: starting dirty degraded array:%s" + "- data corruption possible.\n", + mdname(mddev)); + else { + printk(KERN_ERR "raid6: cannot start dirty degraded array" + " for %s\n", mdname(mddev)); + goto abort; + } } -#endif { mddev->thread = md_register_thread(raid6d, mddev, "%s_raid6"); -- cgit v1.2.3 From b15c2e57f0f5bf596a19e9c5571e5b07cdfc7363 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:16 -0800 Subject: [PATCH] md: move bitmap_create to after md array has been initialised This is important because bitmap_create uses mddev->resync_max_sectors and that doesn't have a valid value until after the array has been initialised (with pers->run()). [It doesn't make a difference for current personalities that support bitmaps, but will make a difference for raid10] This has the added advantage of meaning with can move the thread->timeout manipulation inside the bitmap.c code instead of sprinkling identical code throughout all personalities. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/bitmap.c | 4 ++++ drivers/md/md.c | 16 +++++++++------- drivers/md/raid1.c | 8 -------- drivers/md/raid5.c | 11 +---------- drivers/md/raid6main.c | 11 +---------- 5 files changed, 15 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 252d55df9642..b65c36d9e240 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1530,6 +1530,8 @@ void bitmap_destroy(mddev_t *mddev) return; mddev->bitmap = NULL; /* disconnect from the md device */ + if (mddev->thread) + mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; bitmap_free(bitmap); } @@ -1636,6 +1638,8 @@ int bitmap_create(mddev_t *mddev) if (IS_ERR(bitmap->writeback_daemon)) return PTR_ERR(bitmap->writeback_daemon); + mddev->thread->timeout = bitmap->daemon_sleep * HZ; + return bitmap_update_sb(bitmap); error: diff --git a/drivers/md/md.c b/drivers/md/md.c index b4fb7247b3ed..ee199d462520 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2054,13 +2054,15 @@ static int do_md_run(mddev_t * mddev) if (start_readonly) mddev->ro = 2; /* read-only, but switch on first write */ - /* before we start the array running, initialise the bitmap */ - err = bitmap_create(mddev); - if (err) - printk(KERN_ERR "%s: failed to create bitmap (%d)\n", - mdname(mddev), err); - else - err = mddev->pers->run(mddev); + err = mddev->pers->run(mddev); + if (!err && mddev->pers->sync_request) { + err = bitmap_create(mddev); + if (err) { + printk(KERN_ERR "%s: failed to create bitmap (%d)\n", + mdname(mddev), err); + mddev->pers->stop(mddev); + } + } if (err) { printk(KERN_ERR "md: pers->run() failed ...\n"); module_put(mddev->pers->owner); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f5204149ab65..c618015f07f6 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1611,7 +1611,6 @@ static int run(mddev_t *mddev) mdname(mddev)); goto out_free_conf; } - if (mddev->bitmap) mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; printk(KERN_INFO "raid1: raid set %s active with %d out of %d mirrors\n", @@ -1783,13 +1782,6 @@ static void raid1_quiesce(mddev_t *mddev, int state) lower_barrier(conf); break; } - if (mddev->thread) { - if (mddev->bitmap) - mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; - else - mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; - md_wakeup_thread(mddev->thread); - } } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 53a0f2ce76c8..0d016a844ec6 100644 --- 
a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1964,9 +1964,6 @@ static int run(mddev_t *mddev) /* Ok, everything is just fine now */ sysfs_create_group(&mddev->kobj, &raid5_attrs_group); - if (mddev->bitmap) - mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; - mddev->queue->unplug_fn = raid5_unplug_device; mddev->queue->issue_flush_fn = raid5_issue_flush; @@ -2200,14 +2197,8 @@ static void raid5_quiesce(mddev_t *mddev, int state) spin_unlock_irq(&conf->device_lock); break; } - if (mddev->thread) { - if (mddev->bitmap) - mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; - else - mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; - md_wakeup_thread(mddev->thread); - } } + static mdk_personality_t raid5_personality= { .name = "raid5", diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index 9ac6dcd55127..304455d236f9 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -1990,9 +1990,6 @@ static int run(mddev_t *mddev) /* Ok, everything is just fine now */ mddev->array_size = mddev->size * (mddev->raid_disks - 2); - if (mddev->bitmap) - mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; - mddev->queue->unplug_fn = raid6_unplug_device; mddev->queue->issue_flush_fn = raid6_issue_flush; return 0; @@ -2228,14 +2225,8 @@ static void raid6_quiesce(mddev_t *mddev, int state) spin_unlock_irq(&conf->device_lock); break; } - if (mddev->thread) { - if (mddev->bitmap) - mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; - else - mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; - md_wakeup_thread(mddev->thread); - } } + static mdk_personality_t raid6_personality= { .name = "raid6", -- cgit v1.2.3 From 6cce3b23f6f8e974c00af7a9b88f1d413ba368a8 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:16 -0800 Subject: [PATCH] md: write intent bitmap support for raid10 Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 10 +-- drivers/md/raid10.c | 178 +++++++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 163 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index ee199d462520..64e7da3701a5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -714,9 +714,10 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) if (sb->state & (1<bitmap_file == NULL) { - if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6) { + if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6 + && mddev->level != 10) { /* FIXME use a better test */ - printk(KERN_WARNING "md: bitmaps only support for raid1\n"); + printk(KERN_WARNING "md: bitmaps not supported for this level.\n"); return -EINVAL; } mddev->bitmap_offset = mddev->default_bitmap_offset; @@ -1037,8 +1038,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && mddev->bitmap_file == NULL ) { - if (mddev->level != 1) { - printk(KERN_WARNING "md: bitmaps only supported for raid1\n"); + if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6 + && mddev->level != 10) { + printk(KERN_WARNING "md: bitmaps not supported for this level.\n"); return -EINVAL; } mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 50bd7b152f28..8f58a447d9f0 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -18,7 +18,9 @@ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#include "dm-bio-list.h" #include +#include /* * RAID10 provides a combination of RAID0 and RAID1 functionality. @@ -306,9 +308,11 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in /* * this branch is our 'one mirror IO has finished' event handler: */ - if (!uptodate) + if (!uptodate) { md_error(r10_bio->mddev, conf->mirrors[dev].rdev); - else + /* an I/O failed, we can't clear the bitmap */ + set_bit(R10BIO_Degraded, &r10_bio->state); + } else /* * Set R10BIO_Uptodate in our master bio, so that * we will return a good error code for to the higher @@ -328,6 +332,11 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in * already. */ if (atomic_dec_and_test(&r10_bio->remaining)) { + /* clear the bitmap if all writes complete successfully */ + bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, + r10_bio->sectors, + !test_bit(R10BIO_Degraded, &r10_bio->state), + 0); md_write_end(r10_bio->mddev); raid_end_bio_io(r10_bio); } @@ -486,8 +495,9 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) rcu_read_lock(); /* * Check if we can balance. We can balance on the whole - * device if no resync is going on, or below the resync window. - * We take the first readable disk when above the resync window. + * device if no resync is going on (recovery is ok), or below + * the resync window. We take the first readable disk when + * above the resync window. */ if (conf->mddev->recovery_cp < MaxSector && (this_sector + sectors >= conf->next_resync)) { @@ -591,7 +601,10 @@ static void unplug_slaves(mddev_t *mddev) static void raid10_unplug(request_queue_t *q) { + mddev_t *mddev = q->queuedata; + unplug_slaves(q->queuedata); + md_wakeup_thread(mddev->thread); } static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk, @@ -647,12 +660,13 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk, */ #define RESYNC_DEPTH 32 -static void raise_barrier(conf_t *conf) +static void raise_barrier(conf_t *conf, int force) { + BUG_ON(force && !conf->barrier); spin_lock_irq(&conf->resync_lock); - /* Wait until no block IO is waiting */ - wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, + /* Wait until no block IO is waiting (unless 'force') */ + wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, conf->resync_lock, raid10_unplug(conf->mddev->queue)); @@ -710,6 +724,8 @@ static int make_request(request_queue_t *q, struct bio * bio) int i; int chunk_sects = conf->chunk_mask + 1; const int rw = bio_data_dir(bio); + struct bio_list bl; + unsigned long flags; if (unlikely(bio_barrier(bio))) { bio_endio(bio, bio->bi_size, -EOPNOTSUPP); @@ -767,6 +783,7 @@ static int make_request(request_queue_t *q, struct bio * bio) r10_bio->mddev = mddev; r10_bio->sector = bio->bi_sector; + r10_bio->state = 0; if (rw == READ) { /* @@ -811,13 +828,16 @@ static int make_request(request_queue_t *q, struct bio * bio) !test_bit(Faulty, &rdev->flags)) { atomic_inc(&rdev->nr_pending); r10_bio->devs[i].bio = bio; - } else + } else { r10_bio->devs[i].bio = NULL; + set_bit(R10BIO_Degraded, &r10_bio->state); + } } rcu_read_unlock(); - atomic_set(&r10_bio->remaining, 1); + atomic_set(&r10_bio->remaining, 0); + bio_list_init(&bl); for (i = 0; i < conf->copies; i++) { struct bio *mbio; int d = r10_bio->devs[i].devnum; @@ -835,13 +855,14 @@ static int make_request(request_queue_t *q, struct bio * bio) mbio->bi_private = r10_bio; atomic_inc(&r10_bio->remaining); - generic_make_request(mbio); + bio_list_add(&bl, mbio); } - if 
(atomic_dec_and_test(&r10_bio->remaining)) { - md_write_end(mddev); - raid_end_bio_io(r10_bio); - } + bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0); + spin_lock_irqsave(&conf->device_lock, flags); + bio_list_merge(&conf->pending_bio_list, &bl); + blk_plug_device(mddev->queue); + spin_unlock_irqrestore(&conf->device_lock, flags); return 0; } @@ -999,7 +1020,12 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) if (!enough(conf)) return 0; - for (mirror=0; mirror < mddev->raid_disks; mirror++) + if (rdev->saved_raid_disk >= 0 && + conf->mirrors[rdev->saved_raid_disk].rdev == NULL) + mirror = rdev->saved_raid_disk; + else + mirror = 0; + for ( ; mirror < mddev->raid_disks; mirror++) if ( !(p=conf->mirrors+mirror)->rdev) { blk_queue_stack_limits(mddev->queue, @@ -1015,6 +1041,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) p->head_position = 0; rdev->raid_disk = mirror; found = 1; + if (rdev->saved_raid_disk != mirror) + conf->fullsync = 1; rcu_assign_pointer(p->rdev, rdev); break; } @@ -1282,6 +1310,26 @@ static void raid10d(mddev_t *mddev) for (;;) { char b[BDEVNAME_SIZE]; spin_lock_irqsave(&conf->device_lock, flags); + + if (conf->pending_bio_list.head) { + bio = bio_list_get(&conf->pending_bio_list); + blk_remove_plug(mddev->queue); + spin_unlock_irqrestore(&conf->device_lock, flags); + /* flush any pending bitmap writes to disk before proceeding w/ I/O */ + if (bitmap_unplug(mddev->bitmap) != 0) + printk("%s: bitmap file write failed!\n", mdname(mddev)); + + while (bio) { /* submit pending writes */ + struct bio *next = bio->bi_next; + bio->bi_next = NULL; + generic_make_request(bio); + bio = next; + } + unplug = 1; + + continue; + } + if (list_empty(head)) break; r10_bio = list_entry(head->prev, r10bio_t, retry_list); @@ -1388,6 +1436,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i sector_t max_sector, nr_sectors; int disk; int i; + int max_sync; + int sync_blocks; sector_t sectors_skipped = 0; int chunks_skipped = 0; @@ -1401,6 +1451,29 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) max_sector = mddev->resync_max_sectors; if (sector_nr >= max_sector) { + /* If we aborted, we need to abort the + * sync on the 'current' bitmap chucks (there can + * be several when recovering multiple devices). + * as we may have started syncing it but not finished. + * We can find the current address in + * mddev->curr_resync, but for recovery, + * we need to convert that to several + * virtual addresses. + */ + if (mddev->curr_resync < max_sector) { /* aborted */ + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + bitmap_end_sync(mddev->bitmap, mddev->curr_resync, + &sync_blocks, 1); + else for (i=0; iraid_disks; i++) { + sector_t sect = + raid10_find_virt(conf, mddev->curr_resync, i); + bitmap_end_sync(mddev->bitmap, sect, + &sync_blocks, 1); + } + } else /* completed sync */ + conf->fullsync = 0; + + bitmap_close_sync(mddev->bitmap); close_sync(conf); *skipped = 1; return sectors_skipped; @@ -1425,8 +1498,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i */ if (!go_faster && conf->nr_waiting) msleep_interruptible(1000); - raise_barrier(conf); - conf->next_resync = sector_nr; /* Again, very different code for resync and recovery. 
* Both must result in an r10bio with a list of bios that @@ -1443,6 +1514,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i * end_sync_write if we will want to write. */ + max_sync = RESYNC_PAGES << (PAGE_SHIFT-9); if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { /* recovery... the complicated one */ int i, j, k; @@ -1451,13 +1523,29 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i for (i=0 ; iraid_disks; i++) if (conf->mirrors[i].rdev && !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) { + int still_degraded = 0; /* want to reconstruct this device */ r10bio_t *rb2 = r10_bio; + sector_t sect = raid10_find_virt(conf, sector_nr, i); + int must_sync; + /* Unless we are doing a full sync, we only need + * to recover the block if it is set in the bitmap + */ + must_sync = bitmap_start_sync(mddev->bitmap, sect, + &sync_blocks, 1); + if (sync_blocks < max_sync) + max_sync = sync_blocks; + if (!must_sync && + !conf->fullsync) { + /* yep, skip the sync_blocks here, but don't assume + * that there will never be anything to do here + */ + chunks_skipped = -1; + continue; + } r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); - spin_lock_irq(&conf->resync_lock); - if (rb2) conf->barrier++; - spin_unlock_irq(&conf->resync_lock); + raise_barrier(conf, rb2 != NULL); atomic_set(&r10_bio->remaining, 0); r10_bio->master_bio = (struct bio*)rb2; @@ -1465,8 +1553,21 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i atomic_inc(&rb2->remaining); r10_bio->mddev = mddev; set_bit(R10BIO_IsRecover, &r10_bio->state); - r10_bio->sector = raid10_find_virt(conf, sector_nr, i); + r10_bio->sector = sect; + raid10_find_phys(conf, r10_bio); + /* Need to check if this section will still be + * degraded + */ + for (j=0; jcopies;j++) { + int d = r10_bio->devs[j].devnum; + if (conf->mirrors[d].rdev == NULL || + test_bit(Faulty, &conf->mirrors[d].rdev->flags)) + still_degraded = 1; + } + must_sync = bitmap_start_sync(mddev->bitmap, sect, + &sync_blocks, still_degraded); + for (j=0; jcopies;j++) { int d = r10_bio->devs[j].devnum; if (conf->mirrors[d].rdev && @@ -1526,10 +1627,22 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i } else { /* resync. 
Schedule a read for every block at this virt offset */ int count = 0; + + if (!bitmap_start_sync(mddev->bitmap, sector_nr, + &sync_blocks, mddev->degraded) && + !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + /* We can skip this block */ + *skipped = 1; + return sync_blocks + sectors_skipped; + } + if (sync_blocks < max_sync) + max_sync = sync_blocks; r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); r10_bio->mddev = mddev; atomic_set(&r10_bio->remaining, 0); + raise_barrier(conf, 0); + conf->next_resync = sector_nr; r10_bio->master_bio = NULL; r10_bio->sector = sector_nr; @@ -1582,6 +1695,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i } nr_sectors = 0; + if (sector_nr + max_sync < max_sector) + max_sector = sector_nr + max_sync; do { struct page *page; int len = PAGE_SIZE; @@ -1821,6 +1936,26 @@ static int stop(mddev_t *mddev) return 0; } +static void raid10_quiesce(mddev_t *mddev, int state) +{ + conf_t *conf = mddev_to_conf(mddev); + + switch(state) { + case 1: + raise_barrier(conf, 0); + break; + case 0: + lower_barrier(conf); + break; + } + if (mddev->thread) { + if (mddev->bitmap) + mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; + else + mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; + md_wakeup_thread(mddev->thread); + } +} static mdk_personality_t raid10_personality = { @@ -1835,6 +1970,7 @@ static mdk_personality_t raid10_personality = .hot_remove_disk= raid10_remove_disk, .spare_active = raid10_spare_active, .sync_request = sync_request, + .quiesce = raid10_quiesce, }; static int __init raid_init(void) -- cgit v1.2.3 From ca65b73bd9c301d243df93780f7b26579e6c9204 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:17 -0800 Subject: [PATCH] md: fix raid6 resync check/repair code raid6 currently does not check the P/Q syndromes when doing a resync, it just calculates the correct value and writes it. Doing the check can reduce writes (often to 0) for a resync, and it is needed to properly implement the echo check > sync_action operation. This patch implements the appropriate checks and tidies up some related code. It also allows raid6 user-requested resync to bypass the intent bitmap. 
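For orientation only: a user-space sketch of the check-versus-repair decision described above. Plain XOR parity stands in for the real P/Q syndromes, and stripe_buf / parity_needs_rewrite are made-up names, not the kernel's API.

#include <stdint.h>
#include <string.h>

#define STRIPE_SIZE 4096

struct stripe_buf {
	uint8_t data[2][STRIPE_SIZE];	/* two data blocks (toy example)  */
	uint8_t parity[STRIPE_SIZE];	/* parity block as read from disk */
};

/* Recompute parity into a scratch buffer and compare it with what is on
 * disk.  Returns 1 if the on-disk parity must be rewritten (repair mode),
 * 0 otherwise; in check-only mode the mismatch is counted but never
 * repaired, which is the semantics described above.
 */
int parity_needs_rewrite(const struct stripe_buf *sb, int check_only,
			 unsigned long *mismatches)
{
	uint8_t tmp[STRIPE_SIZE];
	size_t i;

	for (i = 0; i < STRIPE_SIZE; i++)
		tmp[i] = sb->data[0][i] ^ sb->data[1][i];

	if (memcmp(tmp, sb->parity, STRIPE_SIZE) == 0)
		return 0;		/* syndrome is correct, no write needed */

	(*mismatches)++;		/* e.g. resync_mismatches accounting    */
	return check_only ? 0 : 1;	/* "check" never repairs, resync does   */
}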
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid6main.c | 182 ++++++++++++++++++++++++++++--------------------- 1 file changed, 106 insertions(+), 76 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index 304455d236f9..52e8796bb8ac 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -805,7 +805,7 @@ static void compute_parity(struct stripe_head *sh, int method) } /* Compute one missing block */ -static void compute_block_1(struct stripe_head *sh, int dd_idx) +static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) { raid6_conf_t *conf = sh->raid_conf; int i, count, disks = conf->raid_disks; @@ -821,7 +821,7 @@ static void compute_block_1(struct stripe_head *sh, int dd_idx) compute_parity(sh, UPDATE_PARITY); } else { ptr[0] = page_address(sh->dev[dd_idx].page); - memset(ptr[0], 0, STRIPE_SIZE); + if (!nozero) memset(ptr[0], 0, STRIPE_SIZE); count = 1; for (i = disks ; i--; ) { if (i == dd_idx || i == qd_idx) @@ -838,7 +838,8 @@ static void compute_block_1(struct stripe_head *sh, int dd_idx) } if (count != 1) xor_block(count, STRIPE_SIZE, ptr); - set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); + if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); + else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); } } @@ -871,7 +872,7 @@ static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) return; } else { /* We're missing D+Q; recompute D from P */ - compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1); + compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0); compute_parity(sh, UPDATE_PARITY); /* Is this necessary? */ return; } @@ -982,6 +983,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in } +static int page_is_zero(struct page *p) +{ + char *a = page_address(p); + return ((*(u32*)a) == 0 && + memcmp(a, a+4, STRIPE_SIZE-4)==0); +} /* * handle_stripe - do things to a stripe. * @@ -1000,7 +1007,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in * */ -static void handle_stripe(struct stripe_head *sh) +static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) { raid6_conf_t *conf = sh->raid_conf; int disks = conf->raid_disks; @@ -1228,7 +1235,7 @@ static void handle_stripe(struct stripe_head *sh) if (uptodate == disks-1) { PRINTK("Computing stripe %llu block %d\n", (unsigned long long)sh->sector, i); - compute_block_1(sh, i); + compute_block_1(sh, i, 0); uptodate++; } else if ( uptodate == disks-2 && failed >= 2 ) { /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */ @@ -1323,7 +1330,7 @@ static void handle_stripe(struct stripe_head *sh) /* We have failed blocks and need to compute them */ switch ( failed ) { case 0: BUG(); - case 1: compute_block_1(sh, failed_num[0]); break; + case 1: compute_block_1(sh, failed_num[0], 0); break; case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break; default: BUG(); /* This request should have been failed? */ } @@ -1338,12 +1345,10 @@ static void handle_stripe(struct stripe_head *sh) (unsigned long long)sh->sector, i); locked++; set_bit(R5_Wantwrite, &sh->dev[i].flags); -#if 0 /**** FIX: I don't understand the logic here... ****/ - if (!test_bit(R5_Insync, &sh->dev[i].flags) - || ((i==pd_idx || i==qd_idx) && failed == 0)) /* FIX? 
*/ - set_bit(STRIPE_INSYNC, &sh->state); -#endif } + /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ + set_bit(STRIPE_INSYNC, &sh->state); + if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { atomic_dec(&conf->preread_active_stripes); if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) @@ -1356,79 +1361,97 @@ static void handle_stripe(struct stripe_head *sh) * Any reads will already have been scheduled, so we just see if enough data * is available */ - if (syncing && locked == 0 && - !test_bit(STRIPE_INSYNC, &sh->state) && failed <= 2) { - set_bit(STRIPE_HANDLE, &sh->state); -#if 0 /* RAID-6: Don't support CHECK PARITY yet */ - if (failed == 0) { - char *pagea; - if (uptodate != disks) - BUG(); - compute_parity(sh, CHECK_PARITY); - uptodate--; - pagea = page_address(sh->dev[pd_idx].page); - if ((*(u32*)pagea) == 0 && - !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) { - /* parity is correct (on disc, not in buffer any more) */ - set_bit(STRIPE_INSYNC, &sh->state); - } - } -#endif - if (!test_bit(STRIPE_INSYNC, &sh->state)) { - int failed_needupdate[2]; - struct r5dev *adev, *bdev; - - if ( failed < 1 ) - failed_num[0] = pd_idx; - if ( failed < 2 ) - failed_num[1] = (failed_num[0] == qd_idx) ? pd_idx : qd_idx; + if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) { + int update_p = 0, update_q = 0; + struct r5dev *dev; - failed_needupdate[0] = !test_bit(R5_UPTODATE, &sh->dev[failed_num[0]].flags); - failed_needupdate[1] = !test_bit(R5_UPTODATE, &sh->dev[failed_num[1]].flags); + set_bit(STRIPE_HANDLE, &sh->state); - PRINTK("sync: failed=%d num=%d,%d fnu=%u%u\n", - failed, failed_num[0], failed_num[1], failed_needupdate[0], failed_needupdate[1]); + BUG_ON(failed>2); + BUG_ON(uptodate < disks); + /* Want to check and possibly repair P and Q. + * However there could be one 'failed' device, in which + * case we can only check one of them, possibly using the + * other to generate missing data + */ -#if 0 /* RAID-6: This code seems to require that CHECK_PARITY destroys the uptodateness of the parity */ - /* should be able to compute the missing block(s) and write to spare */ - if ( failed_needupdate[0] ^ failed_needupdate[1] ) { - if (uptodate+1 != disks) - BUG(); - compute_block_1(sh, failed_needupdate[0] ? failed_num[0] : failed_num[1]); - uptodate++; - } else if ( failed_needupdate[0] & failed_needupdate[1] ) { - if (uptodate+2 != disks) - BUG(); - compute_block_2(sh, failed_num[0], failed_num[1]); - uptodate += 2; + /* If !tmp_page, we cannot do the calculations, + * but as we have set STRIPE_HANDLE, we will soon be called + * by stripe_handle with a tmp_page - just wait until then. + */ + if (tmp_page) { + if (failed == q_failed) { + /* The only possible failed device holds 'Q', so it makes + * sense to check P (If anything else were failed, we would + * have used P to recreate it). 
+ */ + compute_block_1(sh, pd_idx, 1); + if (!page_is_zero(sh->dev[pd_idx].page)) { + compute_block_1(sh,pd_idx,0); + update_p = 1; + } + } + if (!q_failed && failed < 2) { + /* q is not failed, and we didn't use it to generate + * anything, so it makes sense to check it + */ + memcpy(page_address(tmp_page), + page_address(sh->dev[qd_idx].page), + STRIPE_SIZE); + compute_parity(sh, UPDATE_PARITY); + if (memcmp(page_address(tmp_page), + page_address(sh->dev[qd_idx].page), + STRIPE_SIZE)!= 0) { + clear_bit(STRIPE_INSYNC, &sh->state); + update_q = 1; + } + } + if (update_p || update_q) { + conf->mddev->resync_mismatches += STRIPE_SECTORS; + if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) + /* don't try to repair!! */ + update_p = update_q = 0; } -#else - compute_block_2(sh, failed_num[0], failed_num[1]); - uptodate += failed_needupdate[0] + failed_needupdate[1]; -#endif - if (uptodate != disks) - BUG(); + /* now write out any block on a failed drive, + * or P or Q if they need it + */ - PRINTK("Marking for sync stripe %llu blocks %d,%d\n", - (unsigned long long)sh->sector, failed_num[0], failed_num[1]); + if (failed == 2) { + dev = &sh->dev[failed_num[1]]; + locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + set_bit(R5_Syncio, &dev->flags); + } + if (failed >= 1) { + dev = &sh->dev[failed_num[0]]; + locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + set_bit(R5_Syncio, &dev->flags); + } - /**** FIX: Should we really do both of these unconditionally? ****/ - adev = &sh->dev[failed_num[0]]; - locked += !test_bit(R5_LOCKED, &adev->flags); - set_bit(R5_LOCKED, &adev->flags); - set_bit(R5_Wantwrite, &adev->flags); - bdev = &sh->dev[failed_num[1]]; - locked += !test_bit(R5_LOCKED, &bdev->flags); - set_bit(R5_LOCKED, &bdev->flags); + if (update_p) { + dev = &sh->dev[pd_idx]; + locked ++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + set_bit(R5_Syncio, &dev->flags); + } + if (update_q) { + dev = &sh->dev[qd_idx]; + locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + set_bit(R5_Syncio, &dev->flags); + } clear_bit(STRIPE_DEGRADED, &sh->state); - set_bit(R5_Wantwrite, &bdev->flags); set_bit(STRIPE_INSYNC, &sh->state); - set_bit(R5_Syncio, &adev->flags); - set_bit(R5_Syncio, &bdev->flags); } } + if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { md_done_sync(conf->mddev, STRIPE_SECTORS,1); clear_bit(STRIPE_SYNCING, &sh->state); @@ -1664,7 +1687,7 @@ static int make_request (request_queue_t *q, struct bio * bi) } finish_wait(&conf->wait_for_overlap, &w); raid6_plug_device(conf); - handle_stripe(sh); + handle_stripe(sh, NULL); release_stripe(sh); } else { /* cannot get stripe for read-ahead, just give-up */ @@ -1728,6 +1751,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i return rv; } if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { /* we can skip this block, and probably more */ sync_blocks /= STRIPE_SECTORS; @@ -1765,7 +1789,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i clear_bit(STRIPE_INSYNC, &sh->state); spin_unlock(&sh->lock); - handle_stripe(sh); + handle_stripe(sh, NULL); release_stripe(sh); return STRIPE_SECTORS; @@ -1821,7 +1845,7 @@ static void raid6d (mddev_t *mddev) spin_unlock_irq(&conf->device_lock); handled++; - handle_stripe(sh); + 
handle_stripe(sh, conf->spare_page); release_stripe(sh); spin_lock_irq(&conf->device_lock); @@ -1860,6 +1884,10 @@ static int run(mddev_t *mddev) goto abort; memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE); + conf->spare_page = alloc_page(GFP_KERNEL); + if (!conf->spare_page) + goto abort; + spin_lock_init(&conf->device_lock); init_waitqueue_head(&conf->wait_for_stripe); init_waitqueue_head(&conf->wait_for_overlap); @@ -1996,6 +2024,8 @@ static int run(mddev_t *mddev) abort: if (conf) { print_raid6_conf(conf); + if (conf->spare_page) + page_cache_release(conf->spare_page); if (conf->stripe_hashtbl) free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); -- cgit v1.2.3 From d69762e98456b71167865db9e33e732a28dd36ab Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:18 -0800 Subject: [PATCH] md: improve handing of read errors with raid6 This is a simple port of match functionality across from raid5. If we get a read error, we don't kick the drive straight away, but try to over-write with good data first. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid6main.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 66 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index 52e8796bb8ac..7a51553d8be5 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -367,8 +367,8 @@ static void shrink_stripes(raid6_conf_t *conf) conf->slab_cache = NULL; } -static int raid6_end_read_request (struct bio * bi, unsigned int bytes_done, - int error) +static int raid6_end_read_request(struct bio * bi, unsigned int bytes_done, + int error) { struct stripe_head *sh = bi->bi_private; raid6_conf_t *conf = sh->raid_conf; @@ -420,9 +420,35 @@ static int raid6_end_read_request (struct bio * bi, unsigned int bytes_done, #else set_bit(R5_UPTODATE, &sh->dev[i].flags); #endif + if (test_bit(R5_ReadError, &sh->dev[i].flags)) { + printk(KERN_INFO "raid6: read error corrected!!\n"); + clear_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReWrite, &sh->dev[i].flags); + } + if (atomic_read(&conf->disks[i].rdev->read_errors)) + atomic_set(&conf->disks[i].rdev->read_errors, 0); } else { - md_error(conf->mddev, conf->disks[i].rdev); + int retry = 0; clear_bit(R5_UPTODATE, &sh->dev[i].flags); + atomic_inc(&conf->disks[i].rdev->read_errors); + if (conf->mddev->degraded) + printk(KERN_WARNING "raid6: read error not correctable.\n"); + else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) + /* Oh, no!!! 
*/ + printk(KERN_WARNING "raid6: read error NOT corrected!!\n"); + else if (atomic_read(&conf->disks[i].rdev->read_errors) + > conf->max_nr_stripes) + printk(KERN_WARNING + "raid6: Too many read errors, failing device.\n"); + else + retry = 1; + if (retry) + set_bit(R5_ReadError, &sh->dev[i].flags); + else { + clear_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReWrite, &sh->dev[i].flags); + md_error(conf->mddev, conf->disks[i].rdev); + } } rdev_dec_pending(conf->disks[i].rdev, conf->mddev); #if 0 @@ -1079,6 +1105,12 @@ static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) if (dev->written) written++; rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */ if (!rdev || !test_bit(In_sync, &rdev->flags)) { + /* The ReadError flag will just be confusing now */ + clear_bit(R5_ReadError, &dev->flags); + clear_bit(R5_ReWrite, &dev->flags); + } + if (!rdev || !test_bit(In_sync, &rdev->flags) + || test_bit(R5_ReadError, &dev->flags)) { if ( failed < 2 ) failed_num[failed] = i; failed++; @@ -1095,6 +1127,14 @@ static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) if (failed > 2 && to_read+to_write+written) { for (i=disks; i--; ) { int bitmap_end = 0; + + if (test_bit(R5_ReadError, &sh->dev[i].flags)) { + mdk_rdev_t *rdev = conf->disks[i].rdev; + if (rdev && test_bit(In_sync, &rdev->flags)) + /* multiple read failures in one stripe */ + md_error(conf->mddev, rdev); + } + spin_lock_irq(&conf->device_lock); /* fail all writes first */ bi = sh->dev[i].towrite; @@ -1130,7 +1170,8 @@ static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) } /* fail any reads if this device is non-operational */ - if (!test_bit(R5_Insync, &sh->dev[i].flags)) { + if (!test_bit(R5_Insync, &sh->dev[i].flags) || + test_bit(R5_ReadError, &sh->dev[i].flags)) { bi = sh->dev[i].toread; sh->dev[i].toread = NULL; if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) @@ -1457,6 +1498,27 @@ static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) clear_bit(STRIPE_SYNCING, &sh->state); } + /* If the failed drives are just a ReadError, then we might need + * to progress the repair/check process + */ + if (failed <= 2 && ! conf->mddev->ro) + for (i=0; idev[failed_num[i]]; + if (test_bit(R5_ReadError, &dev->flags) + && !test_bit(R5_LOCKED, &dev->flags) + && test_bit(R5_UPTODATE, &dev->flags) + ) { + if (!test_bit(R5_ReWrite, &dev->flags)) { + set_bit(R5_Wantwrite, &dev->flags); + set_bit(R5_ReWrite, &dev->flags); + set_bit(R5_LOCKED, &dev->flags); + } else { + /* let's read it back */ + set_bit(R5_Wantread, &dev->flags); + set_bit(R5_LOCKED, &dev->flags); + } + } + } spin_unlock(&sh->lock); while ((bi=return_bi)) { -- cgit v1.2.3 From ddaf22abaa831763e75775e6d4c7693504237997 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:19 -0800 Subject: [PATCH] md: attempt to auto-correct read errors in raid1 On a read-error we suspend the array, then synchronously read the block from other arrays until we find one where we can read it. Then we try writing the good data back everywhere and make sure it works. If any write or subsequent read fails, only then do we fail the device out of the array. To be able to suspend the array, we need to also keep track of how many requests are queued for handling by raid1d. 
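A rough user-space sketch of the read/overwrite/re-read loop described above. dev_read(), dev_write() and fail_device() are hypothetical hooks standing in for sync_page_io() and md_error(); the real code also freezes the array first and works one page-sized chunk at a time.

#include <stddef.h>

#define NDEVS 4
#define BLK   4096

/* hypothetical synchronous helpers supplied by the caller; return 1 on success */
extern int  dev_read(int dev, unsigned long long sector, void *buf, size_t len);
extern int  dev_write(int dev, unsigned long long sector, const void *buf, size_t len);
extern void fail_device(int dev);

/* Try to repair one block that failed to read on 'bad_dev'. */
void fix_read_error(unsigned long long sector, int bad_dev)
{
	char good[BLK], verify[BLK];
	int d, found = -1;

	/* 1. find any other mirror that can still read the block */
	for (d = 0; d < NDEVS; d++)
		if (d != bad_dev && dev_read(d, sector, good, BLK)) {
			found = d;
			break;
		}

	if (found < 0) {
		fail_device(bad_dev);	/* cannot read from anywhere */
		return;
	}

	/* 2. write the good data back to the other mirrors and re-read it;
	 *    only a failed write or failed re-read kicks a device out      */
	for (d = 0; d < NDEVS; d++) {
		if (d == found)
			continue;
		if (!dev_write(d, sector, good, BLK) ||
		    !dev_read(d, sector, verify, BLK))
			fail_device(d);
	}
}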
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 1 + drivers/md/raid1.c | 115 ++++++++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 106 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 64e7da3701a5..1364a1c97e6f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -461,6 +461,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size, bio_put(bio); return ret; } +EXPORT_SYMBOL(sync_page_io); static int read_disk_sb(mdk_rdev_t * rdev, int size) { diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index c618015f07f6..b3856db8d6c2 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -191,6 +191,7 @@ static void reschedule_retry(r1bio_t *r1_bio) spin_lock_irqsave(&conf->device_lock, flags); list_add(&r1_bio->retry_list, &conf->retry_list); + conf->nr_queued ++; spin_unlock_irqrestore(&conf->device_lock, flags); wake_up(&conf->wait_barrier); @@ -245,9 +246,9 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int /* * this branch is our 'one mirror IO has finished' event handler: */ - if (!uptodate) - md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); - else + update_head_pos(mirror, r1_bio); + + if (uptodate || conf->working_disks <= 1) { /* * Set R1BIO_Uptodate in our master bio, so that * we will return a good error code for to the higher @@ -259,14 +260,8 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int */ set_bit(R1BIO_Uptodate, &r1_bio->state); - update_head_pos(mirror, r1_bio); - - /* - * we have only one bio on the read side - */ - if (uptodate) raid_end_bio_io(r1_bio); - else { + } else { /* * oops, read error: */ @@ -653,6 +648,32 @@ static void allow_barrier(conf_t *conf) wake_up(&conf->wait_barrier); } +static void freeze_array(conf_t *conf) +{ + /* stop syncio and normal IO and wait for everything to + * go quite. + * We increment barrier and nr_waiting, and then + * wait until barrier+nr_pending match nr_queued+2 + */ + spin_lock_irq(&conf->resync_lock); + conf->barrier++; + conf->nr_waiting++; + wait_event_lock_irq(conf->wait_barrier, + conf->barrier+conf->nr_pending == conf->nr_queued+2, + conf->resync_lock, + raid1_unplug(conf->mddev->queue)); + spin_unlock_irq(&conf->resync_lock); +} +static void unfreeze_array(conf_t *conf) +{ + /* reverse the effect of the freeze */ + spin_lock_irq(&conf->resync_lock); + conf->barrier--; + conf->nr_waiting--; + wake_up(&conf->wait_barrier); + spin_unlock_irq(&conf->resync_lock); +} + /* duplicate the data pages for behind I/O */ static struct page **alloc_behind_pages(struct bio *bio) @@ -1196,6 +1217,7 @@ static void raid1d(mddev_t *mddev) break; r1_bio = list_entry(head->prev, r1bio_t, retry_list); list_del(head->prev); + conf->nr_queued--; spin_unlock_irqrestore(&conf->device_lock, flags); mddev = r1_bio->mddev; @@ -1235,6 +1257,74 @@ static void raid1d(mddev_t *mddev) } } else { int disk; + + /* we got a read error. Maybe the drive is bad. Maybe just + * the block and we can fix it. + * We freeze all other IO, and try reading the block from + * other devices. When we find one, we re-write + * and check it that fixes the read error. 
+ * This is all done synchronously while the array is + * frozen + */ + sector_t sect = r1_bio->sector; + int sectors = r1_bio->sectors; + freeze_array(conf); + while(sectors) { + int s = sectors; + int d = r1_bio->read_disk; + int success = 0; + + if (s > (PAGE_SIZE>>9)) + s = PAGE_SIZE >> 9; + + do { + rdev = conf->mirrors[d].rdev; + if (rdev && + test_bit(In_sync, &rdev->flags) && + sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, + conf->tmppage, READ)) + success = 1; + else { + d++; + if (d == conf->raid_disks) + d = 0; + } + } while (!success && d != r1_bio->read_disk); + + if (success) { + /* write it back and re-read */ + while (d != r1_bio->read_disk) { + if (d==0) + d = conf->raid_disks; + d--; + rdev = conf->mirrors[d].rdev; + if (rdev && + test_bit(In_sync, &rdev->flags)) { + if (sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, conf->tmppage, WRITE) == 0 || + sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, conf->tmppage, READ) == 0) { + /* Well, this device is dead */ + md_error(mddev, rdev); + } + } + } + } else { + /* Cannot read from anywhere -- bye bye array */ + md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); + break; + } + sectors -= s; + sect += s; + } + + + unfreeze_array(conf); + bio = r1_bio->bios[r1_bio->read_disk]; if ((disk=read_balance(conf, r1_bio)) == -1) { printk(KERN_ALERT "raid1: %s: unrecoverable I/O" @@ -1529,6 +1619,10 @@ static int run(mddev_t *mddev) memset(conf->mirrors, 0, sizeof(struct mirror_info)*mddev->raid_disks); + conf->tmppage = alloc_page(GFP_KERNEL); + if (!conf->tmppage) + goto out_no_mem; + conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL); if (!conf->poolinfo) goto out_no_mem; @@ -1635,6 +1729,7 @@ out_free_conf: if (conf->r1bio_pool) mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); + __free_page(conf->tmppage); kfree(conf->poolinfo); kfree(conf); mddev->private = NULL; -- cgit v1.2.3 From 3e198f7826f830604f3aa7c20359a773e70cbeaa Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:21 -0800 Subject: [PATCH] md: tidyup some issues with raid1 resync and prepare for catching read errors We are dereferencing ->rdev without an rcu lock! 
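As a rough illustration of the resync target selection in the raid1.c hunk below (made-up types, not kernel code): every in-sync device becomes a read candidate, every present but out-of-sync device becomes a write target, and a write-mostly device is only chosen as the read disk if nothing else is readable.

struct mirror {
	int present;		/* rdev != NULL and not Faulty */
	int in_sync;
	int write_mostly;
};

struct targets {
	int read_disk;		/* -1 if nothing readable */
	int read_targets;
	int write_targets;
};

struct targets pick_sync_targets(const struct mirror *m, int ndisks)
{
	struct targets t = { -1, 0, 0 };
	int i, wonly = -1;

	for (i = 0; i < ndisks; i++) {
		if (!m[i].present)
			continue;		/* still degraded, skip          */
		if (!m[i].in_sync) {
			t.write_targets++;	/* will be rebuilt by the resync */
		} else {
			t.read_targets++;	/* may be read from              */
			if (m[i].write_mostly) {
				if (wonly < 0)
					wonly = i;
			} else if (t.read_disk < 0) {
				t.read_disk = i;
			}
		}
	}
	if (t.read_disk < 0)
		t.read_disk = wonly;		/* last resort: write-mostly disk */
	return t;
}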
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid1.c | 110 +++++++++++++++++++++++++++-------------------------- 1 file changed, 56 insertions(+), 54 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index b3856db8d6c2..ea1f1eb93c77 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -177,6 +177,13 @@ static inline void free_r1bio(r1bio_t *r1_bio) static inline void put_buf(r1bio_t *r1_bio) { conf_t *conf = mddev_to_conf(r1_bio->mddev); + int i; + + for (i=0; iraid_disks; i++) { + struct bio *bio = r1_bio->bios[i]; + if (bio->bi_end_io) + rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); + } mempool_free(r1_bio, conf->r1buf_pool); @@ -1085,7 +1092,6 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) conf->mirrors[r1_bio->read_disk].rdev); } else set_bit(R1BIO_Uptodate, &r1_bio->state); - rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev); reschedule_retry(r1_bio); return 0; } @@ -1116,7 +1122,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error) md_done_sync(mddev, r1_bio->sectors, uptodate); put_buf(r1_bio); } - rdev_dec_pending(conf->mirrors[mirror].rdev, mddev); return 0; } @@ -1153,10 +1158,14 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) atomic_set(&r1_bio->remaining, 1); for (i = 0; i < disks ; i++) { wbio = r1_bio->bios[i]; - if (wbio->bi_end_io != end_sync_write) + if (wbio->bi_end_io == NULL || + (wbio->bi_end_io == end_sync_read && + (i == r1_bio->read_disk || + !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) continue; - atomic_inc(&conf->mirrors[i].rdev->nr_pending); + wbio->bi_rw = WRITE; + wbio->bi_end_io = end_sync_write; atomic_inc(&r1_bio->remaining); md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9); @@ -1388,14 +1397,13 @@ static int init_resync(conf_t *conf) static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) { conf_t *conf = mddev_to_conf(mddev); - mirror_info_t *mirror; r1bio_t *r1_bio; struct bio *bio; sector_t max_sector, nr_sectors; - int disk; + int disk = -1; int i; - int wonly; - int write_targets = 0; + int wonly = -1; + int write_targets = 0, read_targets = 0; int sync_blocks; int still_degraded = 0; @@ -1447,44 +1455,24 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i conf->next_resync = sector_nr; + r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); + rcu_read_lock(); /* - * If reconstructing, and >1 working disc, - * could dedicate one to rebuild and others to - * service read requests .. + * If we get a correctably read error during resync or recovery, + * we might want to read from a different device. So we + * flag all drives that could conceivably be read from for READ, + * and any others (which will be non-In_sync devices) for WRITE. + * If a read fails, we try reading from something else for which READ + * is OK. 
*/ - disk = conf->last_used; - /* make sure disk is operational */ - wonly = disk; - while (conf->mirrors[disk].rdev == NULL || - !test_bit(In_sync, &conf->mirrors[disk].rdev->flags) || - test_bit(WriteMostly, &conf->mirrors[disk].rdev->flags) - ) { - if (conf->mirrors[disk].rdev && - test_bit(In_sync, &conf->mirrors[disk].rdev->flags)) - wonly = disk; - if (disk <= 0) - disk = conf->raid_disks; - disk--; - if (disk == conf->last_used) { - disk = wonly; - break; - } - } - conf->last_used = disk; - atomic_inc(&conf->mirrors[disk].rdev->nr_pending); - - - mirror = conf->mirrors + disk; - - r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); r1_bio->mddev = mddev; r1_bio->sector = sector_nr; r1_bio->state = 0; set_bit(R1BIO_IsSync, &r1_bio->state); - r1_bio->read_disk = disk; for (i=0; i < conf->raid_disks; i++) { + mdk_rdev_t *rdev; bio = r1_bio->bios[i]; /* take from bio_init */ @@ -1499,35 +1487,49 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i bio->bi_end_io = NULL; bio->bi_private = NULL; - if (i == disk) { - bio->bi_rw = READ; - bio->bi_end_io = end_sync_read; - } else if (conf->mirrors[i].rdev == NULL || - test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { + rdev = rcu_dereference(conf->mirrors[i].rdev); + if (rdev == NULL || + test_bit(Faulty, &rdev->flags)) { still_degraded = 1; continue; - } else if (!test_bit(In_sync, &conf->mirrors[i].rdev->flags) || - sector_nr + RESYNC_SECTORS > mddev->recovery_cp || - test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + } else if (!test_bit(In_sync, &rdev->flags)) { bio->bi_rw = WRITE; bio->bi_end_io = end_sync_write; write_targets ++; - } else - /* no need to read or write here */ - continue; - bio->bi_sector = sector_nr + conf->mirrors[i].rdev->data_offset; - bio->bi_bdev = conf->mirrors[i].rdev->bdev; + } else { + /* may need to read from here */ + bio->bi_rw = READ; + bio->bi_end_io = end_sync_read; + if (test_bit(WriteMostly, &rdev->flags)) { + if (wonly < 0) + wonly = i; + } else { + if (disk < 0) + disk = i; + } + read_targets++; + } + atomic_inc(&rdev->nr_pending); + bio->bi_sector = sector_nr + rdev->data_offset; + bio->bi_bdev = rdev->bdev; bio->bi_private = r1_bio; } + rcu_read_unlock(); + if (disk < 0) + disk = wonly; + r1_bio->read_disk = disk; - if (write_targets == 0) { + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) + /* extra read targets are also write targets */ + write_targets += read_targets-1; + + if (write_targets == 0 || read_targets == 0) { /* There is nowhere to write, so all non-sync * drives must be failed - so we are finished */ sector_t rv = max_sector - sector_nr; *skipped = 1; put_buf(r1_bio); - rdev_dec_pending(conf->mirrors[disk].rdev, mddev); return rv; } @@ -1578,10 +1580,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i sync_blocks -= (len>>9); } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES); bio_full: - bio = r1_bio->bios[disk]; + bio = r1_bio->bios[r1_bio->read_disk]; r1_bio->sectors = nr_sectors; - md_sync_acct(mirror->rdev->bdev, nr_sectors); + md_sync_acct(conf->mirrors[r1_bio->read_disk].rdev->bdev, nr_sectors); generic_make_request(bio); -- cgit v1.2.3 From 69382e85371c232df71524137a806b9c210ec021 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:22 -0800 Subject: [PATCH] md: better handling for read error in raid1 during resync Handling of read errors during resync is separate from handling of read errors during normal IO in raid1. 
A previous patch added support for read errors during normal IO. This one adds support for read errors during resync or recovery. The key differences are that we don't need to freeze the array, because the normal handling of resync means that this part of the array will be idle except for resync, and the read/overwrite/re-read is needed in a separate piece of code. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid1.c | 99 ++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 78 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index ea1f1eb93c77..14a8fe0349c7 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1072,9 +1072,7 @@ abort: static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) { - int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); - conf_t *conf = mddev_to_conf(r1_bio->mddev); if (bio->bi_size) return 1; @@ -1087,10 +1085,7 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) * or re-read if the read failed. * We don't do much here, just schedule handling by raid1d */ - if (!uptodate) { - md_error(r1_bio->mddev, - conf->mirrors[r1_bio->read_disk].rdev); - } else + if (test_bit(BIO_UPTODATE, &bio->bi_flags)) set_bit(R1BIO_Uptodate, &r1_bio->state); reschedule_retry(r1_bio); return 0; @@ -1134,27 +1129,89 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) bio = r1_bio->bios[r1_bio->read_disk]; -/* - if (r1_bio->sector == 0) printk("First sync write startss\n"); -*/ + /* * schedule writes */ if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { - /* - * There is no point trying a read-for-reconstruct as - * reconstruct is about to be aborted + /* ouch - failed to read all of that. + * Try some synchronous reads of other devices to get + * good data, much like with normal read errors. Only + * read into the pages we already have so they we don't + * need to re-issue the read request. + * We don't need to freeze the array, because being in an + * active sync request, there is no normal IO, and + * no overlapping syncs. 
*/ - char b[BDEVNAME_SIZE]; - printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error" - " for block %llu\n", - bdevname(bio->bi_bdev,b), - (unsigned long long)r1_bio->sector); - md_done_sync(mddev, r1_bio->sectors, 0); - put_buf(r1_bio); - return; + sector_t sect = r1_bio->sector; + int sectors = r1_bio->sectors; + int idx = 0; + + while(sectors) { + int s = sectors; + int d = r1_bio->read_disk; + int success = 0; + mdk_rdev_t *rdev; + + if (s > (PAGE_SIZE>>9)) + s = PAGE_SIZE >> 9; + do { + if (r1_bio->bios[d]->bi_end_io == end_sync_read) { + rdev = conf->mirrors[d].rdev; + if (sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, + bio->bi_io_vec[idx].bv_page, + READ)) { + success = 1; + break; + } + } + d++; + if (d == conf->raid_disks) + d = 0; + } while (!success && d != r1_bio->read_disk); + + if (success) { + /* write it back and re-read */ + set_bit(R1BIO_Uptodate, &r1_bio->state); + while (d != r1_bio->read_disk) { + if (d == 0) + d = conf->raid_disks; + d--; + if (r1_bio->bios[d]->bi_end_io != end_sync_read) + continue; + rdev = conf->mirrors[d].rdev; + if (sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, + bio->bi_io_vec[idx].bv_page, + WRITE) == 0 || + sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, + bio->bi_io_vec[idx].bv_page, + READ) == 0) { + md_error(mddev, rdev); + } + } + } else { + char b[BDEVNAME_SIZE]; + /* Cannot read from anywhere, array is toast */ + md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); + printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error" + " for block %llu\n", + bdevname(bio->bi_bdev,b), + (unsigned long long)r1_bio->sector); + md_done_sync(mddev, r1_bio->sectors, 0); + put_buf(r1_bio); + return; + } + sectors -= s; + sect += s; + idx ++; + } } - atomic_set(&r1_bio->remaining, 1); for (i = 0; i < disks ; i++) { wbio = r1_bio->bios[i]; -- cgit v1.2.3 From cf30a473a02901fe4db37abc0b0fa26dd5ba3f72 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:23 -0800 Subject: [PATCH] md: handle errors when read-only Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid1.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 14a8fe0349c7..a8bc93d6ff63 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -154,7 +154,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio) for (i = 0; i < conf->raid_disks; i++) { struct bio **bio = r1_bio->bios + i; - if (*bio) + if (*bio && *bio != IO_BLOCKED) bio_put(*bio); *bio = NULL; } @@ -419,11 +419,13 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) new_disk = 0; for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); + r1_bio->bios[new_disk] == IO_BLOCKED || !rdev || !test_bit(In_sync, &rdev->flags) || test_bit(WriteMostly, &rdev->flags); rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) { - if (rdev && test_bit(In_sync, &rdev->flags)) + if (rdev && test_bit(In_sync, &rdev->flags) && + r1_bio->bios[new_disk] != IO_BLOCKED) wonly_disk = new_disk; if (new_disk == conf->raid_disks - 1) { @@ -437,11 +439,13 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) /* make sure the disk is operational */ for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); + r1_bio->bios[new_disk] == IO_BLOCKED || !rdev || !test_bit(In_sync, &rdev->flags) || test_bit(WriteMostly, &rdev->flags); rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) { - if (rdev && test_bit(In_sync, 
&rdev->flags)) + if (rdev && test_bit(In_sync, &rdev->flags) && + r1_bio->bios[new_disk] != IO_BLOCKED) wonly_disk = new_disk; if (new_disk <= 0) @@ -478,7 +482,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) rdev = rcu_dereference(conf->mirrors[disk].rdev); - if (!rdev || + if (!rdev || r1_bio->bios[disk] == IO_BLOCKED || !test_bit(In_sync, &rdev->flags) || test_bit(WriteMostly, &rdev->flags)) continue; @@ -1335,7 +1339,7 @@ static void raid1d(mddev_t *mddev) sector_t sect = r1_bio->sector; int sectors = r1_bio->sectors; freeze_array(conf); - while(sectors) { + if (mddev->ro == 0) while(sectors) { int s = sectors; int d = r1_bio->read_disk; int success = 0; @@ -1388,7 +1392,6 @@ static void raid1d(mddev_t *mddev) sect += s; } - unfreeze_array(conf); bio = r1_bio->bios[r1_bio->read_disk]; @@ -1399,7 +1402,8 @@ static void raid1d(mddev_t *mddev) (unsigned long long)r1_bio->sector); raid_end_bio_io(r1_bio); } else { - r1_bio->bios[r1_bio->read_disk] = NULL; + r1_bio->bios[r1_bio->read_disk] = + mddev->ro ? IO_BLOCKED : NULL; r1_bio->read_disk = disk; bio_put(bio); bio = bio_clone(r1_bio->master_bio, GFP_NOIO); -- cgit v1.2.3 From 9910f16af35419a5382fa7850eecc220103036fa Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:24 -0800 Subject: [PATCH] md: fix up some rdev rcu locking in raid5/6 There is this "FIXME" comment with a typo in it!! that been annoying me for days, so I just had to remove it. conf->disks[i].rdev should only be accessed if - we know we hold a reference or - the mddev->reconfig_sem is down or - we have a rcu_readlock handle_stripe was referencing rdev in three places without any of these. For the first two, get an rcu_readlock. For the last, the same access (md_sync_acct call) is made a little later after the rdev has been claimed under and rcu_readlock, if R5_Syncio is set. So just use that access... However R5_Syncio isn't really needed as the 'syncing' variable contains the same information. So use that instead. Issues, comment, and fix are identical in raid5 and raid6. 
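In practice the locking rule above reduces to a short pattern. The following sketch is illustrative only (the helper name is invented and is not part of this patch), but it uses the same rcu_read_lock()/rcu_dereference() calls that the diff below adds around the rdev accesses in handle_stripe():

	/* Illustrative helper, not in the patch: a transient peek at
	 * conf->disks[i].rdev must happen inside an RCU read-side
	 * critical section, and the pointer must be fetched with
	 * rcu_dereference().
	 */
	static int rdev_is_in_sync(raid5_conf_t *conf, int i)
	{
		mdk_rdev_t *rdev;
		int ret;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		ret = rdev && test_bit(In_sync, &rdev->flags);
		rcu_read_unlock();

		return ret;
	}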
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid5.c | 16 ++++++++-------- drivers/md/raid6main.c | 19 ++++++++----------- 2 files changed, 16 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 0d016a844ec6..0222ba1a6d35 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -961,11 +961,11 @@ static void handle_stripe(struct stripe_head *sh) syncing = test_bit(STRIPE_SYNCING, &sh->state); /* Now to look around and see what can be done */ + rcu_read_lock(); for (i=disks; i--; ) { mdk_rdev_t *rdev; dev = &sh->dev[i]; clear_bit(R5_Insync, &dev->flags); - clear_bit(R5_Syncio, &dev->flags); PRINTK("check %d: state 0x%lx read %p write %p written %p\n", i, dev->flags, dev->toread, dev->towrite, dev->written); @@ -1004,7 +1004,7 @@ static void handle_stripe(struct stripe_head *sh) non_overwrite++; } if (dev->written) written++; - rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */ + rdev = rcu_dereference(conf->disks[i].rdev); if (!rdev || !test_bit(In_sync, &rdev->flags)) { /* The ReadError flag will just be confusing now */ clear_bit(R5_ReadError, &dev->flags); @@ -1017,6 +1017,7 @@ static void handle_stripe(struct stripe_head *sh) } else set_bit(R5_Insync, &dev->flags); } + rcu_read_unlock(); PRINTK("locked=%d uptodate=%d to_read=%d" " to_write=%d failed=%d failed_num=%d\n", locked, uptodate, to_read, to_write, failed, failed_num); @@ -1028,10 +1029,13 @@ static void handle_stripe(struct stripe_head *sh) int bitmap_end = 0; if (test_bit(R5_ReadError, &sh->dev[i].flags)) { - mdk_rdev_t *rdev = conf->disks[i].rdev; + mdk_rdev_t *rdev; + rcu_read_lock(); + rdev = rcu_dereference(conf->disks[i].rdev); if (rdev && test_bit(In_sync, &rdev->flags)) /* multiple read failures in one stripe */ md_error(conf->mddev, rdev); + rcu_read_unlock(); } spin_lock_irq(&conf->device_lock); @@ -1180,9 +1184,6 @@ static void handle_stripe(struct stripe_head *sh) locked++; PRINTK("Reading block %d (sync=%d)\n", i, syncing); - if (syncing) - md_sync_acct(conf->disks[i].rdev->bdev, - STRIPE_SECTORS); } } } @@ -1326,7 +1327,6 @@ static void handle_stripe(struct stripe_head *sh) clear_bit(STRIPE_DEGRADED, &sh->state); locked++; set_bit(STRIPE_INSYNC, &sh->state); - set_bit(R5_Syncio, &dev->flags); } } if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { @@ -1392,7 +1392,7 @@ static void handle_stripe(struct stripe_head *sh) rcu_read_unlock(); if (rdev) { - if (test_bit(R5_Syncio, &sh->dev[i].flags)) + if (syncing) md_sync_acct(rdev->bdev, STRIPE_SECTORS); bi->bi_bdev = rdev->bdev; diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index 7a51553d8be5..b5b7a8d0b165 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -1060,11 +1060,11 @@ static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) syncing = test_bit(STRIPE_SYNCING, &sh->state); /* Now to look around and see what can be done */ + rcu_read_lock(); for (i=disks; i--; ) { mdk_rdev_t *rdev; dev = &sh->dev[i]; clear_bit(R5_Insync, &dev->flags); - clear_bit(R5_Syncio, &dev->flags); PRINTK("check %d: state 0x%lx read %p write %p written %p\n", i, dev->flags, dev->toread, dev->towrite, dev->written); @@ -1103,7 +1103,7 @@ static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) non_overwrite++; } if (dev->written) written++; - rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */ + rdev = rcu_dereference(conf->disks[i].rdev); if (!rdev || 
!test_bit(In_sync, &rdev->flags)) { /* The ReadError flag will just be confusing now */ clear_bit(R5_ReadError, &dev->flags); @@ -1117,6 +1117,7 @@ static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) } else set_bit(R5_Insync, &dev->flags); } + rcu_read_unlock(); PRINTK("locked=%d uptodate=%d to_read=%d" " to_write=%d failed=%d failed_num=%d,%d\n", locked, uptodate, to_read, to_write, failed, @@ -1129,10 +1130,13 @@ static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) int bitmap_end = 0; if (test_bit(R5_ReadError, &sh->dev[i].flags)) { - mdk_rdev_t *rdev = conf->disks[i].rdev; + mdk_rdev_t *rdev; + rcu_read_lock(); + rdev = rcu_dereference(conf->disks[i].rdev); if (rdev && test_bit(In_sync, &rdev->flags)) /* multiple read failures in one stripe */ md_error(conf->mddev, rdev); + rcu_read_unlock(); } spin_lock_irq(&conf->device_lock); @@ -1307,9 +1311,6 @@ static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) locked++; PRINTK("Reading block %d (sync=%d)\n", i, syncing); - if (syncing) - md_sync_acct(conf->disks[i].rdev->bdev, - STRIPE_SECTORS); } } } @@ -1463,14 +1464,12 @@ static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) locked++; set_bit(R5_LOCKED, &dev->flags); set_bit(R5_Wantwrite, &dev->flags); - set_bit(R5_Syncio, &dev->flags); } if (failed >= 1) { dev = &sh->dev[failed_num[0]]; locked++; set_bit(R5_LOCKED, &dev->flags); set_bit(R5_Wantwrite, &dev->flags); - set_bit(R5_Syncio, &dev->flags); } if (update_p) { @@ -1478,14 +1477,12 @@ static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) locked ++; set_bit(R5_LOCKED, &dev->flags); set_bit(R5_Wantwrite, &dev->flags); - set_bit(R5_Syncio, &dev->flags); } if (update_q) { dev = &sh->dev[qd_idx]; locked++; set_bit(R5_LOCKED, &dev->flags); set_bit(R5_Wantwrite, &dev->flags); - set_bit(R5_Syncio, &dev->flags); } clear_bit(STRIPE_DEGRADED, &sh->state); @@ -1557,7 +1554,7 @@ static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) rcu_read_unlock(); if (rdev) { - if (test_bit(R5_Syncio, &sh->dev[i].flags)) + if (syncing) md_sync_acct(rdev->bdev, STRIPE_SECTORS); bi->bi_bdev = rdev->bdev; -- cgit v1.2.3 From 18f08819f42b647783e4f6ea99141623881bf182 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:25 -0800 Subject: [PATCH] md: support check-without-repair of raid10 arrays Also keep count on the number of errors found. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid10.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 8f58a447d9f0..1fa70c34b7d2 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1206,6 +1206,10 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio) break; if (j == vcnt) continue; + mddev->resync_mismatches += r10_bio->sectors; + if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) + /* Don't fix anything. */ + continue; /* Ok, we need to write this bio * First we need to fixup bv_offset, bv_len and * bi_vecs, as the read request might have corrupted these -- cgit v1.2.3 From d11c171e636cfd2df818cf3411d88222c2f4fcef Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:26 -0800 Subject: [PATCH] md: allow raid1 to check consistency Where performing a user-requested 'check' or 'repair', we read all readable devices, and compare the contents. 
We only write to blocks which had read errors, or blocks with content that differs from the first good device found. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid1.c | 156 +++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 128 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a8bc93d6ff63..7fbb60883280 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -106,15 +106,30 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) } /* * Allocate RESYNC_PAGES data pages and attach them to - * the first bio; + * the first bio. + * If this is a user-requested check/repair, allocate + * RESYNC_PAGES for each bio. */ - bio = r1_bio->bios[0]; - for (i = 0; i < RESYNC_PAGES; i++) { - page = alloc_page(gfp_flags); - if (unlikely(!page)) - goto out_free_pages; - - bio->bi_io_vec[i].bv_page = page; + if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) + j = pi->raid_disks; + else + j = 1; + while(j--) { + bio = r1_bio->bios[j]; + for (i = 0; i < RESYNC_PAGES; i++) { + page = alloc_page(gfp_flags); + if (unlikely(!page)) + goto out_free_pages; + + bio->bi_io_vec[i].bv_page = page; + } + } + /* If not user-requests, copy the page pointers to all bios */ + if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) { + for (i=0; iraid_disks; j++) + r1_bio->bios[j]->bi_io_vec[i].bv_page = + r1_bio->bios[0]->bi_io_vec[i].bv_page; } r1_bio->master_bio = NULL; @@ -122,8 +137,10 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) return r1_bio; out_free_pages: - for ( ; i > 0 ; i--) - __free_page(bio->bi_io_vec[i-1].bv_page); + for (i=0; i < RESYNC_PAGES ; i++) + for (j=0 ; j < pi->raid_disks; j++) + __free_page(r1_bio->bios[j]->bi_io_vec[i].bv_page); + j = -1; out_free_bio: while ( ++j < pi->raid_disks ) bio_put(r1_bio->bios[j]); @@ -134,14 +151,16 @@ out_free_bio: static void r1buf_pool_free(void *__r1_bio, void *data) { struct pool_info *pi = data; - int i; + int i,j; r1bio_t *r1bio = __r1_bio; - struct bio *bio = r1bio->bios[0]; - for (i = 0; i < RESYNC_PAGES; i++) { - __free_page(bio->bi_io_vec[i].bv_page); - bio->bi_io_vec[i].bv_page = NULL; - } + for (i = 0; i < RESYNC_PAGES; i++) + for (j = pi->raid_disks; j-- ;) { + if (j == 0 || + r1bio->bios[j]->bi_io_vec[i].bv_page != + r1bio->bios[0]->bi_io_vec[i].bv_page) + __free_page(r1bio->bios[j]->bi_io_vec[i].bv_page); + } for (i=0 ; i < pi->raid_disks; i++) bio_put(r1bio->bios[i]); @@ -1077,13 +1096,16 @@ abort: static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) { r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); + int i; if (bio->bi_size) return 1; - if (r1_bio->bios[r1_bio->read_disk] != bio) - BUG(); - update_head_pos(r1_bio->read_disk, r1_bio); + for (i=r1_bio->mddev->raid_disks; i--; ) + if (r1_bio->bios[i] == bio) + break; + BUG_ON(i < 0); + update_head_pos(i, r1_bio); /* * we have read a block, now it needs to be re-written, * or re-read if the read failed. 
@@ -1091,7 +1113,9 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) */ if (test_bit(BIO_UPTODATE, &bio->bi_flags)) set_bit(R1BIO_Uptodate, &r1_bio->state); - reschedule_retry(r1_bio); + + if (atomic_dec_and_test(&r1_bio->remaining)) + reschedule_retry(r1_bio); return 0; } @@ -1134,9 +1158,65 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) bio = r1_bio->bios[r1_bio->read_disk]; - /* - * schedule writes - */ + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + /* We have read all readable devices. If we haven't + * got the block, then there is no hope left. + * If we have, then we want to do a comparison + * and skip the write if everything is the same. + * If any blocks failed to read, then we need to + * attempt an over-write + */ + int primary; + if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { + for (i=0; iraid_disks; i++) + if (r1_bio->bios[i]->bi_end_io == end_sync_read) + md_error(mddev, conf->mirrors[i].rdev); + + md_done_sync(mddev, r1_bio->sectors, 1); + put_buf(r1_bio); + return; + } + for (primary=0; primaryraid_disks; primary++) + if (r1_bio->bios[primary]->bi_end_io == end_sync_read && + test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) { + r1_bio->bios[primary]->bi_end_io = NULL; + break; + } + r1_bio->read_disk = primary; + for (i=0; iraid_disks; i++) + if (r1_bio->bios[i]->bi_end_io == end_sync_read && + test_bit(BIO_UPTODATE, &r1_bio->bios[i]->bi_flags)) { + int j; + int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9); + struct bio *pbio = r1_bio->bios[primary]; + struct bio *sbio = r1_bio->bios[i]; + for (j = vcnt; j-- ; ) + if (memcmp(page_address(pbio->bi_io_vec[j].bv_page), + page_address(sbio->bi_io_vec[j].bv_page), + PAGE_SIZE)) + break; + if (j >= 0) + mddev->resync_mismatches += r1_bio->sectors; + if (j < 0 || test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) + sbio->bi_end_io = NULL; + else { + /* fixup the bio for reuse */ + sbio->bi_vcnt = vcnt; + sbio->bi_size = r1_bio->sectors << 9; + sbio->bi_idx = 0; + sbio->bi_phys_segments = 0; + sbio->bi_hw_segments = 0; + sbio->bi_hw_front_size = 0; + sbio->bi_hw_back_size = 0; + sbio->bi_flags &= ~(BIO_POOL_MASK - 1); + sbio->bi_flags |= 1 << BIO_UPTODATE; + sbio->bi_next = NULL; + sbio->bi_sector = r1_bio->sector + + conf->mirrors[i].rdev->data_offset; + sbio->bi_bdev = conf->mirrors[i].rdev->bdev; + } + } + } if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { /* ouch - failed to read all of that. 
* Try some synchronous reads of other devices to get @@ -1216,6 +1296,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) idx ++; } } + + /* + * schedule writes + */ atomic_set(&r1_bio->remaining, 1); for (i = 0; i < disks ; i++) { wbio = r1_bio->bios[i]; @@ -1618,10 +1702,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i for (i=0 ; i < conf->raid_disks; i++) { bio = r1_bio->bios[i]; if (bio->bi_end_io) { - page = r1_bio->bios[0]->bi_io_vec[bio->bi_vcnt].bv_page; + page = bio->bi_io_vec[bio->bi_vcnt].bv_page; if (bio_add_page(bio, page, len, 0) == 0) { /* stop here */ - r1_bio->bios[0]->bi_io_vec[bio->bi_vcnt].bv_page = page; + bio->bi_io_vec[bio->bi_vcnt].bv_page = page; while (i > 0) { i--; bio = r1_bio->bios[i]; @@ -1641,12 +1725,28 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i sync_blocks -= (len>>9); } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES); bio_full: - bio = r1_bio->bios[r1_bio->read_disk]; r1_bio->sectors = nr_sectors; - md_sync_acct(conf->mirrors[r1_bio->read_disk].rdev->bdev, nr_sectors); + /* For a user-requested sync, we read all readable devices and do a + * compare + */ + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + atomic_set(&r1_bio->remaining, read_targets); + for (i=0; i<conf->raid_disks; i++) { + bio = r1_bio->bios[i]; + if (bio->bi_end_io == end_sync_read) { + md_sync_acct(conf->mirrors[i].rdev->bdev, nr_sectors); + generic_make_request(bio); + } + } + } else { + atomic_set(&r1_bio->remaining, 1); + bio = r1_bio->bios[r1_bio->read_disk]; + md_sync_acct(conf->mirrors[r1_bio->read_disk].rdev->bdev, + nr_sectors); + generic_make_request(bio); - generic_make_request(bio); + } return nr_sectors; } -- cgit v1.2.3 From 220946c9018de74b952446e3a4dff1bfd4cbf310 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:27 -0800 Subject: [PATCH] md: make sure read error on last working drive of raid1 actually returns failure We are inadvertently setting the R1BIO_Uptodate bit on read errors when we decide not to try correcting (because there are no other working devices). This means that the read error is reported to the client as success. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid1.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 7fbb60883280..6c10f28bc25e 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -284,7 +284,8 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int * user-side. So if something waits for IO, then it will * wait for the 'master' bio. */ - set_bit(R1BIO_Uptodate, &r1_bio->state); + if (uptodate) + set_bit(R1BIO_Uptodate, &r1_bio->state); raid_end_bio_io(r1_bio); } else { -- cgit v1.2.3 From 4443ae10ca15d07922ceda622f03db8865fa3d13 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:28 -0800 Subject: [PATCH] md: auto-correct correctable read errors in raid10 Largely just a cross-port from raid1.
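The sequence being cross-ported can be summarised as follows; this is a simplified sketch (the helper and its 'from'/'to' rdev parameters are invented for illustration, and the real raid10d() code in the diff below walks the copies slot by slot and one page-sized chunk at a time):

	static void fix_read_error_sketch(conf_t *conf, mddev_t *mddev,
					  mdk_rdev_t *from, mdk_rdev_t *to,
					  sector_t sect, int s)
	{
		freeze_array(conf);	/* no normal IO or resync while we poke at the disks */

		if (sync_page_io(from->bdev, sect + from->data_offset,
				 s << 9, conf->tmppage, READ) == 0) {
			/* no good copy could be read anywhere: give up on this device */
			md_error(mddev, to);
		} else if (sync_page_io(to->bdev, sect + to->data_offset,
					s << 9, conf->tmppage, WRITE) == 0 ||
			   sync_page_io(to->bdev, sect + to->data_offset,
					s << 9, conf->tmppage, READ) == 0) {
			/* write-back or verifying re-read failed: fail that device */
			md_error(mddev, to);
		}

		unfreeze_array(conf);
	}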
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid10.c | 127 +++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 112 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 1fa70c34b7d2..64bb4ddc6798 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -209,6 +209,7 @@ static void reschedule_retry(r10bio_t *r10_bio) spin_lock_irqsave(&conf->device_lock, flags); list_add(&r10_bio->retry_list, &conf->retry_list); + conf->nr_queued ++; spin_unlock_irqrestore(&conf->device_lock, flags); md_wakeup_thread(mddev->thread); @@ -254,9 +255,9 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int /* * this branch is our 'one mirror IO has finished' event handler: */ - if (!uptodate) - md_error(r10_bio->mddev, conf->mirrors[dev].rdev); - else + update_head_pos(slot, r10_bio); + + if (uptodate) { /* * Set R10BIO_Uptodate in our master bio, so that * we will return a good error code to the higher @@ -267,15 +268,8 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int * wait for the 'master' bio. */ set_bit(R10BIO_Uptodate, &r10_bio->state); - - update_head_pos(slot, r10_bio); - - /* - * we have only one bio on the read side - */ - if (uptodate) raid_end_bio_io(r10_bio); - else { + } else { /* * oops, read error: */ @@ -714,6 +708,33 @@ static void allow_barrier(conf_t *conf) wake_up(&conf->wait_barrier); } +static void freeze_array(conf_t *conf) +{ + /* stop syncio and normal IO and wait for everything to + * go quite. + * We increment barrier and nr_waiting, and then + * wait until barrier+nr_pending match nr_queued+2 + */ + spin_lock_irq(&conf->resync_lock); + conf->barrier++; + conf->nr_waiting++; + wait_event_lock_irq(conf->wait_barrier, + conf->barrier+conf->nr_pending == conf->nr_queued+2, + conf->resync_lock, + raid10_unplug(conf->mddev->queue)); + spin_unlock_irq(&conf->resync_lock); +} + +static void unfreeze_array(conf_t *conf) +{ + /* reverse the effect of the freeze */ + spin_lock_irq(&conf->resync_lock); + conf->barrier--; + conf->nr_waiting--; + wake_up(&conf->wait_barrier); + spin_unlock_irq(&conf->resync_lock); +} + static int make_request(request_queue_t *q, struct bio * bio) { mddev_t *mddev = q->queuedata; @@ -1338,6 +1359,7 @@ static void raid10d(mddev_t *mddev) break; r10_bio = list_entry(head->prev, r10bio_t, retry_list); list_del(head->prev); + conf->nr_queued--; spin_unlock_irqrestore(&conf->device_lock, flags); mddev = r10_bio->mddev; @@ -1350,6 +1372,78 @@ static void raid10d(mddev_t *mddev) unplug = 1; } else { int mirror; + /* we got a read error. Maybe the drive is bad. Maybe just + * the block and we can fix it. + * We freeze all other IO, and try reading the block from + * other devices. When we find one, we re-write + * and check it that fixes the read error. + * This is all done synchronously while the array is + * frozen. 
+ */ + int sect = 0; /* Offset from r10_bio->sector */ + int sectors = r10_bio->sectors; + freeze_array(conf); + if (mddev->ro == 0) while(sectors) { + int s = sectors; + int sl = r10_bio->read_slot; + int success = 0; + + if (s > (PAGE_SIZE>>9)) + s = PAGE_SIZE >> 9; + + do { + int d = r10_bio->devs[sl].devnum; + rdev = conf->mirrors[d].rdev; + if (rdev && + test_bit(In_sync, &rdev->flags) && + sync_page_io(rdev->bdev, + r10_bio->devs[sl].addr + + sect + rdev->data_offset, + s<<9, + conf->tmppage, READ)) + success = 1; + else { + sl++; + if (sl == conf->copies) + sl = 0; + } + } while (!success && sl != r10_bio->read_slot); + + if (success) { + /* write it back and re-read */ + while (sl != r10_bio->read_slot) { + int d; + if (sl==0) + sl = conf->copies; + sl--; + d = r10_bio->devs[sl].devnum; + rdev = conf->mirrors[d].rdev; + if (rdev && + test_bit(In_sync, &rdev->flags)) { + if (sync_page_io(rdev->bdev, + r10_bio->devs[sl].addr + + sect + rdev->data_offset, + s<<9, conf->tmppage, WRITE) == 0 || + sync_page_io(rdev->bdev, + r10_bio->devs[sl].addr + + sect + rdev->data_offset, + s<<9, conf->tmppage, READ) == 0) { + /* Well, this device is dead */ + md_error(mddev, rdev); + } + } + } + } else { + /* Cannot read from anywhere -- bye bye array */ + md_error(mddev, conf->mirrors[r10_bio->devs[r10_bio->read_slot].devnum].rdev); + break; + } + sectors -= s; + sect += s; + } + + unfreeze_array(conf); + bio = r10_bio->devs[r10_bio->read_slot].bio; r10_bio->devs[r10_bio->read_slot].bio = NULL; bio_put(bio); @@ -1793,22 +1887,24 @@ static int run(mddev_t *mddev) * bookkeeping area. [whatever we allocate in run(), * should be freed in stop()] */ - conf = kmalloc(sizeof(conf_t), GFP_KERNEL); + conf = kzalloc(sizeof(conf_t), GFP_KERNEL); mddev->private = conf; if (!conf) { printk(KERN_ERR "raid10: couldn't allocate memory for %s\n", mdname(mddev)); goto out; } - memset(conf, 0, sizeof(*conf)); - conf->mirrors = kmalloc(sizeof(struct mirror_info)*mddev->raid_disks, + conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks, GFP_KERNEL); if (!conf->mirrors) { printk(KERN_ERR "raid10: couldn't allocate memory for %s\n", mdname(mddev)); goto out_free_conf; } - memset(conf->mirrors, 0, sizeof(struct mirror_info)*mddev->raid_disks); + + conf->tmppage = alloc_page(GFP_KERNEL); + if (!conf->tmppage) + goto out_free_conf; conf->near_copies = nc; conf->far_copies = fc; @@ -1918,6 +2014,7 @@ static int run(mddev_t *mddev) out_free_conf: if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); + put_page(conf->tmppage); kfree(conf->mirrors); kfree(conf); mddev->private = NULL; -- cgit v1.2.3 From 0eb3ff12aa8a12538ef681dc83f4361636a0699f Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:29 -0800 Subject: [PATCH] md: raid10 read-error handling - resync and read-only Add in correct read-error handling for resync and read-only situations. When read-only, we don't over-write, so we need to mark the failed drive in the r10_bio so we don't re-try it. During resync, we always read all blocks, so if there is a read error, we simply over-write it with the good block that we found (assuming we found one). Note that the recovery case still isn't handled in an interesting way. There is nothing useful to do for the 2-copies case. If there are 3 or more copies, then we could try reading from one of the non-missing copies, but this is a bit complicated and very rarely would be used, so I'm leaving it for now. 
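The read-only case relies on a sentinel value rather than on failing the device. A minimal sketch of the idea (the helper is invented, and the caller is assumed to hold rcu_read_lock(), as read_balance() does):

	static int slot_is_usable(conf_t *conf, r10bio_t *r10_bio, int slot)
	{
		int disk = r10_bio->devs[slot].devnum;
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[disk].rdev);

		/* IO_BLOCKED marks a copy that already failed while the
		 * array was read-only; never hand it out again */
		if (r10_bio->devs[slot].bio == IO_BLOCKED)
			return 0;
		return rdev != NULL && test_bit(In_sync, &rdev->flags);
	}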
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid10.c | 56 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 35 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 64bb4ddc6798..3f8df2ecbae3 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -172,7 +172,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio) for (i = 0; i < conf->copies; i++) { struct bio **bio = & r10_bio->devs[i].bio; - if (*bio) + if (*bio && *bio != IO_BLOCKED) bio_put(*bio); *bio = NULL; } @@ -500,6 +500,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) disk = r10_bio->devs[slot].devnum; while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL || + r10_bio->devs[slot].bio == IO_BLOCKED || !test_bit(In_sync, &rdev->flags)) { slot++; if (slot == conf->copies) { @@ -517,6 +518,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) slot = 0; disk = r10_bio->devs[slot].devnum; while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL || + r10_bio->devs[slot].bio == IO_BLOCKED || !test_bit(In_sync, &rdev->flags)) { slot ++; if (slot == conf->copies) { @@ -537,6 +539,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL || + r10_bio->devs[nslot].bio == IO_BLOCKED || !test_bit(In_sync, &rdev->flags)) continue; @@ -1104,7 +1107,6 @@ abort: static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) { - int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); conf_t *conf = mddev_to_conf(r10_bio->mddev); int i,d; @@ -1119,7 +1121,10 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) BUG(); update_head_pos(i, r10_bio); d = r10_bio->devs[i].devnum; - if (!uptodate) + + if (test_bit(BIO_UPTODATE, &bio->bi_flags)) + set_bit(R10BIO_Uptodate, &r10_bio->state); + else if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) md_error(r10_bio->mddev, conf->mirrors[d].rdev); @@ -1209,25 +1214,30 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio) fbio = r10_bio->devs[i].bio; /* now find blocks with errors */ - for (i=first+1 ; i < conf->copies ; i++) { - int vcnt, j, d; + for (i=0 ; i < conf->copies ; i++) { + int j, d; + int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9); - if (!test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) - continue; - /* We know that the bi_io_vec layout is the same for - * both 'first' and 'i', so we just compare them. - * All vec entries are PAGE_SIZE; - */ tbio = r10_bio->devs[i].bio; - vcnt = r10_bio->sectors >> (PAGE_SHIFT-9); - for (j = 0; j < vcnt; j++) - if (memcmp(page_address(fbio->bi_io_vec[j].bv_page), - page_address(tbio->bi_io_vec[j].bv_page), - PAGE_SIZE)) - break; - if (j == vcnt) + + if (tbio->bi_end_io != end_sync_read) + continue; + if (i == first) continue; - mddev->resync_mismatches += r10_bio->sectors; + if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) { + /* We know that the bi_io_vec layout is the same for + * both 'first' and 'i', so we just compare them. + * All vec entries are PAGE_SIZE; + */ + for (j = 0; j < vcnt; j++) + if (memcmp(page_address(fbio->bi_io_vec[j].bv_page), + page_address(tbio->bi_io_vec[j].bv_page), + PAGE_SIZE)) + break; + if (j == vcnt) + continue; + mddev->resync_mismatches += r10_bio->sectors; + } if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) /* Don't fix anything. 
*/ continue; @@ -1308,7 +1318,10 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) atomic_inc(&conf->mirrors[d].rdev->nr_pending); md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9); - generic_make_request(wbio); + if (test_bit(R10BIO_Uptodate, &r10_bio->state)) + generic_make_request(wbio); + else + bio_endio(wbio, wbio->bi_size, -EIO); } @@ -1445,7 +1458,8 @@ static void raid10d(mddev_t *mddev) unfreeze_array(conf); bio = r10_bio->devs[r10_bio->read_slot].bio; - r10_bio->devs[r10_bio->read_slot].bio = NULL; + r10_bio->devs[r10_bio->read_slot].bio = + mddev->ro ? IO_BLOCKED : NULL; bio_put(bio); mirror = read_balance(conf, r10_bio); if (mirror == -1) { -- cgit v1.2.3 From d7603b7e3a7f802c67f9190b2387d4d5d111ec14 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:30 -0800 Subject: [PATCH] md: make /proc/mdstat pollable With this patch it is possible to poll /proc/mdstat to detect arrays appearing or disappearing, to detect failures, recovery starting, recovery completing, and devices being added and removed. It is similar to the poll-ability of /proc/mounts, though different in that: We always report that the file is readable (because face it, it is, even if only for EOF). We report POLLPRI when there is a change so that select() can detect it as an exceptional event. Not only are these exceptional events, but that is the mechanism that the current 'mdadm' uses to watch for events (It also polls after a timeout). (We also report POLLERR like /proc/mounts). Finally, we only reset the per-file event counter when the start of the file is read, rather than when poll() returns an event. This is more robust as it means that an fd will continue to report activity to poll/select until the program clearly responds to that activity. md_new_event takes an 'mddev' which isn't currently used, but it will be soon. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 81 +++++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 76 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 1364a1c97e6f..6101879a730f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -42,6 +42,7 @@ #include #include /* for invalidate_bdev */ #include +#include #include @@ -133,6 +134,24 @@ static struct block_device_operations md_fops; static int start_readonly; +/* + * We have a system wide 'event count' that is incremented + * on any 'interesting' event, and readers of /proc/mdstat + * can use 'poll' or 'select' to find out when the event + * count increases. + * + * Events are: + * start array, stop array, error, add device, remove device, + * start build, activate spare + */ +DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); +static atomic_t md_event_count; +void md_new_event(mddev_t *mddev) +{ + atomic_inc(&md_event_count); + wake_up(&md_event_waiters); +} + /* * Enables to iterate over all existing md arrays * all_mddevs_lock protects this list. 
@@ -2111,6 +2130,7 @@ static int do_md_run(mddev_t * mddev) mddev->queue->make_request_fn = mddev->pers->make_request; mddev->changed = 1; + md_new_event(mddev); return 0; } @@ -2238,6 +2258,7 @@ static int do_md_stop(mddev_t * mddev, int ro) printk(KERN_INFO "md: %s switched to read-only mode.\n", mdname(mddev)); err = 0; + md_new_event(mddev); out: return err; } @@ -2712,6 +2733,7 @@ static int hot_remove_disk(mddev_t * mddev, dev_t dev) kick_rdev_from_array(rdev); md_update_sb(mddev); + md_new_event(mddev); return 0; busy: @@ -2802,7 +2824,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) */ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); - + md_new_event(mddev); return 0; abort_unbind_export: @@ -3531,6 +3553,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev) set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); + md_new_event(mddev); } /* seq_file implementation /proc/mdstat */ @@ -3671,12 +3694,17 @@ static void md_seq_stop(struct seq_file *seq, void *v) mddev_put(mddev); } +struct mdstat_info { + int event; +}; + static int md_seq_show(struct seq_file *seq, void *v) { mddev_t *mddev = v; sector_t size; struct list_head *tmp2; mdk_rdev_t *rdev; + struct mdstat_info *mi = seq->private; int i; struct bitmap *bitmap; @@ -3689,6 +3717,7 @@ static int md_seq_show(struct seq_file *seq, void *v) spin_unlock(&pers_lock); seq_printf(seq, "\n"); + mi->event = atomic_read(&md_event_count); return 0; } if (v == (void*)2) { @@ -3797,16 +3826,52 @@ static struct seq_operations md_seq_ops = { static int md_seq_open(struct inode *inode, struct file *file) { int error; + struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL); + if (mi == NULL) + return -ENOMEM; error = seq_open(file, &md_seq_ops); + if (error) + kfree(mi); + else { + struct seq_file *p = file->private_data; + p->private = mi; + mi->event = atomic_read(&md_event_count); + } return error; } +static int md_seq_release(struct inode *inode, struct file *file) +{ + struct seq_file *m = file->private_data; + struct mdstat_info *mi = m->private; + m->private = NULL; + kfree(mi); + return seq_release(inode, file); +} + +static unsigned int mdstat_poll(struct file *filp, poll_table *wait) +{ + struct seq_file *m = filp->private_data; + struct mdstat_info *mi = m->private; + int mask; + + poll_wait(filp, &md_event_waiters, wait); + + /* always allow read */ + mask = POLLIN | POLLRDNORM; + + if (mi->event != atomic_read(&md_event_count)) + mask |= POLLERR | POLLPRI; + return mask; +} + static struct file_operations md_seq_fops = { .open = md_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = md_seq_release, + .poll = mdstat_poll, }; int register_md_personality(int pnum, mdk_personality_t *p) @@ -4076,7 +4141,11 @@ static void md_do_sync(mddev_t *mddev) j += sectors; if (j>1) mddev->curr_resync = j; - + if (last_check == 0) + /* this is the earliers that rebuilt will be + * visible in /proc/mdstat + */ + md_new_event(mddev); if (last_check + window > io_sectors || j == max_sectors) continue; @@ -4262,6 +4331,7 @@ void md_check_recovery(mddev_t *mddev) mddev->recovery = 0; /* flag recovery needed just to double check */ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_new_event(mddev); goto unlock; } /* Clear some bits that don't mean anything, but @@ -4299,6 +4369,7 @@ void md_check_recovery(mddev_t *mddev) sprintf(nm, "rd%d", rdev->raid_disk); sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 
spares++; + md_new_event(mddev); } else break; } @@ -4331,9 +4402,9 @@ void md_check_recovery(mddev_t *mddev) mdname(mddev)); /* leave the spares where they are, it shouldn't hurt */ mddev->recovery = 0; - } else { + } else md_wakeup_thread(mddev->sync_thread); - } + md_new_event(mddev); } unlock: mddev_unlock(mddev); -- cgit v1.2.3 From 2d1f3b5d1b2cd11a162eb29645df749ec0036413 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:31 -0800 Subject: [PATCH] md: clean up 'page' related names in md Substitute: page_cache_get -> get_page page_cache_release -> put_page PAGE_CACHE_SHIFT -> PAGE_SHIFT PAGE_CACHE_SIZE -> PAGE_SIZE PAGE_CACHE_MASK -> PAGE_MASK __free_page -> put_page because we aren't using the page cache, we are just using pages. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/bitmap.c | 44 ++++++++++++++++++++++---------------------- drivers/md/md.c | 2 +- drivers/md/raid0.c | 2 +- drivers/md/raid1.c | 10 +++++----- drivers/md/raid10.c | 8 ++++---- drivers/md/raid5.c | 4 ++-- drivers/md/raid6main.c | 6 +++--- 7 files changed, 38 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index b65c36d9e240..fc05d1205aa0 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -341,7 +341,7 @@ static int write_page(struct bitmap *bitmap, struct page *page, int wait) /* add to list to be waited for by daemon */ struct page_list *item = mempool_alloc(bitmap->write_pool, GFP_NOIO); item->page = page; - page_cache_get(page); + get_page(page); spin_lock(&bitmap->write_lock); list_add(&item->list, &bitmap->complete_pages); spin_unlock(&bitmap->write_lock); @@ -357,10 +357,10 @@ static struct page *read_page(struct file *file, unsigned long index, struct inode *inode = file->f_mapping->host; struct page *page = NULL; loff_t isize = i_size_read(inode); - unsigned long end_index = isize >> PAGE_CACHE_SHIFT; + unsigned long end_index = isize >> PAGE_SHIFT; - PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_CACHE_SIZE, - (unsigned long long)index << PAGE_CACHE_SHIFT); + PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE, + (unsigned long long)index << PAGE_SHIFT); page = read_cache_page(inode->i_mapping, index, (filler_t *)inode->i_mapping->a_ops->readpage, file); @@ -368,7 +368,7 @@ static struct page *read_page(struct file *file, unsigned long index, goto out; wait_on_page_locked(page); if (!PageUptodate(page) || PageError(page)) { - page_cache_release(page); + put_page(page); page = ERR_PTR(-EIO); goto out; } @@ -376,14 +376,14 @@ static struct page *read_page(struct file *file, unsigned long index, if (index > end_index) /* we have read beyond EOF */ *bytes_read = 0; else if (index == end_index) /* possible short read */ - *bytes_read = isize & ~PAGE_CACHE_MASK; + *bytes_read = isize & ~PAGE_MASK; else - *bytes_read = PAGE_CACHE_SIZE; /* got a full page */ + *bytes_read = PAGE_SIZE; /* got a full page */ out: if (IS_ERR(page)) printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n", - (int)PAGE_CACHE_SIZE, - (unsigned long long)index << PAGE_CACHE_SHIFT, + (int)PAGE_SIZE, + (unsigned long long)index << PAGE_SHIFT, PTR_ERR(page)); return page; } @@ -558,7 +558,7 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits, spin_unlock_irqrestore(&bitmap->lock, flags); return; } - page_cache_get(bitmap->sb_page); + get_page(bitmap->sb_page); spin_unlock_irqrestore(&bitmap->lock, flags); sb = (bitmap_super_t *)kmap(bitmap->sb_page); switch 
(op) { @@ -569,7 +569,7 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits, default: BUG(); } kunmap(bitmap->sb_page); - page_cache_release(bitmap->sb_page); + put_page(bitmap->sb_page); } /* @@ -622,12 +622,12 @@ static void bitmap_file_unmap(struct bitmap *bitmap) while (pages--) if (map[pages]->index != 0) /* 0 is sb_page, release it below */ - page_cache_release(map[pages]); + put_page(map[pages]); kfree(map); kfree(attr); if (sb_page) - page_cache_release(sb_page); + put_page(sb_page); } static void bitmap_stop_daemon(struct bitmap *bitmap); @@ -654,7 +654,7 @@ static void drain_write_queues(struct bitmap *bitmap) while ((item = dequeue_page(bitmap))) { /* don't bother to wait */ - page_cache_release(item->page); + put_page(item->page); mempool_free(item, bitmap->write_pool); } @@ -763,7 +763,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) /* make sure the page stays cached until it gets written out */ if (! (get_page_attr(bitmap, page) & BITMAP_PAGE_DIRTY)) - page_cache_get(page); + get_page(page); /* set the bit */ kaddr = kmap_atomic(page, KM_USER0); @@ -938,7 +938,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) if (ret) { kunmap(page); /* release, page not in filemap yet */ - page_cache_release(page); + put_page(page); goto out; } } @@ -1043,7 +1043,7 @@ int bitmap_daemon_work(struct bitmap *bitmap) /* skip this page unless it's marked as needing cleaning */ if (!((attr=get_page_attr(bitmap, page)) & BITMAP_PAGE_CLEAN)) { if (attr & BITMAP_PAGE_NEEDWRITE) { - page_cache_get(page); + get_page(page); clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); } spin_unlock_irqrestore(&bitmap->lock, flags); @@ -1057,13 +1057,13 @@ int bitmap_daemon_work(struct bitmap *bitmap) default: bitmap_file_kick(bitmap); } - page_cache_release(page); + put_page(page); } continue; } /* grab the new page, sync and release the old */ - page_cache_get(page); + get_page(page); if (lastpage != NULL) { if (get_page_attr(bitmap, lastpage) & BITMAP_PAGE_NEEDWRITE) { clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); @@ -1078,7 +1078,7 @@ int bitmap_daemon_work(struct bitmap *bitmap) spin_unlock_irqrestore(&bitmap->lock, flags); } kunmap(lastpage); - page_cache_release(lastpage); + put_page(lastpage); if (err) bitmap_file_kick(bitmap); } else @@ -1133,7 +1133,7 @@ int bitmap_daemon_work(struct bitmap *bitmap) spin_unlock_irqrestore(&bitmap->lock, flags); } - page_cache_release(lastpage); + put_page(lastpage); } return err; @@ -1184,7 +1184,7 @@ static void bitmap_writeback_daemon(mddev_t *mddev) PRINTK("finished page writeback: %p\n", page); err = PageError(page); - page_cache_release(page); + put_page(page); if (err) { printk(KERN_WARNING "%s: bitmap file writeback " "failed (page %lu): %d\n", diff --git a/drivers/md/md.c b/drivers/md/md.c index 6101879a730f..c3ac67cffe62 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -339,7 +339,7 @@ static int alloc_disk_sb(mdk_rdev_t * rdev) static void free_disk_sb(mdk_rdev_t * rdev) { if (rdev->sb_page) { - page_cache_release(rdev->sb_page); + put_page(rdev->sb_page); rdev->sb_loaded = 0; rdev->sb_page = NULL; rdev->sb_offset = 0; diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index fece3277c2a5..a2c2e184c0ac 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -361,7 +361,7 @@ static int raid0_run (mddev_t *mddev) * chunksize should be used in that case. 
*/ { - int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE; + int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE; if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) mddev->queue->backing_dev_info.ra_pages = 2* stripe; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 6c10f28bc25e..bbe0b817572b 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -139,7 +139,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) out_free_pages: for (i=0; i < RESYNC_PAGES ; i++) for (j=0 ; j < pi->raid_disks; j++) - __free_page(r1_bio->bios[j]->bi_io_vec[i].bv_page); + put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page); j = -1; out_free_bio: while ( ++j < pi->raid_disks ) @@ -159,7 +159,7 @@ static void r1buf_pool_free(void *__r1_bio, void *data) if (j == 0 || r1bio->bios[j]->bi_io_vec[i].bv_page != r1bio->bios[0]->bi_io_vec[i].bv_page) - __free_page(r1bio->bios[j]->bi_io_vec[i].bv_page); + put_page(r1bio->bios[j]->bi_io_vec[i].bv_page); } for (i=0 ; i < pi->raid_disks; i++) bio_put(r1bio->bios[i]); @@ -384,7 +384,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int /* free extra copy of the data pages */ int i = bio->bi_vcnt; while (i--) - __free_page(bio->bi_io_vec[i].bv_page); + put_page(bio->bi_io_vec[i].bv_page); } /* clear the bitmap if all writes complete successfully */ bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, @@ -733,7 +733,7 @@ static struct page **alloc_behind_pages(struct bio *bio) do_sync_io: if (pages) for (i = 0; i < bio->bi_vcnt && pages[i]; i++) - __free_page(pages[i]); + put_page(pages[i]); kfree(pages); PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); return NULL; @@ -1893,7 +1893,7 @@ out_free_conf: if (conf->r1bio_pool) mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); - __free_page(conf->tmppage); + put_page(conf->tmppage); kfree(conf->poolinfo); kfree(conf); mddev->private = NULL; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 3f8df2ecbae3..ce729d6daf78 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -134,10 +134,10 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) out_free_pages: for ( ; i > 0 ; i--) - __free_page(bio->bi_io_vec[i-1].bv_page); + put_page(bio->bi_io_vec[i-1].bv_page); while (j--) for (i = 0; i < RESYNC_PAGES ; i++) - __free_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); + put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); j = -1; out_free_bio: while ( ++j < nalloc ) @@ -157,7 +157,7 @@ static void r10buf_pool_free(void *__r10_bio, void *data) struct bio *bio = r10bio->devs[j].bio; if (bio) { for (i = 0; i < RESYNC_PAGES; i++) { - __free_page(bio->bi_io_vec[i].bv_page); + put_page(bio->bi_io_vec[i].bv_page); bio->bi_io_vec[i].bv_page = NULL; } bio_put(bio); @@ -2015,7 +2015,7 @@ static int run(mddev_t *mddev) * maybe... 
*/ { - int stripe = conf->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE; + int stripe = conf->raid_disks * mddev->chunk_size / PAGE_SIZE; stripe /= conf->near_copies; if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) mddev->queue->backing_dev_info.ra_pages = 2* stripe; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 0222ba1a6d35..ec5186fd510a 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -167,7 +167,7 @@ static void shrink_buffers(struct stripe_head *sh, int num) if (!p) continue; sh->dev[i].page = NULL; - page_cache_release(p); + put_page(p); } } @@ -1956,7 +1956,7 @@ static int run(mddev_t *mddev) */ { int stripe = (mddev->raid_disks-1) * mddev->chunk_size - / PAGE_CACHE_SIZE; + / PAGE_SIZE; if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) mddev->queue->backing_dev_info.ra_pages = 2 * stripe; } diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index b5b7a8d0b165..4062fc16ac2b 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -186,7 +186,7 @@ static void shrink_buffers(struct stripe_head *sh, int num) if (!p) continue; sh->dev[i].page = NULL; - page_cache_release(p); + put_page(p); } } @@ -2069,7 +2069,7 @@ static int run(mddev_t *mddev) */ { int stripe = (mddev->raid_disks-2) * mddev->chunk_size - / PAGE_CACHE_SIZE; + / PAGE_SIZE; if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) mddev->queue->backing_dev_info.ra_pages = 2 * stripe; } @@ -2084,7 +2084,7 @@ abort: if (conf) { print_raid6_conf(conf); if (conf->spare_page) - page_cache_release(conf->spare_page); + put_page(conf->spare_page); if (conf->stripe_hashtbl) free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); -- cgit v1.2.3 From 9ffae0cf3ea02f75d163922accfd3e592d87adde Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:32 -0800 Subject: [PATCH] md: convert md to use kzalloc throughout Replace multiple kmalloc/memset pairs with kzalloc calls. 
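The conversion is mechanical: each kmalloc()-plus-memset(0) pair collapses into a single kzalloc() call with the same size and gfp mask, for example (illustrative snippet, not a particular hunk):

	conf = kmalloc(sizeof(*conf), GFP_KERNEL);
	if (conf)
		memset(conf, 0, sizeof(*conf));

	/* becomes */

	conf = kzalloc(sizeof(*conf), GFP_KERNEL);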
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/bitmap.c | 11 +++-------- drivers/md/linear.c | 3 +-- drivers/md/md.c | 10 +++------- drivers/md/multipath.c | 10 +++------- drivers/md/raid0.c | 9 ++------- drivers/md/raid1.c | 20 ++++++-------------- drivers/md/raid10.c | 6 ++---- drivers/md/raid5.c | 8 ++++---- 8 files changed, 24 insertions(+), 53 deletions(-) (limited to 'drivers') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index fc05d1205aa0..c3faa6a43de1 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -887,12 +887,10 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) if (!bitmap->filemap) goto out; - bitmap->filemap_attr = kmalloc(sizeof(long) * num_pages, GFP_KERNEL); + bitmap->filemap_attr = kzalloc(sizeof(long) * num_pages, GFP_KERNEL); if (!bitmap->filemap_attr) goto out; - memset(bitmap->filemap_attr, 0, sizeof(long) * num_pages); - oldindex = ~0L; for (i = 0; i < chunks; i++) { @@ -1557,12 +1555,10 @@ int bitmap_create(mddev_t *mddev) BUG_ON(file && mddev->bitmap_offset); - bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL); + bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); if (!bitmap) return -ENOMEM; - memset(bitmap, 0, sizeof(*bitmap)); - spin_lock_init(&bitmap->lock); bitmap->mddev = mddev; @@ -1603,12 +1599,11 @@ int bitmap_create(mddev_t *mddev) #ifdef INJECT_FATAL_FAULT_1 bitmap->bp = NULL; #else - bitmap->bp = kmalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL); + bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL); #endif err = -ENOMEM; if (!bitmap->bp) goto error; - memset(bitmap->bp, 0, pages * sizeof(*bitmap->bp)); bitmap->flags |= BITMAP_ACTIVE; diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 946efef3a8f5..f46c98d05b44 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -121,11 +121,10 @@ static int linear_run (mddev_t *mddev) sector_t curr_offset; struct list_head *tmp; - conf = kmalloc (sizeof (*conf) + mddev->raid_disks*sizeof(dev_info_t), + conf = kzalloc (sizeof (*conf) + mddev->raid_disks*sizeof(dev_info_t), GFP_KERNEL); if (!conf) goto out; - memset(conf, 0, sizeof(*conf) + mddev->raid_disks*sizeof(dev_info_t)); mddev->private = conf; cnt = 0; diff --git a/drivers/md/md.c b/drivers/md/md.c index c3ac67cffe62..8c378b62a676 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -228,12 +228,10 @@ static mddev_t * mddev_find(dev_t unit) } spin_unlock(&all_mddevs_lock); - new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL); + new = kzalloc(sizeof(*new), GFP_KERNEL); if (!new) return NULL; - memset(new, 0, sizeof(*new)); - new->unit = unit; if (MAJOR(unit) == MD_MAJOR) new->md_minor = MINOR(unit); @@ -1620,12 +1618,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi mdk_rdev_t *rdev; sector_t size; - rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL); + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); if (!rdev) { printk(KERN_ERR "md: could not alloc mem for new device!\n"); return ERR_PTR(-ENOMEM); } - memset(rdev, 0, sizeof(*rdev)); if ((err = alloc_disk_sb(rdev))) goto abort_free; @@ -3505,11 +3502,10 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, { mdk_thread_t *thread; - thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL); + thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL); if (!thread) return NULL; - memset(thread, 0, sizeof(mdk_thread_t)); init_waitqueue_head(&thread->wqueue); thread->run = run; diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 
145cdc5ad008..97a56aaaef6d 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -41,9 +41,7 @@ static mdk_personality_t multipath_personality; static void *mp_pool_alloc(gfp_t gfp_flags, void *data) { struct multipath_bh *mpb; - mpb = kmalloc(sizeof(*mpb), gfp_flags); - if (mpb) - memset(mpb, 0, sizeof(*mpb)); + mpb = kzalloc(sizeof(*mpb), gfp_flags); return mpb; } @@ -444,7 +442,7 @@ static int multipath_run (mddev_t *mddev) * should be freed in multipath_stop()] */ - conf = kmalloc(sizeof(multipath_conf_t), GFP_KERNEL); + conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL); mddev->private = conf; if (!conf) { printk(KERN_ERR @@ -452,9 +450,8 @@ static int multipath_run (mddev_t *mddev) mdname(mddev)); goto out; } - memset(conf, 0, sizeof(*conf)); - conf->multipaths = kmalloc(sizeof(struct multipath_info)*mddev->raid_disks, + conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks, GFP_KERNEL); if (!conf->multipaths) { printk(KERN_ERR @@ -462,7 +459,6 @@ static int multipath_run (mddev_t *mddev) mdname(mddev)); goto out_free_conf; } - memset(conf->multipaths, 0, sizeof(struct multipath_info)*mddev->raid_disks); conf->working_disks = 0; ITERATE_RDEV(mddev,rdev,tmp) { diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index a2c2e184c0ac..b4eaa67fabde 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -113,21 +113,16 @@ static int create_strip_zones (mddev_t *mddev) } printk("raid0: FINAL %d zones\n", conf->nr_strip_zones); - conf->strip_zone = kmalloc(sizeof(struct strip_zone)* + conf->strip_zone = kzalloc(sizeof(struct strip_zone)* conf->nr_strip_zones, GFP_KERNEL); if (!conf->strip_zone) return 1; - conf->devlist = kmalloc(sizeof(mdk_rdev_t*)* + conf->devlist = kzalloc(sizeof(mdk_rdev_t*)* conf->nr_strip_zones*mddev->raid_disks, GFP_KERNEL); if (!conf->devlist) return 1; - memset(conf->strip_zone, 0,sizeof(struct strip_zone)* - conf->nr_strip_zones); - memset(conf->devlist, 0, - sizeof(mdk_rdev_t*) * conf->nr_strip_zones * mddev->raid_disks); - /* The first zone must contain all devices, so here we check that * there is a proper alignment of slots to devices and find them all */ diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index bbe0b817572b..c42ef1c99fa0 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -61,10 +61,8 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) int size = offsetof(r1bio_t, bios[pi->raid_disks]); /* allocate a r1bio with room for raid_disks entries in the bios array */ - r1_bio = kmalloc(size, gfp_flags); - if (r1_bio) - memset(r1_bio, 0, size); - else + r1_bio = kzalloc(size, gfp_flags); + if (!r1_bio) unplug_slaves(pi->mddev); return r1_bio; @@ -711,13 +709,11 @@ static struct page **alloc_behind_pages(struct bio *bio) { int i; struct bio_vec *bvec; - struct page **pages = kmalloc(bio->bi_vcnt * sizeof(struct page *), + struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *), GFP_NOIO); if (unlikely(!pages)) goto do_sync_io; - memset(pages, 0, bio->bi_vcnt * sizeof(struct page *)); - bio_for_each_segment(bvec, bio, i) { pages[i] = alloc_page(GFP_NOIO); if (unlikely(!pages[i])) @@ -1770,19 +1766,16 @@ static int run(mddev_t *mddev) * bookkeeping area. 
[whatever we allocate in run(), * should be freed in stop()] */ - conf = kmalloc(sizeof(conf_t), GFP_KERNEL); + conf = kzalloc(sizeof(conf_t), GFP_KERNEL); mddev->private = conf; if (!conf) goto out_no_mem; - memset(conf, 0, sizeof(*conf)); - conf->mirrors = kmalloc(sizeof(struct mirror_info)*mddev->raid_disks, + conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks, GFP_KERNEL); if (!conf->mirrors) goto out_no_mem; - memset(conf->mirrors, 0, sizeof(struct mirror_info)*mddev->raid_disks); - conf->tmppage = alloc_page(GFP_KERNEL); if (!conf->tmppage) goto out_no_mem; @@ -1992,13 +1985,12 @@ static int raid1_reshape(mddev_t *mddev, int raid_disks) kfree(newpoolinfo); return -ENOMEM; } - newmirrors = kmalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL); + newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL); if (!newmirrors) { kfree(newpoolinfo); mempool_destroy(newpool); return -ENOMEM; } - memset(newmirrors, 0, sizeof(struct mirror_info)*raid_disks); raise_barrier(conf); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index ce729d6daf78..254b50e32135 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -59,10 +59,8 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) int size = offsetof(struct r10bio_s, devs[conf->copies]); /* allocate a r10bio with room for raid_disks entries in the bios array */ - r10_bio = kmalloc(size, gfp_flags); - if (r10_bio) - memset(r10_bio, 0, size); - else + r10_bio = kzalloc(size, gfp_flags); + if (!r10_bio) unplug_slaves(conf->mddev); return r10_bio; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ec5186fd510a..9fc50487e2ed 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1827,12 +1827,12 @@ static int run(mddev_t *mddev) return -EIO; } - mddev->private = kmalloc (sizeof (raid5_conf_t) - + mddev->raid_disks * sizeof(struct disk_info), - GFP_KERNEL); + mddev->private = kzalloc(sizeof (raid5_conf_t) + + mddev->raid_disks * sizeof(struct disk_info), + GFP_KERNEL); if ((conf = mddev->private) == NULL) goto abort; - memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) ); + conf->mddev = mddev; if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL) -- cgit v1.2.3 From fccddba060f2b4916a30aa27acc3d03b01bb981e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:33 -0800 Subject: [PATCH] md: tidy up raid5/6 hash table code - replace open-coded hash chain with hlist macros - Fix hash-table size at one page - it is already quite generous, so there will never be a need to use multiple pages, so no need for __get_free_pages No functional change. 
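For reference, the hlist API the code moves to follows the standard <linux/list.h> pattern. The sketch below is illustrative (the structure and function names are invented), but it uses the same hlist_add_head()/hlist_del_init()/hlist_for_each_entry() calls as the patch, including the four-argument iterator of this kernel version:

	#include <linux/list.h>
	#include <linux/types.h>

	#define NR_HASH   (PAGE_SIZE / sizeof(struct hlist_head))	/* as in the patch */
	#define HASH_MASK (NR_HASH - 1)

	struct cache_entry {
		struct hlist_node hash;		/* replaces hand-rolled hash_next/hash_pprev */
		sector_t sector;
	};

	static struct hlist_head hashtbl[NR_HASH];

	static void entry_insert(struct cache_entry *e)
	{
		hlist_add_head(&e->hash, &hashtbl[e->sector & HASH_MASK]);
	}

	static struct cache_entry *entry_find(sector_t sector)
	{
		struct cache_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry(e, n, &hashtbl[sector & HASH_MASK], hash)
			if (e->sector == sector)
				return e;
		return NULL;
	}

	static void entry_remove(struct cache_entry *e)
	{
		hlist_del_init(&e->hash);	/* leaves the node reusable, as remove_hash() needs */
	}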
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid5.c | 40 ++++++++++++++-------------------------- drivers/md/raid6main.c | 46 +++++++++++++++++----------------------------- 2 files changed, 31 insertions(+), 55 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 9fc50487e2ed..6e4db95cebb1 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -35,12 +35,10 @@ #define STRIPE_SHIFT (PAGE_SHIFT - 9) #define STRIPE_SECTORS (STRIPE_SIZE>>9) #define IO_THRESHOLD 1 -#define HASH_PAGES 1 -#define HASH_PAGES_ORDER 0 -#define NR_HASH (HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *)) +#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) #define HASH_MASK (NR_HASH - 1) -#define stripe_hash(conf, sect) ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]) +#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])) /* bio's attached to a stripe+device for I/O are linked together in bi_sector * order without overlap. There may be several bio's per stripe+device, and @@ -113,29 +111,21 @@ static void release_stripe(struct stripe_head *sh) spin_unlock_irqrestore(&conf->device_lock, flags); } -static void remove_hash(struct stripe_head *sh) +static inline void remove_hash(struct stripe_head *sh) { PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector); - if (sh->hash_pprev) { - if (sh->hash_next) - sh->hash_next->hash_pprev = sh->hash_pprev; - *sh->hash_pprev = sh->hash_next; - sh->hash_pprev = NULL; - } + hlist_del_init(&sh->hash); } -static __inline__ void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) +static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) { - struct stripe_head **shp = &stripe_hash(conf, sh->sector); + struct hlist_head *hp = stripe_hash(conf, sh->sector); PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector); CHECK_DEVLOCK(); - if ((sh->hash_next = *shp) != NULL) - (*shp)->hash_pprev = &sh->hash_next; - *shp = sh; - sh->hash_pprev = shp; + hlist_add_head(&sh->hash, hp); } @@ -228,10 +218,11 @@ static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_i static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector) { struct stripe_head *sh; + struct hlist_node *hn; CHECK_DEVLOCK(); PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector); - for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next) + hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) if (sh->sector == sector) return sh; PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector); @@ -1835,9 +1826,8 @@ static int run(mddev_t *mddev) conf->mddev = mddev; - if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL) + if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) goto abort; - memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE); spin_lock_init(&conf->device_lock); init_waitqueue_head(&conf->wait_for_stripe); @@ -1972,9 +1962,7 @@ static int run(mddev_t *mddev) abort: if (conf) { print_raid5_conf(conf); - if (conf->stripe_hashtbl) - free_pages((unsigned long) conf->stripe_hashtbl, - HASH_PAGES_ORDER); + kfree(conf->stripe_hashtbl); kfree(conf); } mddev->private = NULL; @@ -1991,7 +1979,7 @@ static int stop(mddev_t *mddev) md_unregister_thread(mddev->thread); mddev->thread = NULL; shrink_stripes(conf); - free_pages((unsigned long) conf->stripe_hashtbl, 
HASH_PAGES_ORDER); + kfree(conf->stripe_hashtbl); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); kfree(conf); @@ -2019,12 +2007,12 @@ static void print_sh (struct stripe_head *sh) static void printall (raid5_conf_t *conf) { struct stripe_head *sh; + struct hlist_node *hn; int i; spin_lock_irq(&conf->device_lock); for (i = 0; i < NR_HASH; i++) { - sh = conf->stripe_hashtbl[i]; - for (; sh; sh = sh->hash_next) { + hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { if (sh->raid_conf != conf) continue; print_sh(sh); diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index 4062fc16ac2b..79b5244f44f4 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -40,12 +40,10 @@ #define STRIPE_SHIFT (PAGE_SHIFT - 9) #define STRIPE_SECTORS (STRIPE_SIZE>>9) #define IO_THRESHOLD 1 -#define HASH_PAGES 1 -#define HASH_PAGES_ORDER 0 -#define NR_HASH (HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *)) +#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) #define HASH_MASK (NR_HASH - 1) -#define stripe_hash(conf, sect) ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]) +#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])) /* bio's attached to a stripe+device for I/O are linked together in bi_sector * order without overlap. There may be several bio's per stripe+device, and @@ -132,29 +130,21 @@ static void release_stripe(struct stripe_head *sh) spin_unlock_irqrestore(&conf->device_lock, flags); } -static void remove_hash(struct stripe_head *sh) +static inline void remove_hash(struct stripe_head *sh) { PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector); - if (sh->hash_pprev) { - if (sh->hash_next) - sh->hash_next->hash_pprev = sh->hash_pprev; - *sh->hash_pprev = sh->hash_next; - sh->hash_pprev = NULL; - } + hlist_del_init(&sh->hash); } -static __inline__ void insert_hash(raid6_conf_t *conf, struct stripe_head *sh) +static inline void insert_hash(raid6_conf_t *conf, struct stripe_head *sh) { - struct stripe_head **shp = &stripe_hash(conf, sh->sector); + struct hlist_head *hp = stripe_hash(conf, sh->sector); PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector); CHECK_DEVLOCK(); - if ((sh->hash_next = *shp) != NULL) - (*shp)->hash_pprev = &sh->hash_next; - *shp = sh; - sh->hash_pprev = shp; + hlist_add_head(&sh->hash, hp); } @@ -247,10 +237,11 @@ static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_i static struct stripe_head *__find_stripe(raid6_conf_t *conf, sector_t sector) { struct stripe_head *sh; + struct hlist_node *hn; CHECK_DEVLOCK(); PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector); - for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next) + hlist_for_each_entry (sh, hn, stripe_hash(conf, sector), hash) if (sh->sector == sector) return sh; PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector); @@ -1931,17 +1922,15 @@ static int run(mddev_t *mddev) return -EIO; } - mddev->private = kmalloc (sizeof (raid6_conf_t) - + mddev->raid_disks * sizeof(struct disk_info), - GFP_KERNEL); + mddev->private = kzalloc(sizeof (raid6_conf_t) + + mddev->raid_disks * sizeof(struct disk_info), + GFP_KERNEL); if ((conf = mddev->private) == NULL) goto abort; - memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) ); conf->mddev = mddev; - if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) 
== NULL) + if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) goto abort; - memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE); conf->spare_page = alloc_page(GFP_KERNEL); if (!conf->spare_page) @@ -2085,9 +2074,7 @@ abort: print_raid6_conf(conf); if (conf->spare_page) put_page(conf->spare_page); - if (conf->stripe_hashtbl) - free_pages((unsigned long) conf->stripe_hashtbl, - HASH_PAGES_ORDER); + kfree(conf->stripe_hashtbl); kfree(conf); } mddev->private = NULL; @@ -2104,7 +2091,7 @@ static int stop (mddev_t *mddev) md_unregister_thread(mddev->thread); mddev->thread = NULL; shrink_stripes(conf); - free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); + kfree(conf->stripe_hashtbl); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ kfree(conf); mddev->private = NULL; @@ -2131,12 +2118,13 @@ static void print_sh (struct seq_file *seq, struct stripe_head *sh) static void printall (struct seq_file *seq, raid6_conf_t *conf) { struct stripe_head *sh; + struct hlist_node *hn; int i; spin_lock_irq(&conf->device_lock); for (i = 0; i < NR_HASH; i++) { sh = conf->stripe_hashtbl[i]; - for (; sh; sh = sh->hash_next) { + hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { if (sh->raid_conf != conf) continue; print_sh(seq, sh); -- cgit v1.2.3 From ea03aff93b4117c2ddfb91b34b44a44e19df24cd Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:34 -0800 Subject: [PATCH] md: convert various kmap calls to kmap_atomic Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/bitmap.c | 44 +++++++++++++++++++++----------------------- 1 file changed, 21 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index c3faa6a43de1..519b1bf7a09a 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -406,11 +406,11 @@ int bitmap_update_sb(struct bitmap *bitmap) return 0; } spin_unlock_irqrestore(&bitmap->lock, flags); - sb = (bitmap_super_t *)kmap(bitmap->sb_page); + sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); sb->events = cpu_to_le64(bitmap->mddev->events); if (!bitmap->mddev->degraded) sb->events_cleared = cpu_to_le64(bitmap->mddev->events); - kunmap(bitmap->sb_page); + kunmap_atomic(sb, KM_USER0); return write_page(bitmap, bitmap->sb_page, 1); } @@ -421,7 +421,7 @@ void bitmap_print_sb(struct bitmap *bitmap) if (!bitmap || !bitmap->sb_page) return; - sb = (bitmap_super_t *)kmap(bitmap->sb_page); + sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap)); printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic)); printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version)); @@ -440,7 +440,7 @@ void bitmap_print_sb(struct bitmap *bitmap) printk(KERN_DEBUG " sync size: %llu KB\n", (unsigned long long)le64_to_cpu(sb->sync_size)/2); printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind)); - kunmap(bitmap->sb_page); + kunmap_atomic(sb, KM_USER0); } /* read the superblock from the bitmap file and initialize some bitmap fields */ @@ -466,7 +466,7 @@ static int bitmap_read_sb(struct bitmap *bitmap) return err; } - sb = (bitmap_super_t *)kmap(bitmap->sb_page); + sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); if (bytes_read < sizeof(*sb)) { /* short read */ printk(KERN_INFO "%s: bitmap file superblock truncated\n", @@ -535,7 +535,7 @@ success: bitmap->events_cleared = bitmap->mddev->events; err = 0; out: - 
kunmap(bitmap->sb_page); + kunmap_atomic(sb, KM_USER0); if (err) bitmap_print_sb(bitmap); return err; @@ -560,7 +560,7 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits, } get_page(bitmap->sb_page); spin_unlock_irqrestore(&bitmap->lock, flags); - sb = (bitmap_super_t *)kmap(bitmap->sb_page); + sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); switch (op) { case MASK_SET: sb->state |= bits; break; @@ -568,7 +568,7 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits, break; default: BUG(); } - kunmap(bitmap->sb_page); + kunmap_atomic(sb, KM_USER0); put_page(bitmap->sb_page); } @@ -854,6 +854,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) unsigned long bytes, offset, dummy; int outofdate; int ret = -ENOSPC; + void *paddr; chunks = bitmap->chunks; file = bitmap->file; @@ -899,8 +900,6 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) bit = file_page_offset(i); if (index != oldindex) { /* this is a new page, read it in */ /* unmap the old page, we're done with it */ - if (oldpage != NULL) - kunmap(oldpage); if (index == 0) { /* * if we're here then the superblock page @@ -923,18 +922,18 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) oldindex = index; oldpage = page; - kmap(page); if (outofdate) { /* * if bitmap is out of date, dirty the * whole page and write it out */ - memset(page_address(page) + offset, 0xff, + paddr = kmap_atomic(page, KM_USER0); + memset(paddr + offset, 0xff, PAGE_SIZE - offset); + kunmap_atomic(paddr, KM_USER0); ret = write_page(bitmap, page, 1); if (ret) { - kunmap(page); /* release, page not in filemap yet */ put_page(page); goto out; @@ -943,10 +942,12 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) bitmap->filemap[bitmap->file_pages++] = page; } + paddr = kmap_atomic(page, KM_USER0); if (bitmap->flags & BITMAP_HOSTENDIAN) - b = test_bit(bit, page_address(page)); + b = test_bit(bit, paddr); else - b = ext2_test_bit(bit, page_address(page)); + b = ext2_test_bit(bit, paddr); + kunmap_atomic(paddr, KM_USER0); if (b) { /* if the disk bit is set, set the memory bit */ bitmap_set_memory_bits(bitmap, i << CHUNK_BLOCK_SHIFT(bitmap), @@ -961,9 +962,6 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) ret = 0; bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET); - if (page) /* unmap the last page */ - kunmap(page); - if (bit_cnt) { /* Kick recovery if any bits were set */ set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery); md_wakeup_thread(bitmap->mddev->thread); @@ -1019,6 +1017,7 @@ int bitmap_daemon_work(struct bitmap *bitmap) int err = 0; int blocks; int attr; + void *paddr; if (bitmap == NULL) return 0; @@ -1075,14 +1074,12 @@ int bitmap_daemon_work(struct bitmap *bitmap) set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); spin_unlock_irqrestore(&bitmap->lock, flags); } - kunmap(lastpage); put_page(lastpage); if (err) bitmap_file_kick(bitmap); } else spin_unlock_irqrestore(&bitmap->lock, flags); lastpage = page; - kmap(page); /* printk("bitmap clean at page %lu\n", j); */ @@ -1105,10 +1102,12 @@ int bitmap_daemon_work(struct bitmap *bitmap) -1); /* clear the bit */ + paddr = kmap_atomic(page, KM_USER0); if (bitmap->flags & BITMAP_HOSTENDIAN) - clear_bit(file_page_offset(j), page_address(page)); + clear_bit(file_page_offset(j), paddr); else - ext2_clear_bit(file_page_offset(j), page_address(page)); + ext2_clear_bit(file_page_offset(j), paddr); + kunmap_atomic(paddr, 
KM_USER0); } } spin_unlock_irqrestore(&bitmap->lock, flags); @@ -1116,7 +1115,6 @@ int bitmap_daemon_work(struct bitmap *bitmap) /* now sync the final page */ if (lastpage != NULL) { - kunmap(lastpage); spin_lock_irqsave(&bitmap->lock, flags); if (get_page_attr(bitmap, lastpage) &BITMAP_PAGE_NEEDWRITE) { clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); -- cgit v1.2.3 From a8745db232dd86f145bff6946e78f839acab511b Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:34 -0800 Subject: [PATCH] md: convert recently exported symbol to GPL ...because that seems to be the preferred practice these days. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 8c378b62a676..a6a066fc92e3 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -478,7 +478,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size, bio_put(bio); return ret; } -EXPORT_SYMBOL(sync_page_io); +EXPORT_SYMBOL_GPL(sync_page_io); static int read_disk_sb(mdk_rdev_t * rdev, int size) { -- cgit v1.2.3 From a24a8dd858e0ba50f06a9fd8f61fe8c4fe7a8d8e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:35 -0800 Subject: [PATCH] md: break out of a loop that doesn't need to run to completion Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid10.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 254b50e32135..253322ae9195 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1672,8 +1672,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i for (j=0; jcopies;j++) { int d = r10_bio->devs[j].devnum; if (conf->mirrors[d].rdev == NULL || - test_bit(Faulty, &conf->mirrors[d].rdev->flags)) + test_bit(Faulty, &conf->mirrors[d].rdev->flags)) { still_degraded = 1; + break; + } } must_sync = bitmap_start_sync(mddev->bitmap, sect, &sync_blocks, still_degraded); -- cgit v1.2.3 From 2604b703b6b3db80e3c75ce472a54dfd0b7bf9f4 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:36 -0800 Subject: [PATCH] md: remove personality numbering from md md supports multiple different RAID level, each being implemented by a 'personality' (which is often in a separate module). These personalities have fairly artificial 'numbers'. The numbers are use to: 1- provide an index into an array where the various personalities are recorded 2- identify the module (via an alias) which implements are particular personality. Neither of these uses really justify the existence of personality numbers. The array can be replaced by a linked list which is searched (array lookup only happens very rarely). Module identification can be done using an alias based on level rather than 'personality' number. The current 'raid5' modules support two level (4 and 5) but only one personality. This slight awkwardness (which was handled in the mapping from level to personality) can be better handled by allowing raid5 to register 2 personalities. With this change in place, the core md module does not need to have an exhaustive list of all possible personalities, so other personalities can be added independently. This patch also moves the check for chunksize being non-zero into the ->run routines for the personalities that need it, rather than having it in core-md. 
This has a side effect of allowing 'faulty' and 'linear' not to have a chunk-size set. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/faulty.c | 8 +++-- drivers/md/linear.c | 10 ++++--- drivers/md/md.c | 79 ++++++++++++++++++-------------------------------- drivers/md/multipath.c | 11 ++++--- drivers/md/raid0.c | 14 ++++++--- drivers/md/raid1.c | 9 +++--- drivers/md/raid10.c | 16 +++++----- drivers/md/raid5.c | 34 ++++++++++++++++++---- drivers/md/raid6main.c | 10 ++++--- 9 files changed, 104 insertions(+), 87 deletions(-) (limited to 'drivers') diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index 0248f8e7eac0..f12e83086897 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -316,9 +316,10 @@ static int stop(mddev_t *mddev) return 0; } -static mdk_personality_t faulty_personality = +static struct mdk_personality faulty_personality = { .name = "faulty", + .level = LEVEL_FAULTY, .owner = THIS_MODULE, .make_request = make_request, .run = run, @@ -329,15 +330,16 @@ static mdk_personality_t faulty_personality = static int __init raid_init(void) { - return register_md_personality(FAULTY, &faulty_personality); + return register_md_personality(&faulty_personality); } static void raid_exit(void) { - unregister_md_personality(FAULTY); + unregister_md_personality(&faulty_personality); } module_init(raid_init); module_exit(raid_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-10"); /* faulty */ +MODULE_ALIAS("md-level--5"); diff --git a/drivers/md/linear.c b/drivers/md/linear.c index f46c98d05b44..79dee8159217 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -351,9 +351,10 @@ static void linear_status (struct seq_file *seq, mddev_t *mddev) } -static mdk_personality_t linear_personality= +static struct mdk_personality linear_personality = { .name = "linear", + .level = LEVEL_LINEAR, .owner = THIS_MODULE, .make_request = linear_make_request, .run = linear_run, @@ -363,16 +364,17 @@ static mdk_personality_t linear_personality= static int __init linear_init (void) { - return register_md_personality (LINEAR, &linear_personality); + return register_md_personality (&linear_personality); } static void linear_exit (void) { - unregister_md_personality (LINEAR); + unregister_md_personality (&linear_personality); } module_init(linear_init); module_exit(linear_exit); MODULE_LICENSE("GPL"); -MODULE_ALIAS("md-personality-1"); /* LINEAR */ +MODULE_ALIAS("md-personality-1"); /* LINEAR - degrecated*/ +MODULE_ALIAS("md-level--1"); diff --git a/drivers/md/md.c b/drivers/md/md.c index a6a066fc92e3..07f180f95b47 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -68,7 +68,7 @@ static void autostart_arrays (int part); #endif -static mdk_personality_t *pers[MAX_PERSONALITY]; +static LIST_HEAD(pers_list); static DEFINE_SPINLOCK(pers_lock); /* @@ -303,6 +303,15 @@ static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) return NULL; } +static struct mdk_personality *find_pers(int level) +{ + struct mdk_personality *pers; + list_for_each_entry(pers, &pers_list, list) + if (pers->level == level) + return pers; + return NULL; +} + static inline sector_t calc_dev_sboffset(struct block_device *bdev) { sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; @@ -1744,7 +1753,7 @@ static void analyze_sbs(mddev_t * mddev) static ssize_t level_show(mddev_t *mddev, char *page) { - mdk_personality_t *p = mddev->pers; + struct mdk_personality *p = mddev->pers; if (p == NULL && mddev->raid_disks == 0) return 0; if (mddev->level >= 0) @@ 
-1960,11 +1969,12 @@ static int start_dirty_degraded; static int do_md_run(mddev_t * mddev) { - int pnum, err; + int err; int chunk_size; struct list_head *tmp; mdk_rdev_t *rdev; struct gendisk *disk; + struct mdk_personality *pers; char b[BDEVNAME_SIZE]; if (list_empty(&mddev->disks)) @@ -1981,20 +1991,8 @@ static int do_md_run(mddev_t * mddev) analyze_sbs(mddev); chunk_size = mddev->chunk_size; - pnum = level_to_pers(mddev->level); - if ((pnum != MULTIPATH) && (pnum != RAID1)) { - if (!chunk_size) { - /* - * 'default chunksize' in the old md code used to - * be PAGE_SIZE, baaad. - * we abort here to be on the safe side. We don't - * want to continue the bad practice. - */ - printk(KERN_ERR - "no chunksize specified, see 'man raidtab'\n"); - return -EINVAL; - } + if (chunk_size) { if (chunk_size > MAX_CHUNK_SIZE) { printk(KERN_ERR "too big chunk_size: %d > %d\n", chunk_size, MAX_CHUNK_SIZE); @@ -2030,10 +2028,7 @@ static int do_md_run(mddev_t * mddev) } #ifdef CONFIG_KMOD - if (!pers[pnum]) - { - request_module("md-personality-%d", pnum); - } + request_module("md-level-%d", mddev->level); #endif /* @@ -2055,14 +2050,14 @@ static int do_md_run(mddev_t * mddev) return -ENOMEM; spin_lock(&pers_lock); - if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) { + pers = find_pers(mddev->level); + if (!pers || !try_module_get(pers->owner)) { spin_unlock(&pers_lock); - printk(KERN_WARNING "md: personality %d is not loaded!\n", - pnum); + printk(KERN_WARNING "md: personality for level %d is not loaded!\n", + mddev->level); return -EINVAL; } - - mddev->pers = pers[pnum]; + mddev->pers = pers; spin_unlock(&pers_lock); mddev->recovery = 0; @@ -3701,15 +3696,14 @@ static int md_seq_show(struct seq_file *seq, void *v) struct list_head *tmp2; mdk_rdev_t *rdev; struct mdstat_info *mi = seq->private; - int i; struct bitmap *bitmap; if (v == (void*)1) { + struct mdk_personality *pers; seq_printf(seq, "Personalities : "); spin_lock(&pers_lock); - for (i = 0; i < MAX_PERSONALITY; i++) - if (pers[i]) - seq_printf(seq, "[%s] ", pers[i]->name); + list_for_each_entry(pers, &pers_list, list) + seq_printf(seq, "[%s] ", pers->name); spin_unlock(&pers_lock); seq_printf(seq, "\n"); @@ -3870,35 +3864,20 @@ static struct file_operations md_seq_fops = { .poll = mdstat_poll, }; -int register_md_personality(int pnum, mdk_personality_t *p) +int register_md_personality(struct mdk_personality *p) { - if (pnum >= MAX_PERSONALITY) { - printk(KERN_ERR - "md: tried to install personality %s as nr %d, but max is %lu\n", - p->name, pnum, MAX_PERSONALITY-1); - return -EINVAL; - } - spin_lock(&pers_lock); - if (pers[pnum]) { - spin_unlock(&pers_lock); - return -EBUSY; - } - - pers[pnum] = p; - printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum); + list_add_tail(&p->list, &pers_list); + printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level); spin_unlock(&pers_lock); return 0; } -int unregister_md_personality(int pnum) +int unregister_md_personality(struct mdk_personality *p) { - if (pnum >= MAX_PERSONALITY) - return -EINVAL; - - printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name); + printk(KERN_INFO "md: %s personality unregistered\n", p->name); spin_lock(&pers_lock); - pers[pnum] = NULL; + list_del_init(&p->list); spin_unlock(&pers_lock); return 0; } diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 97a56aaaef6d..d4d838e3f9f8 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -35,9 +35,6 @@ #define NR_RESERVED_BUFS 32 -static 
mdk_personality_t multipath_personality; - - static void *mp_pool_alloc(gfp_t gfp_flags, void *data) { struct multipath_bh *mpb; @@ -553,9 +550,10 @@ static int multipath_stop (mddev_t *mddev) return 0; } -static mdk_personality_t multipath_personality= +static struct mdk_personality multipath_personality = { .name = "multipath", + .level = LEVEL_MULTIPATH, .owner = THIS_MODULE, .make_request = multipath_make_request, .run = multipath_run, @@ -568,15 +566,16 @@ static mdk_personality_t multipath_personality= static int __init multipath_init (void) { - return register_md_personality (MULTIPATH, &multipath_personality); + return register_md_personality (&multipath_personality); } static void __exit multipath_exit (void) { - unregister_md_personality (MULTIPATH); + unregister_md_personality (&multipath_personality); } module_init(multipath_init); module_exit(multipath_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-7"); /* MULTIPATH */ +MODULE_ALIAS("md-level--4"); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index b4eaa67fabde..7fb69e29391b 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -275,7 +275,11 @@ static int raid0_run (mddev_t *mddev) mdk_rdev_t *rdev; struct list_head *tmp; - printk("%s: setting max_sectors to %d, segment boundary to %d\n", + if (mddev->chunk_size == 0) { + printk(KERN_ERR "md/raid0: non-zero chunk size required.\n"); + return -EINVAL; + } + printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n", mdname(mddev), mddev->chunk_size >> 9, (mddev->chunk_size>>1)-1); @@ -507,9 +511,10 @@ static void raid0_status (struct seq_file *seq, mddev_t *mddev) return; } -static mdk_personality_t raid0_personality= +static struct mdk_personality raid0_personality= { .name = "raid0", + .level = 0, .owner = THIS_MODULE, .make_request = raid0_make_request, .run = raid0_run, @@ -519,15 +524,16 @@ static mdk_personality_t raid0_personality= static int __init raid0_init (void) { - return register_md_personality (RAID0, &raid0_personality); + return register_md_personality (&raid0_personality); } static void raid0_exit (void) { - unregister_md_personality (RAID0); + unregister_md_personality (&raid0_personality); } module_init(raid0_init); module_exit(raid0_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-2"); /* RAID0 */ +MODULE_ALIAS("md-level-0"); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index c42ef1c99fa0..6e0f59ed3d80 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -47,7 +47,6 @@ */ #define NR_RAID1_BIOS 256 -static mdk_personality_t raid1_personality; static void unplug_slaves(mddev_t *mddev); @@ -2036,9 +2035,10 @@ static void raid1_quiesce(mddev_t *mddev, int state) } -static mdk_personality_t raid1_personality = +static struct mdk_personality raid1_personality = { .name = "raid1", + .level = 1, .owner = THIS_MODULE, .make_request = make_request, .run = run, @@ -2056,15 +2056,16 @@ static mdk_personality_t raid1_personality = static int __init raid_init(void) { - return register_md_personality(RAID1, &raid1_personality); + return register_md_personality(&raid1_personality); } static void raid_exit(void) { - unregister_md_personality(RAID1); + unregister_md_personality(&raid1_personality); } module_init(raid_init); module_exit(raid_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-3"); /* RAID1 */ +MODULE_ALIAS("md-level-1"); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 253322ae9195..f23d52c5df94 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1883,11 +1883,11 
@@ static int run(mddev_t *mddev) int nc, fc; sector_t stride, size; - if (mddev->level != 10) { - printk(KERN_ERR "raid10: %s: raid level not set correctly... (%d)\n", - mdname(mddev), mddev->level); - goto out; + if (mddev->chunk_size == 0) { + printk(KERN_ERR "md/raid10: non-zero chunk size required.\n"); + return -EINVAL; } + nc = mddev->layout & 255; fc = (mddev->layout >> 8) & 255; if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks || @@ -2072,9 +2072,10 @@ static void raid10_quiesce(mddev_t *mddev, int state) } } -static mdk_personality_t raid10_personality = +static struct mdk_personality raid10_personality = { .name = "raid10", + .level = 10, .owner = THIS_MODULE, .make_request = make_request, .run = run, @@ -2090,15 +2091,16 @@ static mdk_personality_t raid10_personality = static int __init raid_init(void) { - return register_md_personality(RAID10, &raid10_personality); + return register_md_personality(&raid10_personality); } static void raid_exit(void) { - unregister_md_personality(RAID10); + unregister_md_personality(&raid10_personality); } module_init(raid_init); module_exit(raid_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-9"); /* RAID10 */ +MODULE_ALIAS("md-level-10"); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 6e4db95cebb1..b0cfd3ca9ca0 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2187,9 +2187,10 @@ static void raid5_quiesce(mddev_t *mddev, int state) } } -static mdk_personality_t raid5_personality= +static struct mdk_personality raid5_personality = { .name = "raid5", + .level = 5, .owner = THIS_MODULE, .make_request = make_request, .run = run, @@ -2204,17 +2205,40 @@ static mdk_personality_t raid5_personality= .quiesce = raid5_quiesce, }; -static int __init raid5_init (void) +static struct mdk_personality raid4_personality = { - return register_md_personality (RAID5, &raid5_personality); + .name = "raid4", + .level = 4, + .owner = THIS_MODULE, + .make_request = make_request, + .run = run, + .stop = stop, + .status = status, + .error_handler = error, + .hot_add_disk = raid5_add_disk, + .hot_remove_disk= raid5_remove_disk, + .spare_active = raid5_spare_active, + .sync_request = sync_request, + .resize = raid5_resize, + .quiesce = raid5_quiesce, +}; + +static int __init raid5_init(void) +{ + register_md_personality(&raid5_personality); + register_md_personality(&raid4_personality); + return 0; } -static void raid5_exit (void) +static void raid5_exit(void) { - unregister_md_personality (RAID5); + unregister_md_personality(&raid5_personality); + unregister_md_personality(&raid4_personality); } module_init(raid5_init); module_exit(raid5_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-4"); /* RAID5 */ +MODULE_ALIAS("md-level-5"); +MODULE_ALIAS("md-level-4"); diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index 79b5244f44f4..950e5fa6e1f2 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -2304,9 +2304,10 @@ static void raid6_quiesce(mddev_t *mddev, int state) } } -static mdk_personality_t raid6_personality= +static struct mdk_personality raid6_personality = { .name = "raid6", + .level = 6, .owner = THIS_MODULE, .make_request = make_request, .run = run, @@ -2321,7 +2322,7 @@ static mdk_personality_t raid6_personality= .quiesce = raid6_quiesce, }; -static int __init raid6_init (void) +static int __init raid6_init(void) { int e; @@ -2329,15 +2330,16 @@ static int __init raid6_init (void) if ( e ) return e; - return register_md_personality (RAID6, &raid6_personality); + return 
register_md_personality(&raid6_personality); } static void raid6_exit (void) { - unregister_md_personality (RAID6); + unregister_md_personality(&raid6_personality); } module_init(raid6_init); module_exit(raid6_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-8"); /* RAID6 */ +MODULE_ALIAS("md-level-6"); -- cgit v1.2.3 From 097426f689f179747f3cd6b4749eb2a6b605702d Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:37 -0800 Subject: [PATCH] md: fix possible problem in raid1/raid10 error overwriting The code to overwrite/reread for addressing read errors in raid1/raid10 currently assumes that the read will not alter the buffer which could be used to write to the next device. This is not a safe assumption to make. So we split the loops into a overwrite loop and a separate re-read loop, so that the writing is complete before reading is attempted. Cc: Paul Clements Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid1.c | 38 ++++++++++++++++++++++++++++++-------- drivers/md/raid10.c | 22 ++++++++++++++++++---- 2 files changed, 48 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 6e0f59ed3d80..39c10a65683d 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1253,6 +1253,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) } while (!success && d != r1_bio->read_disk); if (success) { + int start = d; /* write it back and re-read */ set_bit(R1BIO_Uptodate, &r1_bio->state); while (d != r1_bio->read_disk) { @@ -1266,14 +1267,23 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) sect + rdev->data_offset, s<<9, bio->bi_io_vec[idx].bv_page, - WRITE) == 0 || - sync_page_io(rdev->bdev, + WRITE) == 0) + md_error(mddev, rdev); + } + d = start; + while (d != r1_bio->read_disk) { + if (d == 0) + d = conf->raid_disks; + d--; + if (r1_bio->bios[d]->bi_end_io != end_sync_read) + continue; + rdev = conf->mirrors[d].rdev; + if (sync_page_io(rdev->bdev, sect + rdev->data_offset, s<<9, bio->bi_io_vec[idx].bv_page, - READ) == 0) { + READ) == 0) md_error(mddev, rdev); - } } } else { char b[BDEVNAME_SIZE]; @@ -1445,6 +1455,7 @@ static void raid1d(mddev_t *mddev) if (success) { /* write it back and re-read */ + int start = d; while (d != r1_bio->read_disk) { if (d==0) d = conf->raid_disks; @@ -1454,13 +1465,24 @@ static void raid1d(mddev_t *mddev) test_bit(In_sync, &rdev->flags)) { if (sync_page_io(rdev->bdev, sect + rdev->data_offset, - s<<9, conf->tmppage, WRITE) == 0 || - sync_page_io(rdev->bdev, + s<<9, conf->tmppage, WRITE) == 0) + /* Well, this device is dead */ + md_error(mddev, rdev); + } + } + d = start; + while (d != r1_bio->read_disk) { + if (d==0) + d = conf->raid_disks; + d--; + rdev = conf->mirrors[d].rdev; + if (rdev && + test_bit(In_sync, &rdev->flags)) { + if (sync_page_io(rdev->bdev, sect + rdev->data_offset, - s<<9, conf->tmppage, READ) == 0) { + s<<9, conf->tmppage, READ) == 0) /* Well, this device is dead */ md_error(mddev, rdev); - } } } } else { diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index f23d52c5df94..9647ebb0983a 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1421,6 +1421,7 @@ static void raid10d(mddev_t *mddev) } while (!success && sl != r10_bio->read_slot); if (success) { + int start = sl; /* write it back and re-read */ while (sl != r10_bio->read_slot) { int d; @@ -1434,14 +1435,27 @@ static void raid10d(mddev_t *mddev) if (sync_page_io(rdev->bdev, r10_bio->devs[sl].addr + sect + 
rdev->data_offset, - s<<9, conf->tmppage, WRITE) == 0 || - sync_page_io(rdev->bdev, + s<<9, conf->tmppage, WRITE) == 0) + /* Well, this device is dead */ + md_error(mddev, rdev); + } + } + sl = start; + while (sl != r10_bio->read_slot) { + int d; + if (sl==0) + sl = conf->copies; + sl--; + d = r10_bio->devs[sl].devnum; + rdev = conf->mirrors[d].rdev; + if (rdev && + test_bit(In_sync, &rdev->flags)) { + if (sync_page_io(rdev->bdev, r10_bio->devs[sl].addr + sect + rdev->data_offset, - s<<9, conf->tmppage, READ) == 0) { + s<<9, conf->tmppage, READ) == 0) /* Well, this device is dead */ md_error(mddev, rdev); - } } } } else { -- cgit v1.2.3 From 7dd5d34c6c2da04e8cd0732e08b33cd3359e4bae Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:39 -0800 Subject: [PATCH] md: remove inappropriate limits in md/bitmap configuration. The kernel should not be imposing these policy limits: The time between bitmap updates should certainly be allowed to be more than 15 seconds, and if someone wants a bitmap chunk size in excess of 4MB, the kernel isn't the place to stop them. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/bitmap.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 519b1bf7a09a..abe415f0c039 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -485,12 +485,12 @@ static int bitmap_read_sb(struct bitmap *bitmap) else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO || le32_to_cpu(sb->version) > BITMAP_MAJOR_HI) reason = "unrecognized superblock version"; - else if (chunksize < 512 || chunksize > (1024 * 1024 * 4)) - reason = "bitmap chunksize out of range (512B - 4MB)"; + else if (chunksize < PAGE_SIZE) + reason = "bitmap chunksize too small"; else if ((1 << ffz(~chunksize)) != chunksize) reason = "bitmap chunksize not a power of 2"; - else if (daemon_sleep < 1 || daemon_sleep > 15) - reason = "daemon sleep period out of range (1-15s)"; + else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT / HZ) + reason = "daemon sleep period out of range"; else if (write_behind > COUNTER_MAX) reason = "write-behind limit out of range (0 - 16383)"; if (reason) { -- cgit v1.2.3 From 1345b1d8adbdeceb1c871d9a4af5e2a700b341c6 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:40 -0800 Subject: [PATCH] md: define and use safe_put_page for md md sometimes call put_page on NULL pointers (treating it like kfree). This is not safe, so define and use a 'safe_put_page' which checks for NULL. 
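The helper's definition does not appear in the drivers-only diff below (it goes into the md/raid headers rather than into the four files listed), but per the description it is just a NULL-tolerant put_page(); a minimal sketch of what such a helper looks like:

/* editorial sketch, not part of the shown hunks: put_page() that
 * tolerates a NULL page, analogous to kfree(NULL) being a no-op */
#define safe_put_page(p)	do { if (p) put_page(p); } while (0)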
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/bitmap.c | 3 +-- drivers/md/raid1.c | 8 ++++---- drivers/md/raid10.c | 8 ++++---- drivers/md/raid6main.c | 3 +-- 4 files changed, 10 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index abe415f0c039..ee4a3424a8a3 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -626,8 +626,7 @@ static void bitmap_file_unmap(struct bitmap *bitmap) kfree(map); kfree(attr); - if (sb_page) - put_page(sb_page); + safe_put_page(sb_page); } static void bitmap_stop_daemon(struct bitmap *bitmap); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 39c10a65683d..feea4eeca1d9 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -136,7 +136,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) out_free_pages: for (i=0; i < RESYNC_PAGES ; i++) for (j=0 ; j < pi->raid_disks; j++) - put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page); + safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page); j = -1; out_free_bio: while ( ++j < pi->raid_disks ) @@ -156,7 +156,7 @@ static void r1buf_pool_free(void *__r1_bio, void *data) if (j == 0 || r1bio->bios[j]->bi_io_vec[i].bv_page != r1bio->bios[0]->bi_io_vec[i].bv_page) - put_page(r1bio->bios[j]->bi_io_vec[i].bv_page); + safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page); } for (i=0 ; i < pi->raid_disks; i++) bio_put(r1bio->bios[i]); @@ -381,7 +381,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int /* free extra copy of the data pages */ int i = bio->bi_vcnt; while (i--) - put_page(bio->bi_io_vec[i].bv_page); + safe_put_page(bio->bi_io_vec[i].bv_page); } /* clear the bitmap if all writes complete successfully */ bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, @@ -1907,7 +1907,7 @@ out_free_conf: if (conf->r1bio_pool) mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); - put_page(conf->tmppage); + safe_put_page(conf->tmppage); kfree(conf->poolinfo); kfree(conf); mddev->private = NULL; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 9647ebb0983a..fb952000fae2 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -132,10 +132,10 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) out_free_pages: for ( ; i > 0 ; i--) - put_page(bio->bi_io_vec[i-1].bv_page); + safe_put_page(bio->bi_io_vec[i-1].bv_page); while (j--) for (i = 0; i < RESYNC_PAGES ; i++) - put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); + safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); j = -1; out_free_bio: while ( ++j < nalloc ) @@ -155,7 +155,7 @@ static void r10buf_pool_free(void *__r10_bio, void *data) struct bio *bio = r10bio->devs[j].bio; if (bio) { for (i = 0; i < RESYNC_PAGES; i++) { - put_page(bio->bi_io_vec[i].bv_page); + safe_put_page(bio->bi_io_vec[i].bv_page); bio->bi_io_vec[i].bv_page = NULL; } bio_put(bio); @@ -2042,7 +2042,7 @@ static int run(mddev_t *mddev) out_free_conf: if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); - put_page(conf->tmppage); + safe_put_page(conf->tmppage); kfree(conf->mirrors); kfree(conf); mddev->private = NULL; diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index 950e5fa6e1f2..06b32bd671a3 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -2072,8 +2072,7 @@ static int run(mddev_t *mddev) abort: if (conf) { print_raid6_conf(conf); - if (conf->spare_page) - put_page(conf->spare_page); + safe_put_page(conf->spare_page); kfree(conf->stripe_hashtbl); 
kfree(conf); } -- cgit v1.2.3 From bce74dac082787375e76d2b33726b94c9701fabc Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:41 -0800 Subject: [PATCH] md: helper function to match commands written to sysfs files Commands written to sysfs files may, or my not, be \n terminated. We want to accept with case. For this we use cmd_match. Signed-off-by: Neil Brown Acked-by: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 07f180f95b47..c1613854f38d 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1525,6 +1525,26 @@ repeat: } +/* words written to sysfs files may, or my not, be \n terminated. + * We want to accept with case. For this we use cmd_match. + */ +static int cmd_match(const char *cmd, const char *str) +{ + /* See if cmd, written into a sysfs file, matches + * str. They must either be the same, or cmd can + * have a trailing newline + */ + while (*cmd && *str && *cmd == *str) { + cmd++; + str++; + } + if (*cmd == '\n') + cmd++; + if (*str || *cmd) + return 0; + return 1; +} + struct rdev_sysfs_entry { struct attribute attr; ssize_t (*show)(mdk_rdev_t *, char *); @@ -1799,7 +1819,7 @@ action_store(mddev_t *mddev, const char *page, size_t len) if (!mddev->pers || !mddev->pers->sync_request) return -EINVAL; - if (strcmp(page, "idle")==0 || strcmp(page, "idle\n")==0) { + if (cmd_match(page, "idle")) { if (mddev->sync_thread) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); md_unregister_thread(mddev->sync_thread); @@ -1812,13 +1832,12 @@ action_store(mddev_t *mddev, const char *page, size_t len) if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) return -EBUSY; - if (strcmp(page, "resync")==0 || strcmp(page, "resync\n")==0 || - strcmp(page, "recover")==0 || strcmp(page, "recover\n")==0) + if (cmd_match(page, "resync") || cmd_match(page, "recover")) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); else { - if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0) + if (cmd_match(page, "check")) set_bit(MD_RECOVERY_CHECK, &mddev->recovery); - else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0) + else if (cmd_match(page, "repair")) return -EINVAL; set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); set_bit(MD_RECOVERY_SYNC, &mddev->recovery); -- cgit v1.2.3 From f188593ee7af8c71755d2df269a7a5f62c4b695e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:42 -0800 Subject: [PATCH] md: fix typo in comment Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid10.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index fb952000fae2..8e6f6dfddb2b 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -712,7 +712,7 @@ static void allow_barrier(conf_t *conf) static void freeze_array(conf_t *conf) { /* stop syncio and normal IO and wait for everything to - * go quite. + * go quiet. * We increment barrier and nr_waiting, and then * wait until barrier+nr_pending match nr_queued+2 */ -- cgit v1.2.3 From 2989ddbd6e1d9638a188311b896362c4bf7b7c25 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:43 -0800 Subject: [PATCH] md: make a couple of names in md.c static .. 
because they aren't used outside md.c Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index c1613854f38d..3cf089349e77 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -144,7 +144,7 @@ static int start_readonly; * start array, stop array, error, add device, remove device, * start build, activate spare */ -DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); +static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); static atomic_t md_event_count; void md_new_event(mddev_t *mddev) { @@ -279,7 +279,7 @@ static inline void mddev_unlock(mddev_t * mddev) md_wakeup_thread(mddev->thread); } -mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) +static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) { mdk_rdev_t * rdev; struct list_head *tmp; -- cgit v1.2.3 From 07dbd37727049c0d56e313e5afcf8db07a9c3e37 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Fri, 6 Jan 2006 00:20:44 -0800 Subject: [PATCH] drivers/md/md.c: make md_new_event() static Make the needlessly global function md_new_event() static. Signed-off-by: Adrian Bunk Cc: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 3cf089349e77..241d514343ce 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -146,7 +146,7 @@ static int start_readonly; */ static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); static atomic_t md_event_count; -void md_new_event(mddev_t *mddev) +static void md_new_event(mddev_t *mddev) { atomic_inc(&md_event_count); wake_up(&md_event_waiters); -- cgit v1.2.3 From c708443c004f2310abdd7f1c353daa372b37f7a2 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:45 -0800 Subject: [PATCH] md: make sure bitmap updates are visible through filesystem When we update a page_cache page in the kernel, we need to flush_dache_page or userspace might not see the change. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/bitmap.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index ee4a3424a8a3..76a189ceb529 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -315,6 +315,8 @@ static int write_page(struct bitmap *bitmap, struct page *page, int wait) if (bitmap->file == NULL) return write_sb_page(bitmap->mddev, bitmap->offset, page, wait); + flush_dcache_page(page); /* make sure visible to anyone reading the file */ + if (wait) lock_page(page); else { -- cgit v1.2.3 From 03c902e17f40cfed63cd2494616f35fc9c58571b Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:46 -0800 Subject: [PATCH] md: fix rdev->pending counts in raid1 When we do a user-requested check/repair, we lose count of the outstanding requests... Also make sure that when anything is written to md/sync_action, the RECOVERY_NEEDED flag is set and the thread is woken up so any changes take effect. 
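Part of the fix is using rdev_dec_pending() instead of a bare atomic_dec() in the raid1.c hunks below; the helper does more than drop nr_pending - it also kicks recovery when the last outstanding request on a Faulty device completes, so the failed device can actually be removed. It is defined in the md headers and therefore not visible in this drivers-only view; it amounts to roughly:

/* editorial sketch of the helper used below (the real definition lives
 * in the md headers); shown to clarify the nr_pending accounting */
static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);

	if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
		/* last reference to a failed device: let md clean it up */
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}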
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 11 ++++------- drivers/md/raid1.c | 10 ++++++---- 2 files changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 241d514343ce..0b3081aa4d63 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1826,13 +1826,10 @@ action_store(mddev_t *mddev, const char *page, size_t len) mddev->sync_thread = NULL; mddev->recovery = 0; } - return len; - } - - if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || - test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) + } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || + test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) return -EBUSY; - if (cmd_match(page, "resync") || cmd_match(page, "recover")) + else if (cmd_match(page, "resync") || cmd_match(page, "recover")) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); else { if (cmd_match(page, "check")) @@ -1841,8 +1838,8 @@ action_store(mddev_t *mddev, const char *page, size_t len) return -EINVAL; set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); set_bit(MD_RECOVERY_SYNC, &mddev->recovery); - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); return len; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index feea4eeca1d9..7d4465f93064 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -527,7 +527,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) /* cannot risk returning a device that failed * before we inc'ed nr_pending */ - atomic_dec(&rdev->nr_pending); + rdev_dec_pending(rdev, conf->mddev); goto retry; } conf->next_seq_sect = this_sector + sectors; @@ -830,7 +830,7 @@ static int make_request(request_queue_t *q, struct bio * bio) !test_bit(Faulty, &rdev->flags)) { atomic_inc(&rdev->nr_pending); if (test_bit(Faulty, &rdev->flags)) { - atomic_dec(&rdev->nr_pending); + rdev_dec_pending(rdev, mddev); r1_bio->bios[i] = NULL; } else r1_bio->bios[i] = bio; @@ -1176,6 +1176,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) if (r1_bio->bios[primary]->bi_end_io == end_sync_read && test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) { r1_bio->bios[primary]->bi_end_io = NULL; + rdev_dec_pending(conf->mirrors[primary].rdev, mddev); break; } r1_bio->read_disk = primary; @@ -1193,9 +1194,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) break; if (j >= 0) mddev->resync_mismatches += r1_bio->sectors; - if (j < 0 || test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) + if (j < 0 || test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { sbio->bi_end_io = NULL; - else { + rdev_dec_pending(conf->mirrors[i].rdev, mddev); + } else { /* fixup the bio for reuse */ sbio->bi_vcnt = vcnt; sbio->bi_size = r1_bio->sectors << 9; -- cgit v1.2.3 From 3b34380ae8c5df6debd85183c7fa1ac05f79b7d2 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:47 -0800 Subject: [PATCH] md: allow chunk_size to be settable through sysfs ... only before array is started of course. 
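The new chunk_size attribute below relies on sysfs dispatch glue that already exists in md.c, so it does not appear in this hunk; the glue follows the usual container_of() pattern over struct md_sysfs_entry, roughly as sketched here (illustrative and simplified, not part of the patch). A write to /sys/block/mdX/md/chunk_size therefore arrives in chunk_size_store() with the owning mddev already resolved:

/* editorial sketch of the pre-existing md.c sysfs store dispatch */
static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry =
		container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);

	if (!entry->store)
		return -EIO;
	return entry->store(mddev, page, length);	/* e.g. chunk_size_store() */
}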
Signed-off-by: Neil Brown Acked-by: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 0b3081aa4d63..9e57e97bd530 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1794,6 +1794,31 @@ raid_disks_show(mddev_t *mddev, char *page) static struct md_sysfs_entry md_raid_disks = __ATTR_RO(raid_disks); +static ssize_t +chunk_size_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%d\n", mddev->chunk_size); +} + +static ssize_t +chunk_size_store(mddev_t *mddev, const char *buf, size_t len) +{ + /* can only set chunk_size if array is not yet active */ + char *e; + unsigned long n = simple_strtoul(buf, &e, 10); + + if (mddev->pers) + return -EBUSY; + if (!*buf || (*e && *e != '\n')) + return -EINVAL; + + mddev->chunk_size = n; + return len; +} +static struct md_sysfs_entry md_chunk_size = +__ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store); + + static ssize_t action_show(mddev_t *mddev, char *page) { @@ -1861,6 +1886,7 @@ md_mismatches = __ATTR_RO(mismatch_cnt); static struct attribute *md_default_attrs[] = { &md_level.attr, &md_raid_disks.attr, + &md_chunk_size.attr, NULL, }; -- cgit v1.2.3 From a35b0d695d44410eb1734c9abb632725a3138628 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:49 -0800 Subject: [PATCH] md: allow md array component size to be accessed and set via sysfs Signed-off-by: Neil Brown Acked-by: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 131 ++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 89 insertions(+), 42 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 9e57e97bd530..d568ab4a487f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1819,6 +1819,44 @@ static struct md_sysfs_entry md_chunk_size = __ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store); +static ssize_t +size_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%llu\n", (unsigned long long)mddev->size); +} + +static int update_size(mddev_t *mddev, unsigned long size); + +static ssize_t +size_store(mddev_t *mddev, const char *buf, size_t len) +{ + /* If array is inactive, we can reduce the component size, but + * not increase it (except from 0). + * If array is active, we can try an on-line resize + */ + char *e; + int err = 0; + unsigned long long size = simple_strtoull(buf, &e, 10); + if (!*buf || *buf == '\n' || + (*e && *e != '\n')) + return -EINVAL; + + if (mddev->pers) { + err = update_size(mddev, size); + md_update_sb(mddev); + } else { + if (mddev->size == 0 || + mddev->size > size) + mddev->size = size; + else + err = -ENOSPC; + } + return err ? err : len; +} + +static struct md_sysfs_entry md_size = +__ATTR(component_size, 0644, size_show, size_store); + static ssize_t action_show(mddev_t *mddev, char *page) { @@ -1887,6 +1925,7 @@ static struct attribute *md_default_attrs[] = { &md_level.attr, &md_raid_disks.attr, &md_chunk_size.attr, + &md_size.attr, NULL, }; @@ -3005,6 +3044,54 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) return 0; } +static int update_size(mddev_t *mddev, unsigned long size) +{ + mdk_rdev_t * rdev; + int rv; + struct list_head *tmp; + + if (mddev->pers->resize == NULL) + return -EINVAL; + /* The "size" is the amount of each device that is used. + * This can only make sense for arrays with redundancy. 
+ * linear and raid0 always use whatever space is available + * We can only consider changing the size if no resync + * or reconstruction is happening, and if the new size + * is acceptable. It must fit before the sb_offset or, + * if that is sync_thread) + return -EBUSY; + ITERATE_RDEV(mddev,rdev,tmp) { + sector_t avail; + int fit = (size == 0); + if (rdev->sb_offset > rdev->data_offset) + avail = (rdev->sb_offset*2) - rdev->data_offset; + else + avail = get_capacity(rdev->bdev->bd_disk) + - rdev->data_offset; + if (fit && (size == 0 || size > avail/2)) + size = avail/2; + if (avail < ((sector_t)size << 1)) + return -ENOSPC; + } + rv = mddev->pers->resize(mddev, (sector_t)size *2); + if (!rv) { + struct block_device *bdev; + + bdev = bdget_disk(mddev->gendisk, 0); + if (bdev) { + down(&bdev->bd_inode->i_sem); + i_size_write(bdev->bd_inode, mddev->array_size << 10); + up(&bdev->bd_inode->i_sem); + bdput(bdev); + } + } + return rv; +} + /* * update_array_info is used to change the configuration of an * on-line array. @@ -3053,49 +3140,9 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) else return mddev->pers->reconfig(mddev, info->layout, -1); } - if (mddev->size != info->size) { - mdk_rdev_t * rdev; - struct list_head *tmp; - if (mddev->pers->resize == NULL) - return -EINVAL; - /* The "size" is the amount of each device that is used. - * This can only make sense for arrays with redundancy. - * linear and raid0 always use whatever space is available - * We can only consider changing the size if no resync - * or reconstruction is happening, and if the new size - * is acceptable. It must fit before the sb_offset or, - * if that is sync_thread) - return -EBUSY; - ITERATE_RDEV(mddev,rdev,tmp) { - sector_t avail; - int fit = (info->size == 0); - if (rdev->sb_offset > rdev->data_offset) - avail = (rdev->sb_offset*2) - rdev->data_offset; - else - avail = get_capacity(rdev->bdev->bd_disk) - - rdev->data_offset; - if (fit && (info->size == 0 || info->size > avail/2)) - info->size = avail/2; - if (avail < ((sector_t)info->size << 1)) - return -ENOSPC; - } - rv = mddev->pers->resize(mddev, (sector_t)info->size *2); - if (!rv) { - struct block_device *bdev; + if (mddev->size != info->size) + rv = update_size(mddev, info->size); - bdev = bdget_disk(mddev->gendisk, 0); - if (bdev) { - down(&bdev->bd_inode->i_sem); - i_size_write(bdev->bd_inode, mddev->array_size << 10); - up(&bdev->bd_inode->i_sem); - bdput(bdev); - } - } - } if (mddev->raid_disks != info->raid_disks) { /* change the number of raid disks */ if (mddev->pers->reshape == NULL) -- cgit v1.2.3 From 8bb93aaca2062cd54cc2c58c76ee8409cae209a7 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:50 -0800 Subject: [PATCH] md: expose md metadata format in sysfs Allow it to be set to a particular version, or 'none'. Signed-off-by: Neil Brown Acked-by: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index d568ab4a487f..ecc0166ba779 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1857,6 +1857,54 @@ size_store(mddev_t *mddev, const char *buf, size_t len) static struct md_sysfs_entry md_size = __ATTR(component_size, 0644, size_show, size_store); + +/* Metdata version. 
+ * This is either 'none' for arrays with externally managed metadata, + * or N.M for internally known formats + */ +static ssize_t +metadata_show(mddev_t *mddev, char *page) +{ + if (mddev->persistent) + return sprintf(page, "%d.%d\n", + mddev->major_version, mddev->minor_version); + else + return sprintf(page, "none\n"); +} + +static ssize_t +metadata_store(mddev_t *mddev, const char *buf, size_t len) +{ + int major, minor; + char *e; + if (!list_empty(&mddev->disks)) + return -EBUSY; + + if (cmd_match(buf, "none")) { + mddev->persistent = 0; + mddev->major_version = 0; + mddev->minor_version = 90; + return len; + } + major = simple_strtoul(buf, &e, 10); + if (e==buf || *e != '.') + return -EINVAL; + buf = e+1; + minor = simple_strtoul(buf, &e, 10); + if (e==buf || *e != '\n') + return -EINVAL; + if (major >= sizeof(super_types)/sizeof(super_types[0]) || + super_types[major].name == NULL) + return -ENOENT; + mddev->major_version = major; + mddev->minor_version = minor; + mddev->persistent = 1; + return len; +} + +static struct md_sysfs_entry md_metadata = +__ATTR(metadata_version, 0644, metadata_show, metadata_store); + static ssize_t action_show(mddev_t *mddev, char *page) { @@ -1926,6 +1974,7 @@ static struct attribute *md_default_attrs[] = { &md_raid_disks.attr, &md_chunk_size.attr, &md_size.attr, + &md_metadata.attr, NULL, }; -- cgit v1.2.3 From d9d166c2a9d5d01af34396793950aa695883eed4 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:51 -0800 Subject: [PATCH] md: allow array level to be set textually via sysfs Signed-off-by: Neil Brown Acked-by: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/faulty.c | 1 + drivers/md/linear.c | 3 ++- drivers/md/md.c | 61 +++++++++++++++++++++++++++++++++++++++----------- drivers/md/multipath.c | 1 + drivers/md/raid0.c | 1 + drivers/md/raid1.c | 1 + drivers/md/raid10.c | 1 + drivers/md/raid5.c | 2 ++ drivers/md/raid6main.c | 1 + 9 files changed, 58 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index f12e83086897..a7a5ab554338 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -342,4 +342,5 @@ module_init(raid_init); module_exit(raid_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-10"); /* faulty */ +MODULE_ALIAS("md-faulty"); MODULE_ALIAS("md-level--5"); diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 79dee8159217..777585458c85 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -376,5 +376,6 @@ static void linear_exit (void) module_init(linear_init); module_exit(linear_exit); MODULE_LICENSE("GPL"); -MODULE_ALIAS("md-personality-1"); /* LINEAR - degrecated*/ +MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/ +MODULE_ALIAS("md-linear"); MODULE_ALIAS("md-level--1"); diff --git a/drivers/md/md.c b/drivers/md/md.c index ecc0166ba779..594d8c312e6a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -303,12 +303,15 @@ static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) return NULL; } -static struct mdk_personality *find_pers(int level) +static struct mdk_personality *find_pers(int level, char *clevel) { struct mdk_personality *pers; - list_for_each_entry(pers, &pers_list, list) - if (pers->level == level) + list_for_each_entry(pers, &pers_list, list) { + if (level != LEVEL_NONE && pers->level == level) return pers; + if (strcmp(pers->name, clevel)==0) + return pers; + } return NULL; } @@ -715,6 +718,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) mddev->ctime = 
sb->ctime; mddev->utime = sb->utime; mddev->level = sb->level; + mddev->clevel[0] = 0; mddev->layout = sb->layout; mddev->raid_disks = sb->raid_disks; mddev->size = sb->size; @@ -1051,6 +1055,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); mddev->level = le32_to_cpu(sb->level); + mddev->clevel[0] = 0; mddev->layout = le32_to_cpu(sb->layout); mddev->raid_disks = le32_to_cpu(sb->raid_disks); mddev->size = le64_to_cpu(sb->size)/2; @@ -1774,15 +1779,36 @@ static ssize_t level_show(mddev_t *mddev, char *page) { struct mdk_personality *p = mddev->pers; - if (p == NULL && mddev->raid_disks == 0) - return 0; - if (mddev->level >= 0) - return sprintf(page, "raid%d\n", mddev->level); - else + if (p) return sprintf(page, "%s\n", p->name); + else if (mddev->clevel[0]) + return sprintf(page, "%s\n", mddev->clevel); + else if (mddev->level != LEVEL_NONE) + return sprintf(page, "%d\n", mddev->level); + else + return 0; +} + +static ssize_t +level_store(mddev_t *mddev, const char *buf, size_t len) +{ + int rv = len; + if (mddev->pers) + return -EBUSY; + if (len == 0) + return 0; + if (len >= sizeof(mddev->clevel)) + return -ENOSPC; + strncpy(mddev->clevel, buf, len); + if (mddev->clevel[len-1] == '\n') + len--; + mddev->clevel[len] = 0; + mddev->level = LEVEL_NONE; + return rv; } -static struct md_sysfs_entry md_level = __ATTR_RO(level); +static struct md_sysfs_entry md_level = +__ATTR(level, 0644, level_show, level_store); static ssize_t raid_disks_show(mddev_t *mddev, char *page) @@ -2158,7 +2184,10 @@ static int do_md_run(mddev_t * mddev) } #ifdef CONFIG_KMOD - request_module("md-level-%d", mddev->level); + if (mddev->level != LEVEL_NONE) + request_module("md-level-%d", mddev->level); + else if (mddev->clevel[0]) + request_module("md-%s", mddev->clevel); #endif /* @@ -2180,15 +2209,21 @@ static int do_md_run(mddev_t * mddev) return -ENOMEM; spin_lock(&pers_lock); - pers = find_pers(mddev->level); + pers = find_pers(mddev->level, mddev->clevel); if (!pers || !try_module_get(pers->owner)) { spin_unlock(&pers_lock); - printk(KERN_WARNING "md: personality for level %d is not loaded!\n", - mddev->level); + if (mddev->level != LEVEL_NONE) + printk(KERN_WARNING "md: personality for level %d is not loaded!\n", + mddev->level); + else + printk(KERN_WARNING "md: personality for level %s is not loaded!\n", + mddev->clevel); return -EINVAL; } mddev->pers = pers; spin_unlock(&pers_lock); + mddev->level = pers->level; + strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); mddev->recovery = 0; mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index d4d838e3f9f8..e6aa309a66d7 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -578,4 +578,5 @@ module_init(multipath_init); module_exit(multipath_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-7"); /* MULTIPATH */ +MODULE_ALIAS("md-multipath"); MODULE_ALIAS("md-level--4"); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 7fb69e29391b..abbca150202b 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -536,4 +536,5 @@ module_init(raid0_init); module_exit(raid0_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-2"); /* RAID0 */ +MODULE_ALIAS("md-raid0"); MODULE_ALIAS("md-level-0"); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 7d4465f93064..181c9616d5f1 100644 --- 
a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2092,4 +2092,5 @@ module_init(raid_init); module_exit(raid_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-3"); /* RAID1 */ +MODULE_ALIAS("md-raid1"); MODULE_ALIAS("md-level-1"); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 8e6f6dfddb2b..201dc7168a5f 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2117,4 +2117,5 @@ module_init(raid_init); module_exit(raid_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-9"); /* RAID10 */ +MODULE_ALIAS("md-raid10"); MODULE_ALIAS("md-level-10"); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index b0cfd3ca9ca0..9cc844f455bf 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2240,5 +2240,7 @@ module_init(raid5_init); module_exit(raid5_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-4"); /* RAID5 */ +MODULE_ALIAS("md-raid5"); +MODULE_ALIAS("md-raid4"); MODULE_ALIAS("md-level-5"); MODULE_ALIAS("md-level-4"); diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index 06b32bd671a3..84dd875bb2f6 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -2341,4 +2341,5 @@ module_init(raid6_init); module_exit(raid6_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-8"); /* RAID6 */ +MODULE_ALIAS("md-raid6"); MODULE_ALIAS("md-level-6"); -- cgit v1.2.3 From 4dbcdc751cb25ffca3a8374cbc5ab6de961cc545 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:52 -0800 Subject: [PATCH] md: count corrected read errors per drive Store this total in superblock (As appropriate), and make it available to userspace via sysfs. Signed-off-by: Neil Brown Acked-by: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 27 ++++++++++++++++++++++++++- drivers/md/raid1.c | 2 ++ drivers/md/raid10.c | 11 ++++++++--- drivers/md/raid5.c | 3 +++ drivers/md/raid6main.c | 3 +++ 5 files changed, 42 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 594d8c312e6a..32a4e2311e43 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1000,6 +1000,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) } rdev->preferred_minor = 0xffff; rdev->data_offset = le64_to_cpu(sb->data_offset); + atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; @@ -1139,6 +1140,8 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) else sb->resync_offset = cpu_to_le64(0); + sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors); + if (mddev->bitmap && mddev->bitmap_file == NULL) { sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); @@ -1592,9 +1595,30 @@ super_show(mdk_rdev_t *rdev, char *page) } static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super); +static ssize_t +errors_show(mdk_rdev_t *rdev, char *page) +{ + return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); +} + +static ssize_t +errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) +{ + char *e; + unsigned long n = simple_strtoul(buf, &e, 10); + if (*buf && (*e == 0 || *e == '\n')) { + atomic_set(&rdev->corrected_errors, n); + return len; + } + return -EINVAL; +} +static struct rdev_sysfs_entry rdev_errors = +__ATTR(errors, 0644, errors_show, errors_store); + static struct attribute *rdev_default_attrs[] = { &rdev_state.attr, &rdev_super.attr, + 
&rdev_errors.attr, NULL, }; static ssize_t @@ -1674,6 +1698,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi rdev->data_offset = 0; atomic_set(&rdev->nr_pending, 0); atomic_set(&rdev->read_errors, 0); + atomic_set(&rdev->corrected_errors, 0); size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; if (!size) { @@ -4729,7 +4754,7 @@ static int set_ro(const char *val, struct kernel_param *kp) int num = simple_strtoul(val, &e, 10); if (*val && (*e == '\0' || *e == '\n')) { start_readonly = num; - return 0;; + return 0; } return -EINVAL; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 181c9616d5f1..a06ff91f27e2 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1265,6 +1265,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) if (r1_bio->bios[d]->bi_end_io != end_sync_read) continue; rdev = conf->mirrors[d].rdev; + atomic_add(s, &rdev->corrected_errors); if (sync_page_io(rdev->bdev, sect + rdev->data_offset, s<<9, @@ -1463,6 +1464,7 @@ static void raid1d(mddev_t *mddev) d = conf->raid_disks; d--; rdev = conf->mirrors[d].rdev; + atomic_add(s, &rdev->corrected_errors); if (rdev && test_bit(In_sync, &rdev->flags)) { if (sync_page_io(rdev->bdev, diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 201dc7168a5f..9e658e519a27 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1122,9 +1122,13 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) if (test_bit(BIO_UPTODATE, &bio->bi_flags)) set_bit(R10BIO_Uptodate, &r10_bio->state); - else if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) - md_error(r10_bio->mddev, - conf->mirrors[d].rdev); + else { + atomic_add(r10_bio->sectors, + &conf->mirrors[d].rdev->corrected_errors); + if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) + md_error(r10_bio->mddev, + conf->mirrors[d].rdev); + } /* for reconstruct, we always reschedule after a read. * for resync, only after all reads @@ -1430,6 +1434,7 @@ static void raid10d(mddev_t *mddev) sl--; d = r10_bio->devs[sl].devnum; rdev = conf->mirrors[d].rdev; + atomic_add(s, &rdev->corrected_errors); if (rdev && test_bit(In_sync, &rdev->flags)) { if (sync_page_io(rdev->bdev, diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 9cc844f455bf..54f4a9847e38 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1400,6 +1400,9 @@ static void handle_stripe(struct stripe_head *sh) bi->bi_io_vec[0].bv_offset = 0; bi->bi_size = STRIPE_SIZE; bi->bi_next = NULL; + if (rw == WRITE && + test_bit(R5_ReWrite, &sh->dev[i].flags)) + atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); generic_make_request(bi); } else { if (rw == 1) diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index 84dd875bb2f6..8c823d686a60 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -1562,6 +1562,9 @@ static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) bi->bi_io_vec[0].bv_offset = 0; bi->bi_size = STRIPE_SIZE; bi->bi_next = NULL; + if (rw == WRITE && + test_bit(R5_ReWrite, &sh->dev[i].flags)) + atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); generic_make_request(bi); } else { if (rw == 1) -- cgit v1.2.3 From da943b9912df063322d37b1a1f285460531d481d Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:54 -0800 Subject: [PATCH] md: allow md/raid_disks to be settable If array is active, try to reshape, else just set the value. 
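As a rough userspace sketch (an editorial illustration, not part of the patch; the /sys/block/md0/md/raid_disks path and the value written are assumptions), the new attribute could be exercised like this:

#include <stdio.h>

int main(void)
{
	/* Ask md0 for 4 raid disks.  On an inactive array the value is
	 * simply recorded; on an active array the personality's reshape
	 * method is invoked, as described above. */
	FILE *f = fopen("/sys/block/md0/md/raid_disks", "w");

	if (!f) {
		perror("raid_disks");
		return 1;
	}
	if (fputs("4\n", f) == EOF)
		perror("write raid_disks");
	if (fclose(f) == EOF)
		perror("close raid_disks");
	return 0;
}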
Signed-off-by: Neil Brown Acked-by: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 74 +++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 51 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 32a4e2311e43..86e9f2efae5c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1843,7 +1843,27 @@ raid_disks_show(mddev_t *mddev, char *page) return sprintf(page, "%d\n", mddev->raid_disks); } -static struct md_sysfs_entry md_raid_disks = __ATTR_RO(raid_disks); +static int update_raid_disks(mddev_t *mddev, int raid_disks); + +static ssize_t +raid_disks_store(mddev_t *mddev, const char *buf, size_t len) +{ + /* can only set raid_disks if array is not yet active */ + char *e; + int rv = 0; + unsigned long n = simple_strtoul(buf, &e, 10); + + if (!*buf || (*e && *e != '\n')) + return -EINVAL; + + if (mddev->pers) + rv = update_raid_disks(mddev, n); + else + mddev->raid_disks = n; + return rv ? rv : len; +} +static struct md_sysfs_entry md_raid_disks = +__ATTR(raid_disks, 0644, raid_disks_show, raid_disks_store); static ssize_t chunk_size_show(mddev_t *mddev, char *page) @@ -3201,6 +3221,33 @@ static int update_size(mddev_t *mddev, unsigned long size) return rv; } +static int update_raid_disks(mddev_t *mddev, int raid_disks) +{ + int rv; + /* change the number of raid disks */ + if (mddev->pers->reshape == NULL) + return -EINVAL; + if (raid_disks <= 0 || + raid_disks >= mddev->max_disks) + return -EINVAL; + if (mddev->sync_thread) + return -EBUSY; + rv = mddev->pers->reshape(mddev, raid_disks); + if (!rv) { + struct block_device *bdev; + + bdev = bdget_disk(mddev->gendisk, 0); + if (bdev) { + down(&bdev->bd_inode->i_sem); + i_size_write(bdev->bd_inode, mddev->array_size << 10); + up(&bdev->bd_inode->i_sem); + bdput(bdev); + } + } + return rv; +} + + /* * update_array_info is used to change the configuration of an * on-line array. @@ -3252,28 +3299,9 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) if (mddev->size != info->size) rv = update_size(mddev, info->size); - if (mddev->raid_disks != info->raid_disks) { - /* change the number of raid disks */ - if (mddev->pers->reshape == NULL) - return -EINVAL; - if (info->raid_disks <= 0 || - info->raid_disks >= mddev->max_disks) - return -EINVAL; - if (mddev->sync_thread) - return -EBUSY; - rv = mddev->pers->reshape(mddev, info->raid_disks); - if (!rv) { - struct block_device *bdev; - - bdev = bdget_disk(mddev->gendisk, 0); - if (bdev) { - down(&bdev->bd_inode->i_sem); - i_size_write(bdev->bd_inode, mddev->array_size << 10); - up(&bdev->bd_inode->i_sem); - bdput(bdev); - } - } - } + if (mddev->raid_disks != info->raid_disks) + rv = update_raid_disks(mddev, info->raid_disks); + if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { if (mddev->pers->quiesce == NULL) return -EINVAL; -- cgit v1.2.3 From 2bf071bf50580380a8c3afe5eef8152a66be96c7 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:55 -0800 Subject: [PATCH] md: keep better track of dev/array size when assembling md arrays Move the checks - that dev size is never less than array size - into bind_rdev_to_array to make sure they always happen properly (there is one place where currently they don't). Also reject any superblock which claims an array size smaller than the device in question can hold. 
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 86e9f2efae5c..27a9871f3057 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -695,6 +695,10 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version } rdev->size = calc_dev_size(rdev, sb->chunk_size); + if (rdev->size < sb->size && sb->level > 1) + /* "this cannot possibly happen" ... */ + ret = -EINVAL; + abort: return ret; } @@ -1039,6 +1043,9 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) rdev->size = le64_to_cpu(sb->data_size)/2; if (le32_to_cpu(sb->chunksize)) rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1); + + if (le32_to_cpu(sb->size) > rdev->size*2) + return -EINVAL; return 0; } @@ -1224,6 +1231,14 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) MD_BUG(); return -EINVAL; } + /* make sure rdev->size exceeds mddev->size */ + if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) { + if (mddev->pers) + /* Cannot change size, so fail */ + return -ENOSPC; + else + mddev->size = rdev->size; + } same_pdev = match_dev_unit(mddev, rdev); if (same_pdev) printk(KERN_WARNING @@ -2898,12 +2913,6 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) if (info->state & (1<<MD_DISK_WRITEMOSTLY)) set_bit(WriteMostly, &rdev->flags); - err = bind_rdev_to_array(rdev, mddev); - if (err) { - export_rdev(rdev); - return err; - } - if (!mddev->persistent) { printk(KERN_INFO "md: nonpersistent superblock ...\n"); rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; @@ -2911,8 +2920,11 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) rdev->sb_offset = calc_dev_sboffset(rdev->bdev); rdev->size = calc_dev_size(rdev, mddev->chunk_size); - if (!mddev->size || (mddev->size > rdev->size)) - mddev->size = rdev->size; + err = bind_rdev_to_array(rdev, mddev); + if (err) { + export_rdev(rdev); + return err; + } } return 0; @@ -2984,15 +2996,6 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) size = calc_dev_size(rdev, mddev->chunk_size); rdev->size = size; - if (size < mddev->size) { - printk(KERN_WARNING - "%s: disk size %llu blocks < array size %llu\n", - mdname(mddev), (unsigned long long)size, - (unsigned long long)mddev->size); - err = -ENOSPC; - goto abort_export; - } - if (test_bit(Faulty, &rdev->flags)) { printk(KERN_WARNING "md: can not hot-add faulty %s disk to %s!\n", @@ -3002,7 +3005,9 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) } clear_bit(In_sync, &rdev->flags); rdev->desc_nr = -1; - bind_rdev_to_array(rdev, mddev); + err = bind_rdev_to_array(rdev, mddev); + if (err) + goto abort_export; /* * The rest should better be atomic, we can have disk failures -- cgit v1.2.3 From 014236d2b8ec6faea2a6134ab8e019d84d67b524 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:55 -0800 Subject: [PATCH] md: expose device slot information via sysfs This allows the role that a device has in an array to be viewed and set. 
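A minimal sketch of reading the new attribute from userspace (not part of the patch; the md0 array and the dev-sda1 member directory name are assumptions):

#include <stdio.h>

int main(void)
{
	/* Prints "none" for a device with no assigned slot, otherwise the
	 * raid_disk number; writing to the same file sets the slot on an
	 * inactive array. */
	char buf[32];
	FILE *f = fopen("/sys/block/md0/md/dev-sda1/slot", "r");

	if (!f) {
		perror("slot");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("slot: %s", buf);
	fclose(f);
	return 0;
}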
Signed-off-by: Neil Brown Acked-by: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 27a9871f3057..a8169564209d 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1630,10 +1630,45 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) static struct rdev_sysfs_entry rdev_errors = __ATTR(errors, 0644, errors_show, errors_store); +static ssize_t +slot_show(mdk_rdev_t *rdev, char *page) +{ + if (rdev->raid_disk < 0) + return sprintf(page, "none\n"); + else + return sprintf(page, "%d\n", rdev->raid_disk); +} + +static ssize_t +slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) +{ + char *e; + int slot = simple_strtoul(buf, &e, 10); + if (strncmp(buf, "none", 4)==0) + slot = -1; + else if (e==buf || (*e && *e!= '\n')) + return -EINVAL; + if (rdev->mddev->pers) + /* Cannot set slot in active array (yet) */ + return -EBUSY; + if (slot >= rdev->mddev->raid_disks) + return -ENOSPC; + rdev->raid_disk = slot; + /* assume it is working */ + rdev->flags = 0; + set_bit(In_sync, &rdev->flags); + return len; +} + + +static struct rdev_sysfs_entry rdev_slot = +__ATTR(slot, 0644, slot_show, slot_store); + static struct attribute *rdev_default_attrs[] = { &rdev_state.attr, &rdev_super.attr, &rdev_errors.attr, + &rdev_slot.attr, NULL, }; static ssize_t -- cgit v1.2.3 From 93c8cad03f02dbd1532a5413bdced25f000d5728 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:20:56 -0800 Subject: [PATCH] md: export rdev->data_offset via sysfs Signed-off-by: Neil Brown Acked-by: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index a8169564209d..742a82a4d10b 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1664,11 +1664,34 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) static struct rdev_sysfs_entry rdev_slot = __ATTR(slot, 0644, slot_show, slot_store); +static ssize_t +offset_show(mdk_rdev_t *rdev, char *page) +{ + return sprintf(page, "%llu\n", rdev->data_offset); +} + +static ssize_t +offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) +{ + char *e; + unsigned long long offset = simple_strtoull(buf, &e, 10); + if (e==buf || (*e && *e != '\n')) + return -EINVAL; + if (rdev->mddev->pers) + return -EBUSY; + rdev->data_offset = offset; + return len; +} + +static struct rdev_sysfs_entry rdev_offset = +__ATTR(offset, 0644, offset_show, offset_store); + static struct attribute *rdev_default_attrs[] = { &rdev_state.attr, &rdev_super.attr, &rdev_errors.attr, &rdev_slot.attr, + &rdev_offset.attr, NULL, }; static ssize_t -- cgit v1.2.3 From 6961ece46c7d02de1bb83914900608e39633787d Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 6 Jan 2006 00:20:59 -0800 Subject: [PATCH] md-export-rdev-data_offset-via-sysfs-fix drivers/md/md.c: In function `offset_show': drivers/md/md.c:1670: warning: long long unsigned int format, different type arg (arg 3) Cc: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 742a82a4d10b..27db100d8b12 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1667,7 +1667,7 @@ __ATTR(slot, 0644, slot_show, slot_store); static 
ssize_t offset_show(mdk_rdev_t *rdev, char *page) { - return sprintf(page, "%llu\n", rdev->data_offset); + return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); } static ssize_t -- cgit v1.2.3 From 83303b613d00718b07ec0a4dee7c99aa66629d96 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:21:06 -0800 Subject: [PATCH] md: allow available size of component devices to be set via sysfs Signed-off-by: Neil Brown Acked-by: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 27db100d8b12..40ac7fbab61f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1686,12 +1686,37 @@ offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) static struct rdev_sysfs_entry rdev_offset = __ATTR(offset, 0644, offset_show, offset_store); +static ssize_t +rdev_size_show(mdk_rdev_t *rdev, char *page) +{ + return sprintf(page, "%llu\n", (unsigned long long)rdev->size); +} + +static ssize_t +rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) +{ + char *e; + unsigned long long size = simple_strtoull(buf, &e, 10); + if (e==buf || (*e && *e != '\n')) + return -EINVAL; + if (rdev->mddev->pers) + return -EBUSY; + rdev->size = size; + if (size < rdev->mddev->size || rdev->mddev->size == 0) + rdev->mddev->size = size; + return len; +} + +static struct rdev_sysfs_entry rdev_size = +__ATTR(size, 0644, rdev_size_show, rdev_size_store); + static struct attribute *rdev_default_attrs[] = { &rdev_state.attr, &rdev_super.attr, &rdev_errors.attr, &rdev_slot.attr, &rdev_offset.attr, + &rdev_size.attr, NULL, }; static ssize_t -- cgit v1.2.3 From 6d7ff7380b2e28c2807da3bf9fa614d91d15bacf Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:21:16 -0800 Subject: [PATCH] md: support adding new devices to md arrays via sysfs Writing major:minor to md/new_dev will bind that device to the array. Signed-off-by: Neil Brown Acked-by: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 40ac7fbab61f..825e235b791b 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1987,6 +1987,65 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len) static struct md_sysfs_entry md_chunk_size = __ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store); +static ssize_t +null_show(mddev_t *mddev, char *page) +{ + return -EINVAL; +} + +static ssize_t +new_dev_store(mddev_t *mddev, const char *buf, size_t len) +{ + /* buf must be %d:%d\n? giving major and minor numbers */ + /* The new device is added to the array. + * If the array has a persistent superblock, we read the + * superblock to initialise info and check validity. + * Otherwise, only checking done is that in bind_rdev_to_array, + * which mainly checks size. 
+ */ + char *e; + int major = simple_strtoul(buf, &e, 10); + int minor; + dev_t dev; + mdk_rdev_t *rdev; + int err; + + if (!*buf || *e != ':' || !e[1] || e[1] == '\n') + return -EINVAL; + minor = simple_strtoul(e+1, &e, 10); + if (*e && *e != '\n') + return -EINVAL; + dev = MKDEV(major, minor); + if (major != MAJOR(dev) || + minor != MINOR(dev)) + return -EOVERFLOW; + + + if (mddev->persistent) { + rdev = md_import_device(dev, mddev->major_version, + mddev->minor_version); + if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { + mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, + mdk_rdev_t, same_set); + err = super_types[mddev->major_version] + .load_super(rdev, rdev0, mddev->minor_version); + if (err < 0) + goto out; + } + } else + rdev = md_import_device(dev, -1, -1); + + if (IS_ERR(rdev)) + return PTR_ERR(rdev); + err = bind_rdev_to_array(rdev, mddev); + out: + if (err) + export_rdev(rdev); + return err ? err : len; +} + +static struct md_sysfs_entry md_new_device = +__ATTR(new_dev, 0200, null_show, new_dev_store); static ssize_t size_show(mddev_t *mddev, char *page) @@ -2144,6 +2203,7 @@ static struct attribute *md_default_attrs[] = { &md_chunk_size.attr, &md_size.attr, &md_metadata.attr, + &md_new_device.attr, NULL, }; -- cgit v1.2.3 From 88202a0c84e1951d6630d1d557d4801a8cc5b5ef Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 6 Jan 2006 00:21:36 -0800 Subject: [PATCH] md: allow sync-speed to be controlled per-device Also export current (average) speed and status in sysfs. Signed-off-by: Neil Brown Acked-by: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 110 +++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 105 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 825e235b791b..1b76fb29fb70 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -81,10 +81,22 @@ static DEFINE_SPINLOCK(pers_lock); * idle IO detection. * * you can change it via /proc/sys/dev/raid/speed_limit_min and _max. + * or /sys/block/mdX/md/sync_speed_{min,max} */ static int sysctl_speed_limit_min = 1000; static int sysctl_speed_limit_max = 200000; +static inline int speed_min(mddev_t *mddev) +{ + return mddev->sync_speed_min ? + mddev->sync_speed_min : sysctl_speed_limit_min; +} + +static inline int speed_max(mddev_t *mddev) +{ + return mddev->sync_speed_max ? + mddev->sync_speed_max : sysctl_speed_limit_max; +} static struct ctl_table_header *raid_table_header; @@ -2197,6 +2209,90 @@ md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); +static ssize_t +sync_min_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%d (%s)\n", speed_min(mddev), + mddev->sync_speed_min ? "local": "system"); +} + +static ssize_t +sync_min_store(mddev_t *mddev, const char *buf, size_t len) +{ + int min; + char *e; + if (strncmp(buf, "system", 6)==0) { + mddev->sync_speed_min = 0; + return len; + } + min = simple_strtoul(buf, &e, 10); + if (buf == e || (*e && *e != '\n') || min <= 0) + return -EINVAL; + mddev->sync_speed_min = min; + return len; +} + +static struct md_sysfs_entry md_sync_min = +__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); + +static ssize_t +sync_max_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%d (%s)\n", speed_max(mddev), + mddev->sync_speed_max ? 
"local": "system"); +} + +static ssize_t +sync_max_store(mddev_t *mddev, const char *buf, size_t len) +{ + int max; + char *e; + if (strncmp(buf, "system", 6)==0) { + mddev->sync_speed_max = 0; + return len; + } + max = simple_strtoul(buf, &e, 10); + if (buf == e || (*e && *e != '\n') || max <= 0) + return -EINVAL; + mddev->sync_speed_max = max; + return len; +} + +static struct md_sysfs_entry md_sync_max = +__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); + + +static ssize_t +sync_speed_show(mddev_t *mddev, char *page) +{ + unsigned long resync, dt, db; + resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); + dt = ((jiffies - mddev->resync_mark) / HZ); + if (!dt) dt++; + db = resync - (mddev->resync_mark_cnt); + return sprintf(page, "%ld\n", db/dt/2); /* K/sec */ +} + +static struct md_sysfs_entry +md_sync_speed = __ATTR_RO(sync_speed); + +static ssize_t +sync_completed_show(mddev_t *mddev, char *page) +{ + unsigned long max_blocks, resync; + + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + max_blocks = mddev->resync_max_sectors; + else + max_blocks = mddev->size << 1; + + resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); + return sprintf(page, "%lu / %lu\n", resync, max_blocks); +} + +static struct md_sysfs_entry +md_sync_completed = __ATTR_RO(sync_completed); + static struct attribute *md_default_attrs[] = { &md_level.attr, &md_raid_disks.attr, @@ -2210,6 +2306,10 @@ static struct attribute *md_default_attrs[] = { static struct attribute *md_redundancy_attrs[] = { &md_scan_mode.attr, &md_mismatches.attr, + &md_sync_min.attr, + &md_sync_max.attr, + &md_sync_speed.attr, + &md_sync_completed.attr, NULL, }; static struct attribute_group md_redundancy_group = { @@ -4433,10 +4533,10 @@ static void md_do_sync(mddev_t *mddev) printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev)); printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:" - " %d KB/sec/disc.\n", sysctl_speed_limit_min); + " %d KB/sec/disc.\n", speed_min(mddev)); printk(KERN_INFO "md: using maximum available idle IO bandwidth " "(but not more than %d KB/sec) for reconstruction.\n", - sysctl_speed_limit_max); + speed_max(mddev)); is_mddev_idle(mddev); /* this also initializes IO event counters */ /* we don't use the checkpoint if there's a bitmap */ @@ -4477,7 +4577,7 @@ static void md_do_sync(mddev_t *mddev) skipped = 0; sectors = mddev->pers->sync_request(mddev, j, &skipped, - currspeed < sysctl_speed_limit_min); + currspeed < speed_min(mddev)); if (sectors == 0) { set_bit(MD_RECOVERY_ERR, &mddev->recovery); goto out; @@ -4542,8 +4642,8 @@ static void md_do_sync(mddev_t *mddev) currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 /((jiffies-mddev->resync_mark)/HZ +1) +1; - if (currspeed > sysctl_speed_limit_min) { - if ((currspeed > sysctl_speed_limit_max) || + if (currspeed > speed_min(mddev)) { + if ((currspeed > speed_max(mddev)) || !is_mddev_idle(mddev)) { msleep(500); goto repeat; -- cgit v1.2.3 From 9b847548663ef1039dd49f0eb4463d001e596bc3 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 6 Jan 2006 09:28:07 +0100 Subject: [PATCH] Suspend support for libata This patch adds suspend patch to libata, and ata_piix in particular. For most low level drivers, they should just need to add the 4 hooks to work. As I can only test ata_piix, I didn't enable it for more though. Suspend support is the single most important feature on a notebook, and most new notebooks have sata drives. 
It's quite embarrassing that we _still_ do not support this. Right now, it's perfectly possible to suspend the drive in mid-transfer. Signed-off-by: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/scsi/ata_piix.c | 4 ++ drivers/scsi/libata-core.c | 114 +++++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/libata-scsi.c | 16 +++++++ drivers/scsi/scsi_sysfs.c | 31 ++++++++++++ 4 files changed, 165 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c index 0ea27873b9ff..f79630340028 100644 --- a/drivers/scsi/ata_piix.c +++ b/drivers/scsi/ata_piix.c @@ -166,6 +166,8 @@ static struct pci_driver piix_pci_driver = { .id_table = piix_pci_tbl, .probe = piix_init_one, .remove = ata_pci_remove_one, + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, }; static struct scsi_host_template piix_sht = { @@ -186,6 +188,8 @@ static struct scsi_host_template piix_sht = { .slave_configure = ata_scsi_slave_config, .bios_param = ata_std_bios_param, .ordered_flush = 1, + .resume = ata_scsi_device_resume, + .suspend = ata_scsi_device_suspend, }; static const struct ata_port_operations piix_pata_ops = { diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 9ea102587914..9c66d4059399 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c @@ -4154,6 +4154,96 @@ err_out: * Inherited from caller. */ +/* + * Execute a 'simple' command, that only consists of the opcode 'cmd' itself, + * without filling any other registers + */ +static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev, + u8 cmd) +{ + struct ata_taskfile tf; + int err; + + ata_tf_init(ap, &tf, dev->devno); + + tf.command = cmd; + tf.flags |= ATA_TFLAG_DEVICE; + tf.protocol = ATA_PROT_NODATA; + + err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); + if (err) + printk(KERN_ERR "%s: ata command failed: %d\n", + __FUNCTION__, err); + + return err; +} + +static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev) +{ + u8 cmd; + + if (!ata_try_flush_cache(dev)) + return 0; + + if (ata_id_has_flush_ext(dev->id)) + cmd = ATA_CMD_FLUSH_EXT; + else + cmd = ATA_CMD_FLUSH; + + return ata_do_simple_cmd(ap, dev, cmd); +} + +static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev) +{ + return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1); +} + +static int ata_start_drive(struct ata_port *ap, struct ata_device *dev) +{ + return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE); +} + +/** + * ata_device_resume - wakeup a previously suspended devices + * + * Kick the drive back into action, by sending it an idle immediate + * command and making sure its transfer mode matches between drive + * and host. + * + */ +int ata_device_resume(struct ata_port *ap, struct ata_device *dev) +{ + if (ap->flags & ATA_FLAG_SUSPENDED) { + ap->flags &= ~ATA_FLAG_SUSPENDED; + ata_set_mode(ap); + } + if (!ata_dev_present(dev)) + return 0; + if (dev->class == ATA_DEV_ATA) + ata_start_drive(ap, dev); + + return 0; +} + +/** + * ata_device_suspend - prepare a device for suspend + * + * Flush the cache on the drive, if appropriate, then issue a + * standbynow command. 
+ * + */ +int ata_device_suspend(struct ata_port *ap, struct ata_device *dev) +{ + if (!ata_dev_present(dev)) + return 0; + if (dev->class == ATA_DEV_ATA) + ata_flush_cache(ap, dev); + + ata_standby_drive(ap, dev); + ap->flags |= ATA_FLAG_SUSPENDED; + return 0; +} + int ata_port_start (struct ata_port *ap) { struct device *dev = ap->host_set->dev; @@ -4902,6 +4992,23 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) return (tmp == bits->val) ? 1 : 0; } + +int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state) +{ + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + return 0; +} + +int ata_pci_device_resume(struct pci_dev *pdev) +{ + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_enable_device(pdev); + pci_set_master(pdev); + return 0; +} #endif /* CONFIG_PCI */ @@ -5005,4 +5112,11 @@ EXPORT_SYMBOL_GPL(ata_pci_host_stop); EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); EXPORT_SYMBOL_GPL(ata_pci_init_one); EXPORT_SYMBOL_GPL(ata_pci_remove_one); +EXPORT_SYMBOL_GPL(ata_pci_device_suspend); +EXPORT_SYMBOL_GPL(ata_pci_device_resume); #endif /* CONFIG_PCI */ + +EXPORT_SYMBOL_GPL(ata_device_suspend); +EXPORT_SYMBOL_GPL(ata_device_resume); +EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); +EXPORT_SYMBOL_GPL(ata_scsi_device_resume); diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c index e0439be4b573..c1ebede14a48 100644 --- a/drivers/scsi/libata-scsi.c +++ b/drivers/scsi/libata-scsi.c @@ -396,6 +396,22 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf) } } +int ata_scsi_device_resume(struct scsi_device *sdev) +{ + struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0]; + struct ata_device *dev = &ap->device[sdev->id]; + + return ata_device_resume(ap, dev); +} + +int ata_scsi_device_suspend(struct scsi_device *sdev) +{ + struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0]; + struct ata_device *dev = &ap->device[sdev->id]; + + return ata_device_suspend(ap, dev); +} + /** * ata_to_sense_error - convert ATA error to SCSI error * @id: ATA device number diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 15842b1f0f4a..ea7f3a433572 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -263,9 +263,40 @@ static int scsi_bus_match(struct device *dev, struct device_driver *gendrv) return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0; } +static int scsi_bus_suspend(struct device * dev, pm_message_t state) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct scsi_host_template *sht = sdev->host->hostt; + int err; + + err = scsi_device_quiesce(sdev); + if (err) + return err; + + if (sht->suspend) + err = sht->suspend(sdev); + + return err; +} + +static int scsi_bus_resume(struct device * dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct scsi_host_template *sht = sdev->host->hostt; + int err = 0; + + if (sht->resume) + err = sht->resume(sdev); + + scsi_device_resume(sdev); + return err; +} + struct bus_type scsi_bus_type = { .name = "scsi", .match = scsi_bus_match, + .suspend = scsi_bus_suspend, + .resume = scsi_bus_resume, }; int scsi_sysfs_register(void) -- cgit v1.2.3
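To make the libata change above concrete: apart from ata_piix, a low-level driver would opt in by pointing the four new hooks at the exported helpers, in the same way the ata_piix hunk does. A sketch only, with a hypothetical driver name ("foo") and all unrelated fields elided:

#include <linux/pci.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>

/* Route SCSI-layer suspend/resume of each attached device to libata. */
static struct scsi_host_template foo_sht = {
	/* ... the driver's usual libata sht fields ... */
	.suspend	= ata_scsi_device_suspend,
	.resume		= ata_scsi_device_resume,
};

/* Save and restore PCI state around system sleep. */
static struct pci_driver foo_pci_driver = {
	/* ... .name, .id_table, .probe, .remove ... */
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};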