From 1c1452be2e9ae282a7316c3b23987811bd7acda6 Mon Sep 17 00:00:00 2001 From: Jonas Larsson Date: Tue, 31 Mar 2009 11:16:48 +0200 Subject: atmel-mci: Add support for inverted detect pin Same patch as before, modified to use bool. Also adds description of the new field in struct atmel_mci that I missed in the first patch. This patch adds Atmel MCI support for inverted detect pins. Signed-off-by: Jonas Larsson Acked-by: Pierre Ossman Signed-off-by: Haavard Skinnemoen --- drivers/mmc/host/atmel-mci.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index cf6a100bb38f..7b603e4b41db 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -177,6 +177,7 @@ struct atmel_mci { * available. * @wp_pin: GPIO pin used for card write protect sending, or negative * if not available. + * @detect_is_active_high: The state of the detect pin when it is active. * @detect_timer: Timer used for debouncing @detect_pin interrupts. */ struct atmel_mci_slot { @@ -196,6 +197,7 @@ struct atmel_mci_slot { int detect_pin; int wp_pin; + bool detect_is_active_high; struct timer_list detect_timer; }; @@ -924,7 +926,8 @@ static int atmci_get_cd(struct mmc_host *mmc) struct atmel_mci_slot *slot = mmc_priv(mmc); if (gpio_is_valid(slot->detect_pin)) { - present = !gpio_get_value(slot->detect_pin); + present = !(gpio_get_value(slot->detect_pin) ^ + slot->detect_is_active_high); dev_dbg(&mmc->class_dev, "card is %spresent\n", present ? "" : "not "); } @@ -1028,7 +1031,8 @@ static void atmci_detect_change(unsigned long data) return; enable_irq(gpio_to_irq(slot->detect_pin)); - present = !gpio_get_value(slot->detect_pin); + present = !(gpio_get_value(slot->detect_pin) ^ + slot->detect_is_active_high); present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags); dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n", @@ -1456,6 +1460,7 @@ static int __init atmci_init_slot(struct atmel_mci *host, slot->host = host; slot->detect_pin = slot_data->detect_pin; slot->wp_pin = slot_data->wp_pin; + slot->detect_is_active_high = slot_data->detect_is_active_high; slot->sdc_reg = sdc_reg; mmc->ops = &atmci_ops; @@ -1477,7 +1482,8 @@ static int __init atmci_init_slot(struct atmel_mci *host, if (gpio_request(slot->detect_pin, "mmc_detect")) { dev_dbg(&mmc->class_dev, "no detect pin available\n"); slot->detect_pin = -EBUSY; - } else if (gpio_get_value(slot->detect_pin)) { + } else if (gpio_get_value(slot->detect_pin) ^ + slot->detect_is_active_high) { clear_bit(ATMCI_CARD_PRESENT, &slot->flags); } } -- cgit v1.2.3 From 38f7b009a6ae1708fcf0f208aba9a9a4364bcfcf Mon Sep 17 00:00:00 2001 From: Hartley Sweeten Date: Wed, 15 Apr 2009 23:18:26 +0100 Subject: [ARM] 5452/1: ep93x: rtc: use ioremap'ed addresses Update the rtc-ep93xx driver to use ioremap'ed addresses. This removes the dependency on and properly reports the memory addresses used by the driver in /proc/iomem. In addition, ep93xx_rtc_init() is updated to use platform_driver_probe() instead of platform_driver_register(). Also, the device_create_file() calls are now properly checked for error conditions. The created sysfs files are also now removed when the driver is removed. The version number for the driver has been bumped at the request of Alessandro Zummo. 
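To illustrate the registration change: platform_driver_probe() binds only devices that already exist when the driver registers, so the probe routine can live in __init memory and be discarded after boot. A minimal sketch of the pattern, with placeholder names (foo_*) that are not taken from this patch:

	static int __init foo_probe(struct platform_device *pdev)
	{
		/* called once at registration time for already-present devices */
		return 0;
	}

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
		},
		/* no .probe pointer: later hotplug binding is deliberately impossible */
	};

	static int __init foo_init(void)
	{
		/* register and probe in one step; fails if no device matched */
		return platform_driver_probe(&foo_driver, foo_probe);
	}
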
Signed-off-by: H Hartley Sweeten Acked-by: Alessandro Zummo Signed-off-by: Russell King --- drivers/rtc/rtc-ep93xx.c | 149 +++++++++++++++++++++++++++++++++++------------ 1 file changed, 112 insertions(+), 37 deletions(-) (limited to 'drivers') diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c index f7a3283dd029..551332e4ed02 100644 --- a/drivers/rtc/rtc-ep93xx.c +++ b/drivers/rtc/rtc-ep93xx.c @@ -12,32 +12,56 @@ #include #include #include -#include +#include + +#define EP93XX_RTC_DATA 0x000 +#define EP93XX_RTC_MATCH 0x004 +#define EP93XX_RTC_STATUS 0x008 +#define EP93XX_RTC_STATUS_INTR (1<<0) +#define EP93XX_RTC_LOAD 0x00C +#define EP93XX_RTC_CONTROL 0x010 +#define EP93XX_RTC_CONTROL_MIE (1<<0) +#define EP93XX_RTC_SWCOMP 0x108 +#define EP93XX_RTC_SWCOMP_DEL_MASK 0x001f0000 +#define EP93XX_RTC_SWCOMP_DEL_SHIFT 16 +#define EP93XX_RTC_SWCOMP_INT_MASK 0x0000ffff +#define EP93XX_RTC_SWCOMP_INT_SHIFT 0 + +#define DRV_VERSION "0.3" -#define EP93XX_RTC_REG(x) (EP93XX_RTC_BASE + (x)) -#define EP93XX_RTC_DATA EP93XX_RTC_REG(0x0000) -#define EP93XX_RTC_LOAD EP93XX_RTC_REG(0x000C) -#define EP93XX_RTC_SWCOMP EP93XX_RTC_REG(0x0108) - -#define DRV_VERSION "0.2" +/* + * struct device dev.platform_data is used to store our private data + * because struct rtc_device does not have a variable to hold it. + */ +struct ep93xx_rtc { + void __iomem *mmio_base; +}; -static int ep93xx_get_swcomp(struct device *dev, unsigned short *preload, +static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload, unsigned short *delete) { - unsigned short comp = __raw_readl(EP93XX_RTC_SWCOMP); + struct ep93xx_rtc *ep93xx_rtc = dev->platform_data; + unsigned long comp; + + comp = __raw_readl(ep93xx_rtc->mmio_base + EP93XX_RTC_SWCOMP); if (preload) - *preload = comp & 0xffff; + *preload = (comp & EP93XX_RTC_SWCOMP_INT_MASK) + >> EP93XX_RTC_SWCOMP_INT_SHIFT; if (delete) - *delete = (comp >> 16) & 0x1f; + *delete = (comp & EP93XX_RTC_SWCOMP_DEL_MASK) + >> EP93XX_RTC_SWCOMP_DEL_SHIFT; return 0; } static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm) { - unsigned long time = __raw_readl(EP93XX_RTC_DATA); + struct ep93xx_rtc *ep93xx_rtc = dev->platform_data; + unsigned long time; + + time = __raw_readl(ep93xx_rtc->mmio_base + EP93XX_RTC_DATA); rtc_time_to_tm(time, tm); return 0; @@ -45,7 +69,9 @@ static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm) static int ep93xx_rtc_set_mmss(struct device *dev, unsigned long secs) { - __raw_writel(secs + 1, EP93XX_RTC_LOAD); + struct ep93xx_rtc *ep93xx_rtc = dev->platform_data; + + __raw_writel(secs + 1, ep93xx_rtc->mmio_base + EP93XX_RTC_LOAD); return 0; } @@ -53,7 +79,7 @@ static int ep93xx_rtc_proc(struct device *dev, struct seq_file *seq) { unsigned short preload, delete; - ep93xx_get_swcomp(dev, &preload, &delete); + ep93xx_rtc_get_swcomp(dev, &preload, &delete); seq_printf(seq, "preload\t\t: %d\n", preload); seq_printf(seq, "delete\t\t: %d\n", delete); @@ -67,54 +93,104 @@ static const struct rtc_class_ops ep93xx_rtc_ops = { .proc = ep93xx_rtc_proc, }; -static ssize_t ep93xx_sysfs_show_comp_preload(struct device *dev, +static ssize_t ep93xx_rtc_show_comp_preload(struct device *dev, struct device_attribute *attr, char *buf) { unsigned short preload; - ep93xx_get_swcomp(dev, &preload, NULL); + ep93xx_rtc_get_swcomp(dev, &preload, NULL); return sprintf(buf, "%d\n", preload); } -static DEVICE_ATTR(comp_preload, S_IRUGO, ep93xx_sysfs_show_comp_preload, NULL); +static DEVICE_ATTR(comp_preload, S_IRUGO, 
ep93xx_rtc_show_comp_preload, NULL); -static ssize_t ep93xx_sysfs_show_comp_delete(struct device *dev, +static ssize_t ep93xx_rtc_show_comp_delete(struct device *dev, struct device_attribute *attr, char *buf) { unsigned short delete; - ep93xx_get_swcomp(dev, NULL, &delete); + ep93xx_rtc_get_swcomp(dev, NULL, &delete); return sprintf(buf, "%d\n", delete); } -static DEVICE_ATTR(comp_delete, S_IRUGO, ep93xx_sysfs_show_comp_delete, NULL); +static DEVICE_ATTR(comp_delete, S_IRUGO, ep93xx_rtc_show_comp_delete, NULL); -static int __devinit ep93xx_rtc_probe(struct platform_device *dev) +static int __init ep93xx_rtc_probe(struct platform_device *pdev) { - struct rtc_device *rtc = rtc_device_register("ep93xx", - &dev->dev, &ep93xx_rtc_ops, THIS_MODULE); + struct ep93xx_rtc *ep93xx_rtc; + struct resource *res; + struct rtc_device *rtc; + int err; + + ep93xx_rtc = kzalloc(sizeof(struct ep93xx_rtc), GFP_KERNEL); + if (ep93xx_rtc == NULL) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) + return -ENXIO; + + res = request_mem_region(res->start, resource_size(res), pdev->name); + if (res == NULL) + return -EBUSY; + + ep93xx_rtc->mmio_base = ioremap(res->start, resource_size(res)); + if (ep93xx_rtc->mmio_base == NULL) { + err = -ENXIO; + goto fail; + } + pdev->dev.platform_data = ep93xx_rtc; + + rtc = rtc_device_register(pdev->name, + &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { - return PTR_ERR(rtc); + err = PTR_ERR(rtc); + goto fail; } - platform_set_drvdata(dev, rtc); + platform_set_drvdata(pdev, rtc); - device_create_file(&dev->dev, &dev_attr_comp_preload); - device_create_file(&dev->dev, &dev_attr_comp_delete); + err = device_create_file(&pdev->dev, &dev_attr_comp_preload); + if (err) + goto fail; + err = device_create_file(&pdev->dev, &dev_attr_comp_delete); + if (err) { + device_remove_file(&pdev->dev, &dev_attr_comp_preload); + goto fail; + } return 0; + +fail: + if (ep93xx_rtc->mmio_base) { + iounmap(ep93xx_rtc->mmio_base); + pdev->dev.platform_data = NULL; + } + release_mem_region(res->start, resource_size(res)); + return err; } -static int __devexit ep93xx_rtc_remove(struct platform_device *dev) +static int __exit ep93xx_rtc_remove(struct platform_device *pdev) { - struct rtc_device *rtc = platform_get_drvdata(dev); + struct rtc_device *rtc = platform_get_drvdata(pdev); + struct ep93xx_rtc *ep93xx_rtc = pdev->dev.platform_data; + struct resource *res; + + /* cleanup sysfs */ + device_remove_file(&pdev->dev, &dev_attr_comp_delete); + device_remove_file(&pdev->dev, &dev_attr_comp_preload); + + rtc_device_unregister(rtc); + + iounmap(ep93xx_rtc->mmio_base); + pdev->dev.platform_data = NULL; - if (rtc) - rtc_device_unregister(rtc); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); - platform_set_drvdata(dev, NULL); + platform_set_drvdata(pdev, NULL); return 0; } @@ -122,23 +198,22 @@ static int __devexit ep93xx_rtc_remove(struct platform_device *dev) /* work with hotplug and coldplug */ MODULE_ALIAS("platform:ep93xx-rtc"); -static struct platform_driver ep93xx_rtc_platform_driver = { +static struct platform_driver ep93xx_rtc_driver = { .driver = { .name = "ep93xx-rtc", .owner = THIS_MODULE, }, - .probe = ep93xx_rtc_probe, - .remove = __devexit_p(ep93xx_rtc_remove), + .remove = __exit_p(ep93xx_rtc_remove), }; static int __init ep93xx_rtc_init(void) { - return platform_driver_register(&ep93xx_rtc_platform_driver); + return platform_driver_probe(&ep93xx_rtc_driver, 
ep93xx_rtc_probe); } static void __exit ep93xx_rtc_exit(void) { - platform_driver_unregister(&ep93xx_rtc_platform_driver); + platform_driver_unregister(&ep93xx_rtc_driver); } MODULE_AUTHOR("Alessandro Zummo "); -- cgit v1.2.3 From 8970ef47d56fd3db28ee798b9d400caf08abd924 Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Thu, 19 Mar 2009 15:02:34 +0000 Subject: [ARM] S3C24XX: Remove hardware specific registers from DMA calls The S3C24XX DMA API channel configuration registers are being passed values comprised of register values which makes it hard to move the API to cover both the S3C24XX and S3C64XX. These values can be calculated from knowing which device the channel is connected to, so remove them from the two calls s3c2410_dma_config and s3c2410_dma_devconfig. Signed-off-by: Ben Dooks Signed-off-by: Ben Dooks --- drivers/mmc/host/s3cmci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 2db166b7096f..889f35047a52 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -789,7 +789,7 @@ static void s3cmci_dma_setup(struct s3cmci_host *host, last_source = source; - s3c2410_dma_devconfig(host->dma, source, 3, + s3c2410_dma_devconfig(host->dma, source, host->mem->start + host->sdidata); if (!setup_ok) { -- cgit v1.2.3 From 8c8fdbc9bd9718b21146065de61c0cafdff11ecb Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Wed, 1 Apr 2009 12:40:15 +0200 Subject: [ARM] Remove arch-imx from build system arch-imx is superseeded by the MXC architecture support. This patch removes arch-imx from the build system. Signed-off-by: Sascha Hauer --- drivers/mmc/host/Kconfig | 2 +- drivers/serial/imx.c | 13 +++---------- drivers/spi/Kconfig | 2 +- drivers/video/Kconfig | 2 +- 4 files changed, 6 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index b4cf691f3f64..3eb87bda14f3 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -155,7 +155,7 @@ config MMC_ATMELMCI_DMA config MMC_IMX tristate "Motorola i.MX Multimedia Card Interface support" - depends on ARCH_IMX + depends on ARCH_MX1 help This selects the Motorola i.MX Multimedia card Interface. 
If you have a i.MX platform with a Multimedia Card slot, diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c index 9f460b175c50..3f5d5a200481 100644 --- a/drivers/serial/imx.c +++ b/drivers/serial/imx.c @@ -66,7 +66,7 @@ #define ONEMS 0xb0 /* One Millisecond register */ #define UTS 0xb4 /* UART Test Register */ #endif -#if defined(CONFIG_ARCH_IMX) || defined(CONFIG_ARCH_MX1) +#ifdef CONFIG_ARCH_MX1 #define BIPR1 0xb0 /* Incremental Preset Register 1 */ #define BIPR2 0xb4 /* Incremental Preset Register 2 */ #define BIPR3 0xb8 /* Incremental Preset Register 3 */ @@ -96,7 +96,7 @@ #define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */ #define UCR1_SNDBRK (1<<4) /* Send break */ #define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */ -#if defined(CONFIG_ARCH_IMX) || defined(CONFIG_ARCH_MX1) +#ifdef CONFIG_ARCH_MX1 #define UCR1_UARTCLKEN (1<<2) /* UART clock enabled */ #endif #if defined CONFIG_ARCH_MX3 || defined CONFIG_ARCH_MX2 @@ -127,7 +127,7 @@ #define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */ #define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */ #define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */ -#ifdef CONFIG_ARCH_IMX +#ifdef CONFIG_ARCH_MX1 #define UCR3_REF25 (1<<3) /* Ref freq 25 MHz, only on mx1 */ #define UCR3_REF30 (1<<2) /* Ref Freq 30 MHz, only on mx1 */ #endif @@ -180,13 +180,6 @@ #define UTS_SOFTRST (1<<0) /* Software reset */ /* We've been assigned a range on the "Low-density serial ports" major */ -#ifdef CONFIG_ARCH_IMX -#define SERIAL_IMX_MAJOR 204 -#define MINOR_START 41 -#define DEV_NAME "ttySMX" -#define MAX_INTERNAL_IRQ IMX_IRQS -#endif - #ifdef CONFIG_ARCH_MXC #define SERIAL_IMX_MAJOR 207 #define MINOR_START 16 diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 83a185d52961..7c61251bea61 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -118,7 +118,7 @@ config SPI_GPIO config SPI_IMX tristate "Freescale iMX SPI controller" - depends on ARCH_IMX && EXPERIMENTAL + depends on ARCH_MX1 && EXPERIMENTAL help This enables using the Freescale iMX SPI controller in master mode. diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 7826bdce4bbe..93258e114517 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -397,7 +397,7 @@ config FB_SA1100 config FB_IMX tristate "Motorola i.MX LCD support" - depends on FB && (ARCH_IMX || ARCH_MX2) + depends on FB && (ARCH_MX1 || ARCH_MX2) select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT -- cgit v1.2.3 From 45001e92d0249a8c4b9f6c3695215652e8e8493d Mon Sep 17 00:00:00 2001 From: Alan Carvalho de Assis Date: Thu, 2 Apr 2009 12:38:41 -0300 Subject: i.MX31: Add hw-random for RNGA This hw-random driver add support to RNGA hardware found on some i.MX processors. Signed-off-by: Alan Carvalho de Assis Acked-by: Matt Mackall Signed-off-by: Sascha Hauer --- drivers/char/hw_random/Kconfig | 12 ++ drivers/char/hw_random/Makefile | 1 + drivers/char/hw_random/mxc-rnga.c | 247 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 260 insertions(+) create mode 100644 drivers/char/hw_random/mxc-rnga.c (limited to 'drivers') diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 5fab6470f4b2..26c93c75e62d 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -148,3 +148,15 @@ config HW_RANDOM_VIRTIO To compile this driver as a module, choose M here: the module will be called virtio-rng. If unsure, say N. 
+ +config HW_RANDOM_MXC_RNGA + tristate "Freescale i.MX RNGA Random Number Generator" + depends on HW_RANDOM && ARCH_HAS_RNGA + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on Freescale i.MX processors. + + To compile this driver as a module, choose M here: the + module will be called mxc-rnga. + + If unsure, say Y. diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index e81d21a5f28f..fd1ecd2f6731 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -15,3 +15,4 @@ obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o +obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c new file mode 100644 index 000000000000..187c6be80f43 --- /dev/null +++ b/drivers/char/hw_random/mxc-rnga.c @@ -0,0 +1,247 @@ +/* + * RNG driver for Freescale RNGA + * + * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved. + * Author: Alan Carvalho de Assis + */ + +/* + * The code contained herein is licensed under the GNU General Public + * License. You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + * + * This driver is based on other RNG drivers. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* RNGA Registers */ +#define RNGA_CONTROL 0x00 +#define RNGA_STATUS 0x04 +#define RNGA_ENTROPY 0x08 +#define RNGA_OUTPUT_FIFO 0x0c +#define RNGA_MODE 0x10 +#define RNGA_VERIFICATION_CONTROL 0x14 +#define RNGA_OSC_CONTROL_COUNTER 0x18 +#define RNGA_OSC1_COUNTER 0x1c +#define RNGA_OSC2_COUNTER 0x20 +#define RNGA_OSC_COUNTER_STATUS 0x24 + +/* RNGA Registers Range */ +#define RNG_ADDR_RANGE 0x28 + +/* RNGA Control Register */ +#define RNGA_CONTROL_SLEEP 0x00000010 +#define RNGA_CONTROL_CLEAR_INT 0x00000008 +#define RNGA_CONTROL_MASK_INTS 0x00000004 +#define RNGA_CONTROL_HIGH_ASSURANCE 0x00000002 +#define RNGA_CONTROL_GO 0x00000001 + +#define RNGA_STATUS_LEVEL_MASK 0x0000ff00 + +/* RNGA Status Register */ +#define RNGA_STATUS_OSC_DEAD 0x80000000 +#define RNGA_STATUS_SLEEP 0x00000010 +#define RNGA_STATUS_ERROR_INT 0x00000008 +#define RNGA_STATUS_FIFO_UNDERFLOW 0x00000004 +#define RNGA_STATUS_LAST_READ_STATUS 0x00000002 +#define RNGA_STATUS_SECURITY_VIOLATION 0x00000001 + +static struct platform_device *rng_dev; + +static int mxc_rnga_data_present(struct hwrng *rng) +{ + int level; + void __iomem *rng_base = (void __iomem *)rng->priv; + + /* how many random numbers is in FIFO? [0-16] */ + level = ((__raw_readl(rng_base + RNGA_STATUS) & + RNGA_STATUS_LEVEL_MASK) >> 8); + + return level > 0 ? 1 : 0; +} + +static int mxc_rnga_data_read(struct hwrng *rng, u32 * data) +{ + int err; + u32 ctrl; + void __iomem *rng_base = (void __iomem *)rng->priv; + + /* retrieve a random number from FIFO */ + *data = __raw_readl(rng_base + RNGA_OUTPUT_FIFO); + + /* some error while reading this random number? 
*/ + err = __raw_readl(rng_base + RNGA_STATUS) & RNGA_STATUS_ERROR_INT; + + /* if error: clear error interrupt, but doesn't return random number */ + if (err) { + dev_dbg(&rng_dev->dev, "Error while reading random number!\n"); + ctrl = __raw_readl(rng_base + RNGA_CONTROL); + __raw_writel(ctrl | RNGA_CONTROL_CLEAR_INT, + rng_base + RNGA_CONTROL); + return 0; + } else + return 4; +} + +static int mxc_rnga_init(struct hwrng *rng) +{ + u32 ctrl, osc; + void __iomem *rng_base = (void __iomem *)rng->priv; + + /* wake up */ + ctrl = __raw_readl(rng_base + RNGA_CONTROL); + __raw_writel(ctrl & ~RNGA_CONTROL_SLEEP, rng_base + RNGA_CONTROL); + + /* verify if oscillator is working */ + osc = __raw_readl(rng_base + RNGA_STATUS); + if (osc & RNGA_STATUS_OSC_DEAD) { + dev_err(&rng_dev->dev, "RNGA Oscillator is dead!\n"); + return -ENODEV; + } + + /* go running */ + ctrl = __raw_readl(rng_base + RNGA_CONTROL); + __raw_writel(ctrl | RNGA_CONTROL_GO, rng_base + RNGA_CONTROL); + + return 0; +} + +static void mxc_rnga_cleanup(struct hwrng *rng) +{ + u32 ctrl; + void __iomem *rng_base = (void __iomem *)rng->priv; + + ctrl = __raw_readl(rng_base + RNGA_CONTROL); + + /* stop rnga */ + __raw_writel(ctrl & ~RNGA_CONTROL_GO, rng_base + RNGA_CONTROL); +} + +static struct hwrng mxc_rnga = { + .name = "mxc-rnga", + .init = mxc_rnga_init, + .cleanup = mxc_rnga_cleanup, + .data_present = mxc_rnga_data_present, + .data_read = mxc_rnga_data_read +}; + +static int __init mxc_rnga_probe(struct platform_device *pdev) +{ + int err = -ENODEV; + struct clk *clk; + struct resource *res, *mem; + void __iomem *rng_base = NULL; + + if (rng_dev) + return -EBUSY; + + clk = clk_get(&pdev->dev, "rng"); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Could not get rng_clk!\n"); + err = PTR_ERR(clk); + goto out; + } + + clk_enable(clk); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + err = -ENOENT; + goto err_region; + } + + mem = request_mem_region(res->start, resource_size(res), pdev->name); + if (mem == NULL) { + err = -EBUSY; + goto err_region; + } + + rng_base = ioremap(res->start, resource_size(res)); + if (!rng_base) { + err = -ENOMEM; + goto err_ioremap; + } + + mxc_rnga.priv = (unsigned long)rng_base; + + err = hwrng_register(&mxc_rnga); + if (err) { + dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err); + goto err_register; + } + + rng_dev = pdev; + + dev_info(&pdev->dev, "MXC RNGA Registered.\n"); + + return 0; + +err_register: + iounmap(rng_base); + rng_base = NULL; + +err_ioremap: + release_mem_region(res->start, resource_size(res)); + +err_region: + clk_disable(clk); + clk_put(clk); + +out: + return err; +} + +static int __exit mxc_rnga_remove(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + void __iomem *rng_base = (void __iomem *)mxc_rnga.priv; + struct clk *clk = clk_get(&pdev->dev, "rng"); + + hwrng_unregister(&mxc_rnga); + + iounmap(rng_base); + + release_mem_region(res->start, resource_size(res)); + + clk_disable(clk); + clk_put(clk); + + return 0; +} + +static struct platform_driver mxc_rnga_driver = { + .driver = { + .name = "mxc_rnga", + .owner = THIS_MODULE, + }, + .remove = __exit_p(mxc_rnga_remove), +}; + +static int __init mod_init(void) +{ + return platform_driver_probe(&mxc_rnga_driver, mxc_rnga_probe); +} + +static void __exit mod_exit(void) +{ + platform_driver_unregister(&mxc_rnga_driver); +} + +module_init(mod_init); +module_exit(mod_exit); + +MODULE_AUTHOR("Freescale Semiconductor, Inc."); +MODULE_DESCRIPTION("H/W 
RNGA driver for i.MX"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From c2e5307b902426247afa48d3f1ed4fa5409dcb49 Mon Sep 17 00:00:00 2001 From: Valentin Longchamp Date: Wed, 6 May 2009 11:54:48 +0200 Subject: mx31: changed CONSISTENT_DMA_SIZE to 8M for mx31 video Signed-off-by: Valentin Longchamp Acked-by: Guennadi Liakhovetski Signed-off-by: Sascha Hauer --- drivers/media/video/Kconfig | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index 9d48da2fb013..57835f5715fc 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig @@ -758,10 +758,14 @@ config VIDEO_MX1 ---help--- This is a v4l2 driver for the i.MX1/i.MXL CMOS Sensor Interface +config MX3_VIDEO + bool + config VIDEO_MX3 tristate "i.MX3x Camera Sensor Interface driver" depends on VIDEO_DEV && MX3_IPU && SOC_CAMERA select VIDEOBUF_DMA_CONTIG + select MX3_VIDEO ---help--- This is a v4l2 driver for the i.MX3x Camera Sensor Interface -- cgit v1.2.3 From c8a4fb472c5101ec52f94b1e1277b8fde4b823cf Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 12 May 2009 21:41:03 +0200 Subject: FB: fix unsafe use of disable_irq() in mx3fb.c mx3fb.c calls disable_irq() from a DMA callback, i.e., in an IRQ-handler context, which has always been unsafe, and became deadly after the merge of threaded interrupt handler support. Use disable_irq_nosync() instead. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Sascha Hauer --- drivers/video/mx3fb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index 9894de1c9b9f..7a168ba65c64 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c @@ -706,7 +706,7 @@ static void mx3fb_dma_done(void *arg) dev_dbg(mx3fb->dev, "irq %d callback\n", ichannel->eof_irq); /* We only need one interrupt, it will be re-enabled as needed */ - disable_irq(ichannel->eof_irq); + disable_irq_nosync(ichannel->eof_irq); complete(&mx3_fbi->flip_cmpl); } -- cgit v1.2.3 From 92e0d896ce3087112602449efd87c6d7f4eae8d0 Mon Sep 17 00:00:00 2001 From: Sergey Belyashov Date: Mon, 4 May 2009 13:01:02 +0400 Subject: HID: autocentering support for Logitech G25 Racing Wheel Some months ago I send patch which adds autocentering for Logitech MOMO Wheel. Now I have access to Logitech G25 Racing Wheel and test autocentering for it. I write patch for current kernel to support autocentering for G25 in legacy mode (this device supports other modes, but after switching device reconnects with ID 0xc299 and FF support comes out) and others Logitech (Driving Force, Formula Force Ex etc) wheels with ID 046d:c294. 
Signed-off-by: Sergey Belyashov Signed-off-by: Jiri Kosina --- drivers/hid/hid-lgff.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c index 51aff08e10ce..9735be6ee4ff 100644 --- a/drivers/hid/hid-lgff.c +++ b/drivers/hid/hid-lgff.c @@ -61,7 +61,7 @@ static const struct dev_type devices[] = { { 0x046d, 0xc219, ff_rumble }, { 0x046d, 0xc283, ff_joystick }, { 0x046d, 0xc286, ff_joystick }, - { 0x046d, 0xc294, ff_joystick }, + { 0x046d, 0xc294, ff_wheel }, { 0x046d, 0xc295, ff_joystick }, { 0x046d, 0xca03, ff_wheel }, }; -- cgit v1.2.3 From ca2dcd40f54c8928b3994712a6cadd2078a087fa Mon Sep 17 00:00:00 2001 From: Bastien Nocera Date: Mon, 11 May 2009 17:18:12 +0200 Subject: HID: Wacom Graphire Bluetooth driver Based on the work by Andrew Zabolotny, an HID driver for the Bluetooth Wacom tablet. This is required as it uses a slightly different protocols from what's currently support by the drivers/input/wacom* driver, and those only support USB. A user-space patch is required to activate mode 2 of the Wacom tablet, as hidp does not support hid_output_raw_report. Signed-off-by: Bastien Nocera Signed-off-by: Jiri Kosina --- drivers/hid/Kconfig | 7 ++ drivers/hid/Makefile | 1 + drivers/hid/hid-core.c | 1 + drivers/hid/hid-ids.h | 1 + drivers/hid/hid-wacom.c | 259 ++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 269 insertions(+) create mode 100644 drivers/hid/hid-wacom.c (limited to 'drivers') diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 7e67dcb3d4f6..21edf407bbda 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -271,6 +271,13 @@ config THRUSTMASTER_FF Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or a THRUSTMASTER Ferrari GT Rumble Force or Force Feedback Wheel. +config HID_WACOM + tristate "Wacom Bluetooth devices support" if EMBEDDED + depends on BT_HIDP + default !EMBEDDED + ---help--- + Support for Wacom Graphire Bluetooth tablet. 
+ config ZEROPLUS_FF tristate "Zeroplus based game controller support" depends on USB_HID diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 1f7cb0fd4505..39cebcfa898c 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -40,6 +40,7 @@ obj-$(CONFIG_GREENASIA_FF) += hid-gaff.o obj-$(CONFIG_THRUSTMASTER_FF) += hid-tmff.o obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o obj-$(CONFIG_ZEROPLUS_FF) += hid-zpff.o +obj-$(CONFIG_HID_WACOM) += hid-wacom.o obj-$(CONFIG_USB_HID) += usbhid/ obj-$(CONFIG_USB_MOUSE) += usbhid/ diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 8551693d645f..9f38ab874c93 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1312,6 +1312,7 @@ static const struct hid_device_id hid_blacklist[] = { { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) }, { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 4d5ee2bbc62b..b519692732db 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -414,6 +414,7 @@ #define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006 #define USB_VENDOR_ID_WACOM 0x056a +#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81 #define USB_VENDOR_ID_WISEGROUP 0x0925 #define USB_DEVICE_ID_1_PHIDGETSERVO_20 0x8101 diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c new file mode 100644 index 000000000000..1f9237f511e3 --- /dev/null +++ b/drivers/hid/hid-wacom.c @@ -0,0 +1,259 @@ +/* + * Bluetooth Wacom Tablet support + * + * Copyright (c) 1999 Andreas Gal + * Copyright (c) 2000-2005 Vojtech Pavlik + * Copyright (c) 2005 Michael Haboustak for Concept2, Inc + * Copyright (c) 2006-2007 Jiri Kosina + * Copyright (c) 2007 Paul Walmsley + * Copyright (c) 2008 Jiri Slaby + * Copyright (c) 2006 Andrew Zabolotny + * Copyright (c) 2009 Bastien Nocera + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include +#include +#include + +#include "hid-ids.h" + +struct wacom_data { + __u16 tool; + unsigned char butstate; +}; + +static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report, + u8 *raw_data, int size) +{ + struct wacom_data *wdata = hid_get_drvdata(hdev); + struct hid_input *hidinput; + struct input_dev *input; + unsigned char *data = (unsigned char *) raw_data; + int tool, x, y, rw; + + if (!(hdev->claimed & HID_CLAIMED_INPUT)) + return 0; + + tool = 0; + hidinput = list_entry(hdev->inputs.next, struct hid_input, list); + input = hidinput->input; + + /* Check if this is a tablet report */ + if (data[0] != 0x03) + return 0; + + /* Get X & Y positions */ + x = le16_to_cpu(*(__le16 *) &data[2]); + y = le16_to_cpu(*(__le16 *) &data[4]); + + /* Get current tool identifier */ + if (data[1] & 0x90) { /* If pen is in the in/active area */ + switch ((data[1] >> 5) & 3) { + case 0: /* Pen */ + tool = BTN_TOOL_PEN; + break; + + case 1: /* Rubber */ + tool = BTN_TOOL_RUBBER; + break; + + case 2: /* Mouse with wheel */ + case 3: /* Mouse without wheel */ + tool = BTN_TOOL_MOUSE; + break; + } + + /* Reset tool if out of active tablet area */ + if (!(data[1] & 0x10)) + tool = 0; + } + + /* If tool changed, notify input subsystem */ + if (wdata->tool != tool) { + if (wdata->tool) { + /* Completely reset old tool state */ + if (wdata->tool == BTN_TOOL_MOUSE) { + input_report_key(input, BTN_LEFT, 0); + input_report_key(input, BTN_RIGHT, 0); + input_report_key(input, BTN_MIDDLE, 0); + input_report_abs(input, ABS_DISTANCE, + input->absmax[ABS_DISTANCE]); + } else { + input_report_key(input, BTN_TOUCH, 0); + input_report_key(input, BTN_STYLUS, 0); + input_report_key(input, BTN_STYLUS2, 0); + input_report_abs(input, ABS_PRESSURE, 0); + } + input_report_key(input, wdata->tool, 0); + input_sync(input); + } + wdata->tool = tool; + if (tool) + input_report_key(input, tool, 1); + } + + if (tool) { + input_report_abs(input, ABS_X, x); + input_report_abs(input, ABS_Y, y); + + switch ((data[1] >> 5) & 3) { + case 2: /* Mouse with wheel */ + input_report_key(input, BTN_MIDDLE, data[1] & 0x04); + rw = (data[6] & 0x01) ? -1 : + (data[6] & 0x02) ? 1 : 0; + input_report_rel(input, REL_WHEEL, rw); + /* fall through */ + + case 3: /* Mouse without wheel */ + input_report_key(input, BTN_LEFT, data[1] & 0x01); + input_report_key(input, BTN_RIGHT, data[1] & 0x02); + /* Compute distance between mouse and tablet */ + rw = 44 - (data[6] >> 2); + if (rw < 0) + rw = 0; + else if (rw > 31) + rw = 31; + input_report_abs(input, ABS_DISTANCE, rw); + break; + + default: + input_report_abs(input, ABS_PRESSURE, + data[6] | (((__u16) (data[1] & 0x08)) << 5)); + input_report_key(input, BTN_TOUCH, data[1] & 0x01); + input_report_key(input, BTN_STYLUS, data[1] & 0x02); + input_report_key(input, BTN_STYLUS2, (tool == BTN_TOOL_PEN) && data[1] & 0x04); + break; + } + + input_sync(input); + } + + /* Report the state of the two buttons at the top of the tablet + * as two extra fingerpad keys (buttons 4 & 5). 
*/ + rw = data[7] & 0x03; + if (rw != wdata->butstate) { + wdata->butstate = rw; + input_report_key(input, BTN_0, rw & 0x02); + input_report_key(input, BTN_1, rw & 0x01); + input_event(input, EV_MSC, MSC_SERIAL, 0xf0); + input_sync(input); + } + + return 1; +} + +static int wacom_probe(struct hid_device *hdev, + const struct hid_device_id *id) +{ + struct hid_input *hidinput; + struct input_dev *input; + struct wacom_data *wdata; + int ret; + + wdata = kzalloc(sizeof(*wdata), GFP_KERNEL); + if (wdata == NULL) { + dev_err(&hdev->dev, "can't alloc wacom descriptor\n"); + return -ENOMEM; + } + + hid_set_drvdata(hdev, wdata); + + ret = hid_parse(hdev); + if (ret) { + dev_err(&hdev->dev, "parse failed\n"); + goto err_free; + } + + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + if (ret) { + dev_err(&hdev->dev, "hw start failed\n"); + goto err_free; + } + + hidinput = list_entry(hdev->inputs.next, struct hid_input, list); + input = hidinput->input; + + /* Basics */ + input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL); + input->absbit[0] |= BIT(ABS_X) | BIT(ABS_Y) | + BIT(ABS_PRESSURE) | BIT(ABS_DISTANCE); + input->relbit[0] |= BIT(REL_WHEEL); + set_bit(BTN_TOOL_PEN, input->keybit); + set_bit(BTN_TOUCH, input->keybit); + set_bit(BTN_STYLUS, input->keybit); + set_bit(BTN_STYLUS2, input->keybit); + set_bit(BTN_LEFT, input->keybit); + set_bit(BTN_RIGHT, input->keybit); + set_bit(BTN_MIDDLE, input->keybit); + + /* Pad */ + input->evbit[0] |= BIT(EV_MSC); + input->mscbit[0] |= BIT(MSC_SERIAL); + + /* Distance, rubber and mouse */ + input->absbit[0] |= BIT(ABS_DISTANCE); + set_bit(BTN_TOOL_RUBBER, input->keybit); + set_bit(BTN_TOOL_MOUSE, input->keybit); + + input->absmax[ABS_PRESSURE] = 511; + input->absmax[ABS_DISTANCE] = 32; + + input->absmax[ABS_X] = 16704; + input->absmax[ABS_Y] = 12064; + input->absfuzz[ABS_X] = 4; + input->absfuzz[ABS_Y] = 4; + + return 0; +err_free: + kfree(wdata); + return ret; +} + +static void wacom_remove(struct hid_device *hdev) +{ + hid_hw_stop(hdev); + kfree(hid_get_drvdata(hdev)); +} + +static const struct hid_device_id wacom_devices[] = { + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) }, + + { } +}; +MODULE_DEVICE_TABLE(hid, wacom_devices); + +static struct hid_driver wacom_driver = { + .name = "wacom", + .id_table = wacom_devices, + .probe = wacom_probe, + .remove = wacom_remove, + .raw_event = wacom_raw_event, +}; + +static int wacom_init(void) +{ + int ret; + + ret = hid_register_driver(&wacom_driver); + if (ret) + printk(KERN_ERR "can't register wacom driver\n"); + printk(KERN_ERR "wacom driver registered\n"); + return ret; +} + +static void wacom_exit(void) +{ + hid_unregister_driver(&wacom_driver); +} + +module_init(wacom_init); +module_exit(wacom_exit); +MODULE_LICENSE("GPL"); + -- cgit v1.2.3 From bf31a1a02eb28d9bda0bb74345df7889faeb7335 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Wed, 13 May 2009 16:52:40 -0700 Subject: IB/ehca: Replace vmalloc() with kmalloc() for queue allocation To improve performance of driver resource allocation, replace vmalloc() calls with kmalloc(). 
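kmalloc() hands back physically contiguous memory and is considerably cheaper to set up and tear down than vmalloc(), but it can fail for large requests when memory is fragmented — which is exactly what the follow-up patch below addresses. A minimal sketch of the combined strategy, with illustrative names:

	static void **alloc_page_list(int nr_of_pages)
	{
		/* fast path: small, physically contiguous allocation */
		void **pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);

		if (!pages)
			/* fragmented memory: fall back to vmalloc() */
			pages = vmalloc(nr_of_pages * sizeof(void *));
		return pages;
	}

	static void free_page_list(void **pages)
	{
		/* release with whichever allocator actually provided the memory */
		if (is_vmalloc_addr(pages))
			vfree(pages);
		else
			kfree(pages);
	}
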
Signed-off-by: Stefan Roscher Signed-off-by: Roland Dreier --- drivers/infiniband/hw/ehca/ipz_pt_fn.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c index c3a328465431..a2605593ae79 100644 --- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c +++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c @@ -220,7 +220,7 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue, queue->small_page = NULL; /* allocate queue page pointers */ - queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *)); + queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); if (!queue->queue_pages) { ehca_gen_err("Couldn't allocate queue page list"); return 0; @@ -240,7 +240,7 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue, ipz_queue_ctor_exit0: ehca_gen_err("Couldn't alloc pages queue=%p " "nr_of_pages=%x", queue, nr_of_pages); - vfree(queue->queue_pages); + kfree(queue->queue_pages); return 0; } @@ -262,7 +262,7 @@ int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue) free_page((unsigned long)queue->queue_pages[i]); } - vfree(queue->queue_pages); + kfree(queue->queue_pages); return 1; } -- cgit v1.2.3 From c94f156f63c835ffc02b686f9d4238b106f31a5d Mon Sep 17 00:00:00 2001 From: Stefan Roscher Date: Wed, 13 May 2009 16:52:42 -0700 Subject: IB/ehca: Fall back to vmalloc() for big allocations In case of large queue pairs there is the possibillity of allocation failures due to memory fragmentation when using kmalloc(). To ensure the memory is allocated even if kmalloc() can not find chunks which are big enough, we fall back to allocating the memory with vmalloc(). Signed-off-by: Stefan Roscher Signed-off-by: Roland Dreier --- drivers/infiniband/hw/ehca/ipz_pt_fn.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c index a2605593ae79..1227c593627a 100644 --- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c +++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c @@ -222,8 +222,11 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue, /* allocate queue page pointers */ queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); if (!queue->queue_pages) { - ehca_gen_err("Couldn't allocate queue page list"); - return 0; + queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *)); + if (!queue->queue_pages) { + ehca_gen_err("Couldn't allocate queue page list"); + return 0; + } } memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *)); @@ -240,7 +243,10 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue, ipz_queue_ctor_exit0: ehca_gen_err("Couldn't alloc pages queue=%p " "nr_of_pages=%x", queue, nr_of_pages); - kfree(queue->queue_pages); + if (is_vmalloc_addr(queue->queue_pages)) + vfree(queue->queue_pages); + else + kfree(queue->queue_pages); return 0; } @@ -262,7 +268,10 @@ int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue) free_page((unsigned long)queue->queue_pages[i]); } - kfree(queue->queue_pages); + if (is_vmalloc_addr(queue->queue_pages)) + vfree(queue->queue_pages); + else + kfree(queue->queue_pages); return 1; } -- cgit v1.2.3 From 1988d1fa1a9d642c5714a6afc9775fba0627f3ed Mon Sep 17 00:00:00 2001 From: Stefan Roscher Date: Wed, 13 May 2009 16:52:43 -0700 Subject: IB/ehca: Remove unnecessary memory operations for userspace queue pairs The queue map for flush completion circumvention is 
only used for kernel space queue pairs. This patch skips the allocation of the queue maps in case the QP is created for userspace. In addition, this patch does not iomap the galpas for kernel usage if the queue pair is only used in userspace. These changes will improve the performance of creation of userspace queue pairs. Signed-off-by: Stefan Roscher Signed-off-by: Roland Dreier --- drivers/infiniband/hw/ehca/ehca_qp.c | 94 ++++++++++++++++++++--------------- drivers/infiniband/hw/ehca/hcp_if.c | 6 +-- drivers/infiniband/hw/ehca/hcp_if.h | 2 +- drivers/infiniband/hw/ehca/hcp_phyp.c | 11 ++-- drivers/infiniband/hw/ehca/hcp_phyp.h | 2 +- 5 files changed, 65 insertions(+), 50 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index 00c108159714..ead4e718c082 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c @@ -461,7 +461,7 @@ static struct ehca_qp *internal_create_qp( ib_device); struct ib_ucontext *context = NULL; u64 h_ret; - int is_llqp = 0, has_srq = 0; + int is_llqp = 0, has_srq = 0, is_user = 0; int qp_type, max_send_sge, max_recv_sge, ret; /* h_call's out parameters */ @@ -609,9 +609,6 @@ static struct ehca_qp *internal_create_qp( } } - if (pd->uobject && udata) - context = pd->uobject->context; - my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL); if (!my_qp) { ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd); @@ -619,6 +616,11 @@ static struct ehca_qp *internal_create_qp( return ERR_PTR(-ENOMEM); } + if (pd->uobject && udata) { + is_user = 1; + context = pd->uobject->context; + } + atomic_set(&my_qp->nr_events, 0); init_waitqueue_head(&my_qp->wait_completion); spin_lock_init(&my_qp->spinlock_s); @@ -707,7 +709,7 @@ static struct ehca_qp *internal_create_qp( (parms.squeue.is_small || parms.rqueue.is_small); } - h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms); + h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user); if (h_ret != H_SUCCESS) { ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli", h_ret); @@ -769,18 +771,20 @@ static struct ehca_qp *internal_create_qp( goto create_qp_exit2; } - my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length / - my_qp->ipz_squeue.qe_size; - my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries * - sizeof(struct ehca_qmap_entry)); - if (!my_qp->sq_map.map) { - ehca_err(pd->device, "Couldn't allocate squeue " - "map ret=%i", ret); - goto create_qp_exit3; + if (!is_user) { + my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length / + my_qp->ipz_squeue.qe_size; + my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries * + sizeof(struct ehca_qmap_entry)); + if (!my_qp->sq_map.map) { + ehca_err(pd->device, "Couldn't allocate squeue " + "map ret=%i", ret); + goto create_qp_exit3; + } + INIT_LIST_HEAD(&my_qp->sq_err_node); + /* to avoid the generation of bogus flush CQEs */ + reset_queue_map(&my_qp->sq_map); } - INIT_LIST_HEAD(&my_qp->sq_err_node); - /* to avoid the generation of bogus flush CQEs */ - reset_queue_map(&my_qp->sq_map); } if (HAS_RQ(my_qp)) { @@ -792,20 +796,21 @@ static struct ehca_qp *internal_create_qp( "and pages ret=%i", ret); goto create_qp_exit4; } - - my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length / - my_qp->ipz_rqueue.qe_size; - my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries * - sizeof(struct ehca_qmap_entry)); - if (!my_qp->rq_map.map) { - ehca_err(pd->device, "Couldn't allocate squeue " - "map ret=%i", ret); - goto create_qp_exit5; + if (!is_user) { + 
my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length / + my_qp->ipz_rqueue.qe_size; + my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries * + sizeof(struct ehca_qmap_entry)); + if (!my_qp->rq_map.map) { + ehca_err(pd->device, "Couldn't allocate squeue " + "map ret=%i", ret); + goto create_qp_exit5; + } + INIT_LIST_HEAD(&my_qp->rq_err_node); + /* to avoid the generation of bogus flush CQEs */ + reset_queue_map(&my_qp->rq_map); } - INIT_LIST_HEAD(&my_qp->rq_err_node); - /* to avoid the generation of bogus flush CQEs */ - reset_queue_map(&my_qp->rq_map); - } else if (init_attr->srq) { + } else if (init_attr->srq && !is_user) { /* this is a base QP, use the queue map of the SRQ */ my_qp->rq_map = my_srq->rq_map; INIT_LIST_HEAD(&my_qp->rq_err_node); @@ -918,7 +923,7 @@ create_qp_exit7: kfree(my_qp->mod_qp_parm); create_qp_exit6: - if (HAS_RQ(my_qp)) + if (HAS_RQ(my_qp) && !is_user) vfree(my_qp->rq_map.map); create_qp_exit5: @@ -926,7 +931,7 @@ create_qp_exit5: ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); create_qp_exit4: - if (HAS_SQ(my_qp)) + if (HAS_SQ(my_qp) && !is_user) vfree(my_qp->sq_map.map); create_qp_exit3: @@ -1244,6 +1249,7 @@ static int internal_modify_qp(struct ib_qp *ibqp, u64 update_mask; u64 h_ret; int bad_wqe_cnt = 0; + int is_user = 0; int squeue_locked = 0; unsigned long flags = 0; @@ -1266,6 +1272,8 @@ static int internal_modify_qp(struct ib_qp *ibqp, ret = ehca2ib_return_code(h_ret); goto modify_qp_exit1; } + if (ibqp->uobject) + is_user = 1; qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state); @@ -1728,7 +1736,8 @@ static int internal_modify_qp(struct ib_qp *ibqp, goto modify_qp_exit2; } } - if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) { + if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR) + && !is_user) { ret = check_for_left_cqes(my_qp, shca); if (ret) goto modify_qp_exit2; @@ -1738,16 +1747,17 @@ static int internal_modify_qp(struct ib_qp *ibqp, ipz_qeit_reset(&my_qp->ipz_rqueue); ipz_qeit_reset(&my_qp->ipz_squeue); - if (qp_cur_state == IB_QPS_ERR) { + if (qp_cur_state == IB_QPS_ERR && !is_user) { del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node); if (HAS_RQ(my_qp)) del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node); } - reset_queue_map(&my_qp->sq_map); + if (!is_user) + reset_queue_map(&my_qp->sq_map); - if (HAS_RQ(my_qp)) + if (HAS_RQ(my_qp) && !is_user) reset_queue_map(&my_qp->rq_map); } @@ -2138,10 +2148,12 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp, int ret; u64 h_ret; u8 port_num; + int is_user = 0; enum ib_qp_type qp_type; unsigned long flags; if (uobject) { + is_user = 1; if (my_qp->mm_count_galpa || my_qp->mm_count_rqueue || my_qp->mm_count_squeue) { ehca_err(dev, "Resources still referenced in " @@ -2168,10 +2180,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp, * SRQs will never get into an error list and do not have a recv_cq, * so we need to skip them here. 
*/ - if (HAS_RQ(my_qp) && !IS_SRQ(my_qp)) + if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user) del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node); - if (HAS_SQ(my_qp)) + if (HAS_SQ(my_qp) && !is_user) del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node); /* now wait until all pending events have completed */ @@ -2209,13 +2221,13 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp, if (HAS_RQ(my_qp)) { ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); - - vfree(my_qp->rq_map.map); + if (!is_user) + vfree(my_qp->rq_map.map); } if (HAS_SQ(my_qp)) { ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); - - vfree(my_qp->sq_map.map); + if (!is_user) + vfree(my_qp->sq_map.map); } kmem_cache_free(qp_cache, my_qp); atomic_dec(&shca->num_qps); diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c index d0ab0c0d5e91..4d5dc3304d42 100644 --- a/drivers/infiniband/hw/ehca/hcp_if.c +++ b/drivers/infiniband/hw/ehca/hcp_if.c @@ -284,7 +284,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle, param->act_pages = (u32)outs[4]; if (ret == H_SUCCESS) - hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]); + hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]); if (ret == H_NOT_ENOUGH_RESOURCES) ehca_gen_err("Not enough resources. ret=%lli", ret); @@ -293,7 +293,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle, } u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, - struct ehca_alloc_qp_parms *parms) + struct ehca_alloc_qp_parms *parms, int is_user) { u64 ret; u64 allocate_controls, max_r10_reg, r11, r12; @@ -359,7 +359,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]); if (ret == H_SUCCESS) - hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]); + hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]); if (ret == H_NOT_ENOUGH_RESOURCES) ehca_gen_err("Not enough resources. ret=%lli", ret); diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/infiniband/hw/ehca/hcp_if.h index 2c3c6e0ea5c2..39c1c3618ec7 100644 --- a/drivers/infiniband/hw/ehca/hcp_if.h +++ b/drivers/infiniband/hw/ehca/hcp_if.h @@ -78,7 +78,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle, * initialize resources, create empty QPPTs (2 rings). 
*/ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, - struct ehca_alloc_qp_parms *parms); + struct ehca_alloc_qp_parms *parms, int is_user); u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle, const u8 port_id, diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c index 214821095cb1..b3e0e72e8a73 100644 --- a/drivers/infiniband/hw/ehca/hcp_phyp.c +++ b/drivers/infiniband/hw/ehca/hcp_phyp.c @@ -54,12 +54,15 @@ int hcall_unmap_page(u64 mapaddr) return 0; } -int hcp_galpas_ctor(struct h_galpas *galpas, +int hcp_galpas_ctor(struct h_galpas *galpas, int is_user, u64 paddr_kernel, u64 paddr_user) { - int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle); - if (ret) - return ret; + if (!is_user) { + int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle); + if (ret) + return ret; + } else + galpas->kernel.fw_handle = 0; galpas->user.fw_handle = paddr_user; diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/infiniband/hw/ehca/hcp_phyp.h index 5305c2a3ed94..204227d5303a 100644 --- a/drivers/infiniband/hw/ehca/hcp_phyp.h +++ b/drivers/infiniband/hw/ehca/hcp_phyp.h @@ -78,7 +78,7 @@ static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value) *(volatile u64 __force *)addr = value; } -int hcp_galpas_ctor(struct h_galpas *galpas, +int hcp_galpas_ctor(struct h_galpas *galpas, int is_user, u64 paddr_kernel, u64 paddr_user); int hcp_galpas_dtor(struct h_galpas *galpas); -- cgit v1.2.3 From bde2cfaf8ff5511b4f434078554f89ff6cb677f2 Mon Sep 17 00:00:00 2001 From: Stefan Roscher Date: Wed, 13 May 2009 16:52:43 -0700 Subject: IB/ehca: Increment version number Signed-off-by: Stefan Roscher Signed-off-by: Roland Dreier --- drivers/infiniband/hw/ehca/ehca_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 368311ce332b..85905ab9391f 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -52,7 +52,7 @@ #include "ehca_tools.h" #include "hcp_if.h" -#define HCAD_VERSION "0026" +#define HCAD_VERSION "0027" MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Christoph Raisch "); -- cgit v1.2.3 From 5b891a9332dc4212bf166a4506092fbcd60f2319 Mon Sep 17 00:00:00 2001 From: Jack Stone Date: Wed, 13 May 2009 16:53:39 -0700 Subject: infiniband: Remove void casts Remove uneeded casts of void *. 
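In C, a void * converts implicitly to any object pointer type, so the removed casts change nothing about the generated code — they only add clutter and can hide a missing prototype. For example, the two statements below are equivalent:

	struct c2wr_ce *ce;

	ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);	/* redundant cast */
	ce = c2_mq_consume(&cq->mq);			/* same meaning */
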
Signed-off-by: Jack Stone Signed-off-by: Roland Dreier --- drivers/infiniband/hw/amso1100/c2_cq.c | 4 ++-- drivers/infiniband/hw/ehca/ehca_irq.c | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c index bb17cce3cb59..f5c45b194f53 100644 --- a/drivers/infiniband/hw/amso1100/c2_cq.c +++ b/drivers/infiniband/hw/amso1100/c2_cq.c @@ -133,7 +133,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev, struct c2_qp *qp; int is_recv = 0; - ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq); + ce = c2_mq_consume(&cq->mq); if (!ce) { return -EAGAIN; } @@ -146,7 +146,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev, while ((qp = (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) { c2_mq_free(&cq->mq); - ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq); + ce = c2_mq_consume(&cq->mq); if (!ce) return -EAGAIN; } diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c index 99bcbd7ffb0a..4b89b791be6a 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/infiniband/hw/ehca/ehca_irq.c @@ -479,13 +479,13 @@ void ehca_tasklet_neq(unsigned long data) struct ehca_eqe *eqe; u64 ret; - eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq); + eqe = ehca_poll_eq(shca, &shca->neq); while (eqe) { if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry)) parse_ec(shca, eqe->entry); - eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq); + eqe = ehca_poll_eq(shca, &shca->neq); } ret = hipz_h_reset_event(shca->ipz_hca_handle, @@ -572,8 +572,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq) eqe_cnt = 0; do { u32 token; - eqe_cache[eqe_cnt].eqe = - (struct ehca_eqe *)ehca_poll_eq(shca, eq); + eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq); if (!eqe_cache[eqe_cnt].eqe) break; eqe_value = eqe_cache[eqe_cnt].eqe->entry; @@ -637,7 +636,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq) goto unlock_irq_spinlock; do { struct ehca_eqe *eqe; - eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq); + eqe = ehca_poll_eq(shca, &shca->eq); if (!eqe) break; process_eqe(shca, eqe); -- cgit v1.2.3 From 6e03a201bbe8137487f340d26aa662110e324b20 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 9 Apr 2009 22:04:07 -0700 Subject: firmware: speed up request_firmware(), v3 Rather than calling vmalloc() repeatedly to grow the firmware image as we receive data from userspace, just allocate and fill individual pages. Then vmap() the whole lot in one go when we're done. A quick test with a 337KiB iwlagn firmware shows the time taken for request_firmware() going from ~32ms to ~5ms after I apply this patch. 
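Reduced to a sketch, the new scheme looks roughly like this (error handling, locking, and the array-growing logic omitted; see the diff for the real code):

	struct page **pages;		/* grown as data arrives */
	int nr_pages = 0;

	/* for each chunk written by userspace: */
	pages[nr_pages++] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	/* ...kmap() the page, memcpy() the chunk in, kunmap()... */

	/* when loading completes, build one contiguous read-only view: */
	fw->data = vmap(pages, nr_pages, 0, PAGE_KERNEL_RO);
	/* per the patch, vfree(fw->data) later releases mapping and pages */
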
[v2: define PAGE_KERNEL_RO as PAGE_KERNEL where necessary, use min_t()] [v3: kunmap() takes the struct page *, not the virtual address] Signed-off-by: David Woodhouse Tested-by: Sachin Sant --- drivers/base/firmware_class.c | 129 +++++++++++++++++++++++++++++++++--------- 1 file changed, 103 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index d3a59c688fe4..8a267c427629 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -17,7 +17,7 @@ #include #include #include - +#include #include #include "base.h" @@ -45,7 +45,10 @@ struct firmware_priv { struct bin_attribute attr_data; struct firmware *fw; unsigned long status; - int alloc_size; + struct page **pages; + int nr_pages; + int page_array_size; + const char *vdata; struct timer_list timeout; }; @@ -122,6 +125,10 @@ static ssize_t firmware_loading_show(struct device *dev, return sprintf(buf, "%d\n", loading); } +/* Some architectures don't have PAGE_KERNEL_RO */ +#ifndef PAGE_KERNEL_RO +#define PAGE_KERNEL_RO PAGE_KERNEL +#endif /** * firmware_loading_store - set value in the 'loading' control file * @dev: device pointer @@ -141,6 +148,7 @@ static ssize_t firmware_loading_store(struct device *dev, { struct firmware_priv *fw_priv = dev_get_drvdata(dev); int loading = simple_strtol(buf, NULL, 10); + int i; switch (loading) { case 1: @@ -151,13 +159,30 @@ static ssize_t firmware_loading_store(struct device *dev, } vfree(fw_priv->fw->data); fw_priv->fw->data = NULL; + for (i = 0; i < fw_priv->nr_pages; i++) + __free_page(fw_priv->pages[i]); + kfree(fw_priv->pages); + fw_priv->pages = NULL; + fw_priv->page_array_size = 0; + fw_priv->nr_pages = 0; fw_priv->fw->size = 0; - fw_priv->alloc_size = 0; set_bit(FW_STATUS_LOADING, &fw_priv->status); mutex_unlock(&fw_lock); break; case 0: if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) { + vfree(fw_priv->fw->data); + fw_priv->fw->data = vmap(fw_priv->pages, + fw_priv->nr_pages, + 0, PAGE_KERNEL_RO); + if (!fw_priv->fw->data) { + dev_err(dev, "%s: vmap() failed\n", __func__); + goto err; + } + /* Pages will be freed by vfree() */ + fw_priv->pages = NULL; + fw_priv->page_array_size = 0; + fw_priv->nr_pages = 0; complete(&fw_priv->completion); clear_bit(FW_STATUS_LOADING, &fw_priv->status); break; @@ -167,6 +192,7 @@ static ssize_t firmware_loading_store(struct device *dev, dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); /* fallthrough */ case -1: + err: fw_load_abort(fw_priv); break; } @@ -191,8 +217,28 @@ firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr, ret_count = -ENODEV; goto out; } - ret_count = memory_read_from_buffer(buffer, count, &offset, - fw->data, fw->size); + if (offset > fw->size) + return 0; + if (count > fw->size - offset) + count = fw->size - offset; + + ret_count = count; + + while (count) { + void *page_data; + int page_nr = offset >> PAGE_SHIFT; + int page_ofs = offset & (PAGE_SIZE-1); + int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); + + page_data = kmap(fw_priv->pages[page_nr]); + + memcpy(buffer, page_data + page_ofs, page_cnt); + + kunmap(fw_priv->pages[page_nr]); + buffer += page_cnt; + offset += page_cnt; + count -= page_cnt; + } out: mutex_unlock(&fw_lock); return ret_count; @@ -201,27 +247,39 @@ out: static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) { - u8 *new_data; - int new_size = fw_priv->alloc_size; + int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT; + + /* If the array of pages 
is too small, grow it... */ + if (fw_priv->page_array_size < pages_needed) { + int new_array_size = max(pages_needed, + fw_priv->page_array_size * 2); + struct page **new_pages; + + new_pages = kmalloc(new_array_size * sizeof(void *), + GFP_KERNEL); + if (!new_pages) { + fw_load_abort(fw_priv); + return -ENOMEM; + } + memcpy(new_pages, fw_priv->pages, + fw_priv->page_array_size * sizeof(void *)); + memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) * + (new_array_size - fw_priv->page_array_size)); + kfree(fw_priv->pages); + fw_priv->pages = new_pages; + fw_priv->page_array_size = new_array_size; + } - if (min_size <= fw_priv->alloc_size) - return 0; + while (fw_priv->nr_pages < pages_needed) { + fw_priv->pages[fw_priv->nr_pages] = + alloc_page(GFP_KERNEL | __GFP_HIGHMEM); - new_size = ALIGN(min_size, PAGE_SIZE); - new_data = vmalloc(new_size); - if (!new_data) { - printk(KERN_ERR "%s: unable to alloc buffer\n", __func__); - /* Make sure that we don't keep incomplete data */ - fw_load_abort(fw_priv); - return -ENOMEM; - } - fw_priv->alloc_size = new_size; - if (fw_priv->fw->data) { - memcpy(new_data, fw_priv->fw->data, fw_priv->fw->size); - vfree(fw_priv->fw->data); + if (!fw_priv->pages[fw_priv->nr_pages]) { + fw_load_abort(fw_priv); + return -ENOMEM; + } + fw_priv->nr_pages++; } - fw_priv->fw->data = new_data; - BUG_ON(min_size > fw_priv->alloc_size); return 0; } @@ -258,10 +316,25 @@ firmware_data_write(struct kobject *kobj, struct bin_attribute *bin_attr, if (retval) goto out; - memcpy((u8 *)fw->data + offset, buffer, count); - - fw->size = max_t(size_t, offset + count, fw->size); retval = count; + + while (count) { + void *page_data; + int page_nr = offset >> PAGE_SHIFT; + int page_ofs = offset & (PAGE_SIZE - 1); + int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); + + page_data = kmap(fw_priv->pages[page_nr]); + + memcpy(page_data + page_ofs, buffer, page_cnt); + + kunmap(fw_priv->pages[page_nr]); + buffer += page_cnt; + offset += page_cnt; + count -= page_cnt; + } + + fw->size = max_t(size_t, offset, fw->size); out: mutex_unlock(&fw_lock); return retval; @@ -277,7 +350,11 @@ static struct bin_attribute firmware_attr_data_tmpl = { static void fw_dev_release(struct device *dev) { struct firmware_priv *fw_priv = dev_get_drvdata(dev); + int i; + for (i = 0; i < fw_priv->nr_pages; i++) + __free_page(fw_priv->pages[i]); + kfree(fw_priv->pages); kfree(fw_priv); kfree(dev); -- cgit v1.2.3 From fac733f029251a393c42a8313432f2d9fe43bb83 Mon Sep 17 00:00:00 2001 From: Jussi Kivilinna Date: Wed, 13 May 2009 11:54:38 +0300 Subject: HID: force feedback support for SmartJoy PLUS PS2/USB adapter This driver adds force feedback support for the SmartJoy PLUS PS2/USB adapter. I made this driver device specific instead of writing a generic 'wisegroup-ff', because I have another Wisegroup PS2/USB adapter that doesn't work the same way as the SmartJoy PLUS. If another compatible device pops up, this driver could then be renamed to something more generic.
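For background, a memless force-feedback driver of this kind mainly has to reduce the 16-bit rumble magnitudes from struct ff_effect down to what the adapter's output report can carry; for the SmartJoy PLUS that is an 8-bit strong-motor value plus an on/off-only weak motor. A minimal sketch of just that reduction (the helper name is invented for illustration and is not part of the driver):

	/* Illustrative only: scale ff_effect rumble magnitudes down to
	 * the report fields used by this class of adapter. */
	static void sjoy_scale_rumble(u16 strong, u16 weak,
				      u8 *strong_out, u8 *weak_out)
	{
		*strong_out = strong * 0xff / 0xffff;	/* 0..0xffff -> 0..0xff */
		*weak_out = (weak != 0);		/* weak motor is on/off only */
	}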
Signed-off-by: Jussi Kivilinna Signed-off-by: Jiri Kosina --- drivers/hid/Kconfig | 15 +++++ drivers/hid/Makefile | 1 + drivers/hid/hid-core.c | 1 + drivers/hid/hid-ids.h | 1 + drivers/hid/hid-sjoy.c | 180 +++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 198 insertions(+) create mode 100644 drivers/hid/hid-sjoy.c (limited to 'drivers') diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 21edf407bbda..4cdf846da62c 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -256,6 +256,21 @@ config GREENASIA_FF (like MANTA Warrior MM816 and SpeedLink Strike2 SL-6635) or adapter and want to enable force feedback support for it. +config HID_SMARTJOYPLUS + tristate "SmartJoy PLUS PS2/USB adapter support" if EMBEDDED + depends on USB_HID + default !EMBEDDED + ---help--- + Support for SmartJoy PLUS PS2/USB adapter. + +config SMARTJOYPLUS_FF + bool "SmartJoy PLUS PS2/USB adapter force feedback support" + depends on HID_SMARTJOYPLUS + select INPUT_FF_MEMLESS + ---help--- + Say Y here if you have a SmartJoy PLUS PS2/USB adapter and want to + enable force feedback support for it. + config HID_TOPSEED tristate "TopSeed Cyberlink remote control support" if EMBEDDED depends on USB_HID diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 39cebcfa898c..eddd5b633b63 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o +obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o obj-$(CONFIG_HID_SONY) += hid-sony.o obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o obj-$(CONFIG_GREENASIA_FF) += hid-gaff.o diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 9f38ab874c93..f2c21d5d24e8 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1312,6 +1312,7 @@ static const struct hid_device_id hid_blacklist[] = { { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) }, { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index b519692732db..630101037921 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -417,6 +417,7 @@ #define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81 #define USB_VENDOR_ID_WISEGROUP 0x0925 +#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005 #define USB_DEVICE_ID_1_PHIDGETSERVO_20 0x8101 #define USB_DEVICE_ID_4_PHIDGETSERVO_20 0x8104 #define USB_DEVICE_ID_8_8_4_IF_KIT 0x8201 diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c new file mode 100644 index 000000000000..eab169e5c371 --- /dev/null +++ b/drivers/hid/hid-sjoy.c @@ -0,0 +1,180 @@ +/* + * Force feedback support for SmartJoy PLUS PS2->USB adapter + * + * Copyright (c) 2009 Jussi Kivilinna + * + * Based of hid-pl.c and hid-gaff.c + * Copyright (c) 2007, 2009 Anssi Hannula + * Copyright (c) 2008 Lukasz Lubojanski + */ + +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* #define DEBUG */ + +#include +#include +#include +#include "hid-ids.h" + +#ifdef CONFIG_SMARTJOYPLUS_FF +#include "usbhid/usbhid.h" + +struct sjoyff_device { + struct hid_report *report; +}; + +static int hid_sjoyff_play(struct input_dev *dev, void *data, + struct ff_effect *effect) +{ + struct hid_device *hid = input_get_drvdata(dev); + struct sjoyff_device *sjoyff = data; + u32 left, right; + + left = effect->u.rumble.strong_magnitude; + right = effect->u.rumble.weak_magnitude; + dev_dbg(&dev->dev, "called with 0x%08x 0x%08x\n", left, right); + + left = left * 0xff / 0xffff; + right = (right != 0); /* on/off only */ + + sjoyff->report->field[0]->value[1] = right; + sjoyff->report->field[0]->value[2] = left; + dev_dbg(&dev->dev, "running with 0x%02x 0x%02x\n", left, right); + usbhid_submit_report(hid, sjoyff->report, USB_DIR_OUT); + + return 0; +} + +static int sjoyff_init(struct hid_device *hid) +{ + struct sjoyff_device *sjoyff; + struct hid_report *report; + struct hid_input *hidinput = list_entry(hid->inputs.next, + struct hid_input, list); + struct list_head *report_list = + &hid->report_enum[HID_OUTPUT_REPORT].report_list; + struct list_head *report_ptr = report_list; + struct input_dev *dev; + int error; + + if (list_empty(report_list)) { + dev_err(&hid->dev, "no output reports found\n"); + return -ENODEV; + } + + report_ptr = report_ptr->next; + + if (report_ptr == report_list) { + dev_err(&hid->dev, "required output report is " + "missing\n"); + return -ENODEV; + } + + report = list_entry(report_ptr, struct hid_report, list); + if (report->maxfield < 1) { + dev_err(&hid->dev, "no fields in the report\n"); + return -ENODEV; + } + + if (report->field[0]->report_count < 3) { + dev_err(&hid->dev, "not enough values in the field\n"); + return -ENODEV; + } + + sjoyff = kzalloc(sizeof(struct sjoyff_device), GFP_KERNEL); + if (!sjoyff) + return -ENOMEM; + + dev = hidinput->input; + + set_bit(FF_RUMBLE, dev->ffbit); + + error = input_ff_create_memless(dev, sjoyff, hid_sjoyff_play); + if (error) { + kfree(sjoyff); + return error; + } + + sjoyff->report = report; + sjoyff->report->field[0]->value[0] = 0x01; + sjoyff->report->field[0]->value[1] = 0x00; + sjoyff->report->field[0]->value[2] = 0x00; + usbhid_submit_report(hid, sjoyff->report, USB_DIR_OUT); + + dev_info(&hid->dev, + "Force feedback for SmartJoy PLUS PS2/USB adapter\n"); + + return 0; +} +#else +static inline int sjoyff_init(struct hid_device *hid) +{ + return 0; +} +#endif + +static int sjoy_probe(struct hid_device *hdev, const struct hid_device_id *id) +{ + int ret; + + ret = hid_parse(hdev); + if (ret) { + dev_err(&hdev->dev, "parse failed\n"); + goto err; + } + + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); + if (ret) { + dev_err(&hdev->dev, "hw start failed\n"); + goto err; + } + + sjoyff_init(hdev); + + return 0; +err: + return ret; +} + +static const struct hid_device_id sjoy_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, + { } +}; +MODULE_DEVICE_TABLE(hid, sjoy_devices); + +static struct 
hid_driver sjoy_driver = { + .name = "smartjoyplus", + .id_table = sjoy_devices, + .probe = sjoy_probe, +}; + +static int sjoy_init(void) +{ + return hid_register_driver(&sjoy_driver); +} + +static void sjoy_exit(void) +{ + hid_unregister_driver(&sjoy_driver); +} + +module_init(sjoy_init); +module_exit(sjoy_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jussi Kivilinna"); + -- cgit v1.2.3 From 0f6f4319a72a2b32d19643ff811f25633d8b0207 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Fri, 15 May 2009 15:46:44 +0200 Subject: HID: fix hid-ff drivers so that devices work even without ff support Currently, the hid-*ff force feedback drivers, which claim the blacklisted device on a HID bus, are only compiled in if the user selects force feedback support. However we want the device to be supported even when the kernel is configured without force feedback. This patch fixes the drivers in a way that they get compiled even if force feedback is turned off; all the force feedback support code is compiled out in such case, and the driver works as a usual driver on HID bus, claiming and initializing the device, making it operational without FF effects. Reported-by: Jussi Kivilinna Signed-off-by: Jiri Kosina --- drivers/hid/Kconfig | 70 +++++++++++++++++++++++++++++++++++++++----------- drivers/hid/Makefile | 8 +++--- drivers/hid/hid-drff.c | 8 ++++++ drivers/hid/hid-gaff.c | 8 ++++++ drivers/hid/hid-tmff.c | 17 ++++++++---- drivers/hid/hid-zpff.c | 7 +++++ 6 files changed, 94 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 4cdf846da62c..7831a0318d3c 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -116,9 +116,16 @@ config HID_CYPRESS ---help--- Support for cypress mouse and barcode readers. -config DRAGONRISE_FF - tristate "DragonRise Inc. force feedback support" +config HID_DRAGONRISE + tristate "DragonRise Inc. support" if EMBEDDED depends on USB_HID + default !EMBEDDED + ---help--- + Say Y here if you have DragonRise Inc.game controllers. + +config DRAGONRISE_FF + bool "DragonRise Inc. force feedback support" + depends on HID_DRAGONRISE select INPUT_FF_MEMLESS ---help--- Say Y here if you want to enable force feedback support for DragonRise Inc. @@ -160,7 +167,7 @@ config HID_LOGITECH Support for Logitech devices that are not fully compliant with HID standard. config LOGITECH_FF - bool "Logitech force feedback" + bool "Logitech force feedback support" depends on HID_LOGITECH select INPUT_FF_MEMLESS help @@ -176,7 +183,7 @@ config LOGITECH_FF force feedback. config LOGIRUMBLEPAD2_FF - bool "Logitech Rumblepad 2 force feedback" + bool "Logitech Rumblepad 2 force feedback support" depends on HID_LOGITECH select INPUT_FF_MEMLESS help @@ -211,11 +218,19 @@ config HID_PANTHERLORD ---help--- Support for PantherLord/GreenAsia based device support. +config HID_PANTHERLORD + tristate "Pantherlord support" if EMBEDDED + depends on USB_HID + default !EMBEDDED + ---help--- + Say Y here if you have a PantherLord/GreenAsia based game controller + or adapter. + config PANTHERLORD_FF bool "Pantherlord force feedback support" depends on HID_PANTHERLORD select INPUT_FF_MEMLESS - help + ---help--- Say Y here if you have a PantherLord/GreenAsia based game controller or adapter and want to enable force feedback support for it. @@ -247,9 +262,17 @@ config HID_SUNPLUS ---help--- Support for Sunplus wireless desktop. 
-config GREENASIA_FF - tristate "GreenAsia (Product ID 0x12) force feedback support" +config HID_GREENASIA + tristate "GreenAsia (Product ID 0x12) support" if EMBEDDED depends on USB_HID + default !EMBEDDED + ---help--- + Say Y here if you have a GreenAsia (Product ID 0x12) based game + controller or adapter. + +config GREENASIA_FF + bool "GreenAsia (Product ID 0x12) force feedback support" + depends on HID_GREENASIA select INPUT_FF_MEMLESS ---help--- Say Y here if you have a GreenAsia (Product ID 0x12) based game controller @@ -278,13 +301,22 @@ config HID_TOPSEED ---help--- Say Y if you have a TopSeed Cyberlink remote control. -config THRUSTMASTER_FF - tristate "ThrustMaster devices support" +config HID_THRUSTMASTER + tristate "ThrustMaster devices support" if EMBEDDED depends on USB_HID + default !EMBEDDED + ---help--- + Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or + a THRUSTMASTER Ferrari GT Rumble Wheel. + +config THRUSTMASTER_FF + bool "ThrustMaster devices force feedback support" + depends on HID_THRUSTMASTER select INPUT_FF_MEMLESS - help + ---help--- Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or - a THRUSTMASTER Ferrari GT Rumble Force or Force Feedback Wheel. + a THRUSTMASTER Ferrari GT Rumble Force or Force Feedback Wheel and + want to enable force feedback support for it. config HID_WACOM tristate "Wacom Bluetooth devices support" if EMBEDDED @@ -293,13 +325,21 @@ config HID_WACOM ---help--- Support for Wacom Graphire Bluetooth tablet. -config ZEROPLUS_FF - tristate "Zeroplus based game controller support" +config HID_ZEROPLUS + tristate "Zeroplus based game controller support" if EMBEDDED depends on USB_HID - select INPUT_FF_MEMLESS - help + default !EMBEDDED + ---help--- Say Y here if you have a Zeroplus based game controller. +config ZEROPLUS_FF + bool "Zeroplus based game controller force feedback support" + depends on HID_ZEROPLUS + select INPUT_FF_MEMLESS + ---help--- + Say Y here if you have a Zeroplus based game controller and want + to have force feedback support for it. 
+ endmenu endif # HID_SUPPORT diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index eddd5b633b63..db35151673b1 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -22,7 +22,7 @@ obj-$(CONFIG_HID_BELKIN) += hid-belkin.o obj-$(CONFIG_HID_CHERRY) += hid-cherry.o obj-$(CONFIG_HID_CHICONY) += hid-chicony.o obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o -obj-$(CONFIG_DRAGONRISE_FF) += hid-drff.o +obj-$(CONFIG_HID_DRAGONRISE) += hid-drff.o obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o obj-$(CONFIG_HID_GYRATION) += hid-gyration.o obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o @@ -37,10 +37,10 @@ obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o obj-$(CONFIG_HID_SONY) += hid-sony.o obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o -obj-$(CONFIG_GREENASIA_FF) += hid-gaff.o -obj-$(CONFIG_THRUSTMASTER_FF) += hid-tmff.o +obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o +obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o -obj-$(CONFIG_ZEROPLUS_FF) += hid-zpff.o +obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o obj-$(CONFIG_HID_WACOM) += hid-wacom.o obj-$(CONFIG_USB_HID) += usbhid/ diff --git a/drivers/hid/hid-drff.c b/drivers/hid/hid-drff.c index 34f3eb65100b..a239d20ad7a5 100644 --- a/drivers/hid/hid-drff.c +++ b/drivers/hid/hid-drff.c @@ -32,6 +32,8 @@ #include #include "hid-ids.h" + +#ifdef CONFIG_DRAGONRISE_FF #include "usbhid/usbhid.h" struct drff_device { @@ -135,6 +137,12 @@ static int drff_init(struct hid_device *hid) return 0; } +#else +static inline int drff_init(struct hid_device *hid) +{ + return 0; +} +#endif static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id) { diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c index 510ad3ab8d33..8a11ccddaf2e 100644 --- a/drivers/hid/hid-gaff.c +++ b/drivers/hid/hid-gaff.c @@ -31,6 +31,8 @@ #include #include #include "hid-ids.h" + +#ifdef CONFIG_GREENASIA_FF #include "usbhid/usbhid.h" struct gaff_device { @@ -130,6 +132,12 @@ static int gaff_init(struct hid_device *hid) return 0; } +#else +static inline int gaff_init(struct hid_device *hdev) +{ + return 0; +} +#endif static int ga_probe(struct hid_device *hdev, const struct hid_device_id *id) { diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c index 7c1f7b50330c..fcd6ccd02fee 100644 --- a/drivers/hid/hid-tmff.c +++ b/drivers/hid/hid-tmff.c @@ -33,11 +33,6 @@ #include "hid-ids.h" -#include "usbhid/usbhid.h" - -/* Usages for thrustmaster devices I know about */ -#define THRUSTMASTER_USAGE_FF (HID_UP_GENDESK | 0xbb) - static const signed short ff_rumble[] = { FF_RUMBLE, -1 @@ -48,6 +43,12 @@ static const signed short ff_joystick[] = { -1 }; +#ifdef CONFIG_THRUSTMASTER_FF +#include "usbhid/usbhid.h" + +/* Usages for thrustmaster devices I know about */ +#define THRUSTMASTER_USAGE_FF (HID_UP_GENDESK | 0xbb) + struct tmff_device { struct hid_report *report; struct hid_field *ff_field; @@ -209,6 +210,12 @@ fail: kfree(tmff); return error; } +#else +static inline int tmff_init(struct hid_device *hid, const signed short *ff_bits) +{ + return 0; +} +#endif static int tm_probe(struct hid_device *hdev, const struct hid_device_id *id) { diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c index 85a198a18537..57f710757bf4 100644 --- a/drivers/hid/hid-zpff.c +++ b/drivers/hid/hid-zpff.c @@ -27,6 +27,7 @@ #include "hid-ids.h" +#ifdef CONFIG_ZEROPLUS_FF #include "usbhid/usbhid.h" struct zpff_device { @@ -108,6 +109,12 @@ static int zpff_init(struct hid_device *hid) return 0; } +#else +static inline int 
zpff_init(struct hid_device *hid) +{ + return 0; +} +#endif static int zp_probe(struct hid_device *hdev, const struct hid_device_id *id) { -- cgit v1.2.3 From f0bca459829fd33d659c8cd0369ac86a3924a9bc Mon Sep 17 00:00:00 2001 From: Sergey Belyashov Date: Fri, 15 May 2009 16:05:57 +0200 Subject: HID: autocentering support for Logitech Force 3D Pro This patch adds autocentering support for Logitech Force 3D Pro. Signed-off-by: Sergey Belyashov Signed-off-by: Jiri Kosina --- drivers/hid/hid-lgff.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c index 9735be6ee4ff..56099709581c 100644 --- a/drivers/hid/hid-lgff.c +++ b/drivers/hid/hid-lgff.c @@ -50,6 +50,12 @@ static const signed short ff_joystick[] = { -1 }; +static const signed short ff_joystick_ac[] = { + FF_CONSTANT, + FF_AUTOCENTER, + -1 +}; + static const signed short ff_wheel[] = { FF_CONSTANT, FF_AUTOCENTER, @@ -60,7 +66,7 @@ static const struct dev_type devices[] = { { 0x046d, 0xc211, ff_rumble }, { 0x046d, 0xc219, ff_rumble }, { 0x046d, 0xc283, ff_joystick }, - { 0x046d, 0xc286, ff_joystick }, + { 0x046d, 0xc286, ff_joystick_ac }, { 0x046d, 0xc294, ff_wheel }, { 0x046d, 0xc295, ff_joystick }, { 0x046d, 0xca03, ff_wheel }, -- cgit v1.2.3 From 28e43a519b9edb8277fc6b490ad17aa38c45a02b Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Fri, 15 May 2009 10:16:45 -0700 Subject: RDMA/nes: Fix off-by-one bugs in reset_adapter_ne020() and init_serdes() With a postfix increment, i is incremented one past 10K/5K before the loop ends, so the error messages will be displayed too soon if the test succeeds on the last iteration. Fix the comparisons to be > instead of >=. Signed-off-by: Roel Kluin Signed-off-by: Roland Dreier --- drivers/infiniband/hw/nes/nes_hw.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index b832a7b814a2..4a84d02ece06 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -667,7 +667,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_ i = 0; while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000) mdelay(1); - if (i >= 10000) { + if (i > 10000) { nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n"); return 0; } @@ -675,7 +675,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_ i = 0; while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000) mdelay(1); - if (i >= 10000) { + if (i > 10000) { printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n", nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS)); return 0; @@ -701,7 +701,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_ i = 0; while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000) mdelay(1); - if (i >= 10000) { + if (i > 10000) { nes_debug(NES_DBG_INIT, "Did not see port soft reset done.\n"); return 0; } @@ -711,7 +711,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_ while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0) & 0x0000000f)) != 0x0000000f) && i++ < 5000) mdelay(1); - if (i >= 5000) { + if (i > 5000) { nes_debug(NES_DBG_INIT, "Serdes 0 not ready, status=%x\n", u32temp); return 0; } @@ -722,7 +722,7 @@ static unsigned int 
nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_ while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1) & 0x0000000f)) != 0x0000000f) && i++ < 5000) mdelay(1); - if (i >= 5000) { + if (i > 5000) { nes_debug(NES_DBG_INIT, "Serdes 1 not ready, status=%x\n", u32temp); return 0; } @@ -792,7 +792,7 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0) & 0x0000000f)) != 0x0000000f) && i++ < 5000) mdelay(1); - if (i >= 5000) { + if (i > 5000) { nes_debug(NES_DBG_PHY, "Init: serdes 0 not ready, status=%x\n", u32temp); return 1; } @@ -815,7 +815,7 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1) & 0x0000000f)) != 0x0000000f) && (i++ < 5000)) mdelay(1); - if (i >= 5000) { + if (i > 5000) { printk("%s: Init: serdes 1 not ready, status=%x\n", __func__, u32temp); /* return 1; */ } -- cgit v1.2.3 From 2eec8c318b9bbfe9e0f2a889b4ad3f4b4e5ba429 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Fri, 20 Mar 2009 20:27:37 +0100 Subject: mx3fb: Issue prettier log message Without this patch we end up with a log message like "mx3_sdc_fb mx3_sdc_fb: mx3fb: fb registered". That's two "fb"s too many for my taste. Signed-off-by: Sascha Hauer --- drivers/video/mx3fb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index 7a168ba65c64..b7af5256e887 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c @@ -1366,7 +1366,7 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan) mx3fb_blank(FB_BLANK_UNBLANK, fbi); - dev_info(dev, "mx3fb: fb registered, using mode %s\n", fb_mode); + dev_info(dev, "registered, using mode %s\n", fb_mode); ret = register_framebuffer(fbi); if (ret < 0) -- cgit v1.2.3 From 6029336426a2b43e4bc6f4a84be8789a047d139e Mon Sep 17 00:00:00 2001 From: Joao Ramos Date: Sun, 17 May 2009 17:22:54 +0200 Subject: ide: try to use PIO Mode 0 during probe if possible Initially set PIO Mode 0 for all host drivers that have a 'set_pio_mode' method before the IDE core figures out the best-suited PIO mode for the attached device.
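One practical consequence is that a host driver's set_pio_mode method must now also cope with being called with pio == 0 once per attached device at probe time. A hedged sketch of such a method follows; the timing table and register write are invented purely for illustration, and only the (drive, pio) calling convention is taken from this tree:

	/* Hypothetical host driver method, not from any real driver: after
	 * this change it is also invoked with pio == 0 during probe. */
	static const u8 example_pio_clocks[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };

	static void example_set_pio_mode(ide_drive_t *drive, const u8 pio)
	{
		ide_hwif_t *hwif = drive->hwif;

		/* pio is in 0..4; the new probe-time call always passes 0 */
		outb(example_pio_clocks[pio], hwif->io_ports.ctl_addr + 2);
	}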
Signed-off-by: Joao Ramos Cc: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-probe.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 7f264ed1141b..b609a581df44 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -1032,6 +1032,15 @@ static void ide_port_init_devices(ide_hwif_t *hwif) if (port_ops && port_ops->init_dev) port_ops->init_dev(drive); } + + ide_port_for_each_dev(i, drive, hwif) { + /* + * default to PIO Mode 0 before we figure out + * the most suited mode for the attached device + */ + if (port_ops && port_ops->set_pio_mode) + port_ops->set_pio_mode(drive, 0); + } } static void ide_init_port(ide_hwif_t *hwif, unsigned int port, -- cgit v1.2.3 From 5f582c8e2193e3848039de87e6a3ace7cbc7ed88 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sun, 17 May 2009 19:12:18 +0200 Subject: ide: BUG() on unknown flags in ide_disk_special() Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-io.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index a0309ea661ac..ef806ab88fb4 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -201,12 +201,8 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive) } else if (s->b.set_multmode) { s->b.set_multmode = 0; ide_tf_set_setmult_cmd(drive, &cmd.tf); - } else if (s->all) { - int special = s->all; - s->all = 0; - printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special); - return ide_stopped; - } + } else + BUG(); cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; -- cgit v1.2.3 From 582078ee3d7dacd74a7b3fe02ea258cadf32b602 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sun, 17 May 2009 19:12:18 +0200 Subject: ide: merge ide_disk_special() into do_special() (v2) While at it: - change debug printk() level to KERN_DEBUG and use __func__ - update documentation v2: - fix DEBUG build (noticed by Sergei) There should be no functional changes caused by this patch. Cc: Sergei Shtylyov Cc: Joe Perches Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-io.c | 44 ++++++++++++++++++-------------------------- 1 file changed, 18 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index ef806ab88fb4..18557683ed5a 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -184,11 +184,28 @@ static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf) tf->command = ATA_CMD_SET_MULTI; } -static ide_startstop_t ide_disk_special(ide_drive_t *drive) +/** + * do_special - issue some special commands + * @drive: drive the command is for + * + * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS, + * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive. 
+ */ + +static ide_startstop_t do_special(ide_drive_t *drive) { special_t *s = &drive->special; struct ide_cmd cmd; +#ifdef DEBUG + printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__, s->all); +#endif + if (drive->media != ide_disk) { + s->all = 0; + drive->mult_req = 0; + return ide_stopped; + } + memset(&cmd, 0, sizeof(cmd)); cmd.protocol = ATA_PROT_NODATA; @@ -213,31 +230,6 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive) return ide_started; } -/** - * do_special - issue some special commands - * @drive: drive the command is for - * - * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS, - * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive. - * - * It used to do much more, but has been scaled back. - */ - -static ide_startstop_t do_special (ide_drive_t *drive) -{ - special_t *s = &drive->special; - -#ifdef DEBUG - printk("%s: do_special: 0x%02x\n", drive->name, s->all); -#endif - if (drive->media == ide_disk) - return ide_disk_special(drive); - - s->all = 0; - drive->mult_req = 0; - return ide_stopped; -} - void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd) { ide_hwif_t *hwif = drive->hwif; -- cgit v1.2.3 From ca1b96e00ab5d1b0838965834469a0284c81a517 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sun, 17 May 2009 19:12:21 +0200 Subject: ide: replace special_t typedef by IDE_SFLAG_* flags Replace: - special_t typedef by IDE_SFLAG_* flags - 'special_t special' ide_drive_t's field by 'u8 special_flags' one There should be no functional changes caused by this patch. Acked-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-disk.c | 4 ++-- drivers/ide/ide-eh.c | 9 ++++----- drivers/ide/ide-io.c | 21 +++++++++++---------- drivers/ide/ide-probe.c | 6 +++--- drivers/ide/ide-taskfile.c | 2 +- drivers/ide/siimage.c | 4 ++-- 6 files changed, 23 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index c2438804d3c4..d345f5f23f01 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -428,14 +428,14 @@ static int set_multcount(ide_drive_t *drive, int arg) if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff)) return -EINVAL; - if (drive->special.b.set_multmode) + if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) return -EBUSY; rq = blk_get_request(drive->queue, READ, __GFP_WAIT); rq->cmd_type = REQ_TYPE_ATA_TASKFILE; drive->mult_req = arg; - drive->special.b.set_multmode = 1; + drive->special_flags |= IDE_SFLAG_SET_MULTMODE; error = blk_execute_rq(drive->queue, NULL, rq, 0); blk_put_request(rq); diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c index 5d5fb961b5ce..39d589254d41 100644 --- a/drivers/ide/ide-eh.c +++ b/drivers/ide/ide-eh.c @@ -52,7 +52,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, } if ((rq->errors & ERROR_RECAL) == ERROR_RECAL) - drive->special.b.recalibrate = 1; + drive->special_flags |= IDE_SFLAG_RECALIBRATE; ++rq->errors; @@ -268,9 +268,8 @@ static void ide_disk_pre_reset(ide_drive_t *drive) { int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1; - drive->special.all = 0; - drive->special.b.set_geometry = legacy; - drive->special.b.recalibrate = legacy; + drive->special_flags = + legacy ? 
(IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE) : 0; drive->mult_count = 0; drive->dev_flags &= ~IDE_DFLAG_PARKED; @@ -280,7 +279,7 @@ static void ide_disk_pre_reset(ide_drive_t *drive) drive->mult_req = 0; if (drive->mult_req != drive->mult_count) - drive->special.b.set_multmode = 1; + drive->special_flags |= IDE_SFLAG_SET_MULTMODE; } static void pre_reset(ide_drive_t *drive) diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 18557683ed5a..644d7b4454a6 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -194,14 +194,14 @@ static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf) static ide_startstop_t do_special(ide_drive_t *drive) { - special_t *s = &drive->special; struct ide_cmd cmd; #ifdef DEBUG - printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__, s->all); + printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__, + drive->special_flags); #endif if (drive->media != ide_disk) { - s->all = 0; + drive->special_flags = 0; drive->mult_req = 0; return ide_stopped; } @@ -209,14 +209,14 @@ static ide_startstop_t do_special(ide_drive_t *drive) memset(&cmd, 0, sizeof(cmd)); cmd.protocol = ATA_PROT_NODATA; - if (s->b.set_geometry) { - s->b.set_geometry = 0; + if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) { + drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY; ide_tf_set_specify_cmd(drive, &cmd.tf); - } else if (s->b.recalibrate) { - s->b.recalibrate = 0; + } else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) { + drive->special_flags &= ~IDE_SFLAG_RECALIBRATE; ide_tf_set_restore_cmd(drive, &cmd.tf); - } else if (s->b.set_multmode) { - s->b.set_multmode = 0; + } else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) { + drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE; ide_tf_set_setmult_cmd(drive, &cmd.tf); } else BUG(); @@ -339,7 +339,8 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) printk(KERN_ERR "%s: drive not ready for command\n", drive->name); return startstop; } - if (!drive->special.all) { + + if (drive->special_flags == 0) { struct ide_driver *drv; /* diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index b609a581df44..727a67109ff0 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -97,7 +97,7 @@ static void ide_disk_init_mult_count(ide_drive_t *drive) drive->mult_req = id[ATA_ID_MULTSECT] & 0xff; if (drive->mult_req) - drive->special.b.set_multmode = 1; + drive->special_flags |= IDE_SFLAG_SET_MULTMODE; } } @@ -1138,8 +1138,8 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif) drive->hwif = hwif; drive->ready_stat = ATA_DRDY; drive->bad_wstat = BAD_W_STAT; - drive->special.b.recalibrate = 1; - drive->special.b.set_geometry = 1; + drive->special_flags = IDE_SFLAG_RECALIBRATE | + IDE_SFLAG_SET_GEOMETRY; drive->name[0] = 'h'; drive->name[1] = 'd'; drive->name[2] = 'a' + j; diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index f400eb4d4aff..8cab3c26acda 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -166,7 +166,7 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *drive) if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) { if (custom && tf->command == ATA_CMD_SET_MULTI) { drive->mult_req = drive->mult_count = 0; - drive->special.b.recalibrate = 1; + drive->special_flags |= IDE_SFLAG_RECALIBRATE; (void)ide_dump_status(drive, __func__, stat); return ide_stopped; } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) { diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c index 
e4973cd1fba9..bd82d228608c 100644 --- a/drivers/ide/siimage.c +++ b/drivers/ide/siimage.c @@ -451,8 +451,8 @@ static int sil_sata_reset_poll(ide_drive_t *drive) static void sil_sata_pre_reset(ide_drive_t *drive) { if (drive->media == ide_disk) { - drive->special.b.set_geometry = 0; - drive->special.b.recalibrate = 0; + drive->special_flags &= + ~(IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE); } } -- cgit v1.2.3 From 29e52cf793ded6bece50de50e738596f94f07d9f Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sun, 17 May 2009 19:12:22 +0200 Subject: ide: remove chipset field from hw_regs_t * Convert host drivers that still use hw_regs_t's chipset field to use the one in struct ide_port_info instead. * Move special handling of ide_pci chipset type from ide_hw_configure() to ide_init_port(). * Remove chipset field from hw_regs_t. While at it: - remove stale comment in delkin_cb.c There should be no functional changes caused by this patch. Acked-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/at91_ide.c | 2 +- drivers/ide/au1xxx-ide.c | 2 +- drivers/ide/buddha.c | 3 +-- drivers/ide/cmd640.c | 2 -- drivers/ide/delkin_cb.c | 2 +- drivers/ide/falconide.c | 3 +-- drivers/ide/gayle.c | 3 +-- drivers/ide/icside.c | 3 ++- drivers/ide/ide-4drives.c | 2 +- drivers/ide/ide-cs.c | 2 +- drivers/ide/ide-generic.c | 3 +-- drivers/ide/ide-h8300.c | 2 +- drivers/ide/ide-legacy.c | 1 - drivers/ide/ide-pnp.c | 2 +- drivers/ide/ide-probe.c | 4 +--- drivers/ide/ide_platform.c | 3 +-- drivers/ide/macide.c | 3 +-- drivers/ide/palm_bk3710.c | 2 +- drivers/ide/q40ide.c | 3 +-- drivers/ide/rapide.c | 2 +- drivers/ide/scc_pata.c | 2 +- drivers/ide/setup-pci.c | 1 - drivers/ide/sgiioc4.c | 1 - 23 files changed, 20 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c index 403d0e4265db..8d39cc9bdf92 100644 --- a/drivers/ide/at91_ide.c +++ b/drivers/ide/at91_ide.c @@ -216,6 +216,7 @@ static const struct ide_port_info at91_ide_port_info __initdata = { .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE | IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS, .pio_mask = ATA_PIO6, + .chipset = ide_generic, }; /* @@ -304,7 +305,6 @@ static int __init at91_ide_probe(struct platform_device *pdev) ide_std_init_ports(&hw, tf_base, ctl_base + 6); hw.irq = board->irq_pin; - hw.chipset = ide_generic; hw.dev = &pdev->dev; host = ide_host_alloc(&at91_ide_port_info, hws); diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c index 46013644c965..9b31f830e2f5 100644 --- a/drivers/ide/au1xxx-ide.c +++ b/drivers/ide/au1xxx-ide.c @@ -499,6 +499,7 @@ static const struct ide_port_info au1xxx_port_info = { #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA .mwdma_mask = ATA_MWDMA2, #endif + .chipset = ide_au1xxx, }; static int au_ide_probe(struct platform_device *dev) @@ -548,7 +549,6 @@ static int au_ide_probe(struct platform_device *dev) auide_setup_ports(&hw, ahwif); hw.irq = ahwif->irq; hw.dev = &dev->dev; - hw.chipset = ide_au1xxx; ret = ide_host_add(&au1xxx_port_info, hws, &host); if (ret) diff --git a/drivers/ide/buddha.c b/drivers/ide/buddha.c index d028f8864bc1..9aa2cd9be310 100644 --- a/drivers/ide/buddha.c +++ b/drivers/ide/buddha.c @@ -139,13 +139,12 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base, hw->irq = IRQ_AMIGA_PORTS; hw->ack_intr = ack_intr; - - hw->chipset = ide_generic; } static const struct ide_port_info buddha_port_info = { .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, 
.irq_flags = IRQF_SHARED, + .chipset = ide_generic, }; /* diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c index 8890276fef7f..e862a2503ab0 100644 --- a/drivers/ide/cmd640.c +++ b/drivers/ide/cmd640.c @@ -762,11 +762,9 @@ static int __init cmd640x_init(void) ide_std_init_ports(&hw[0], 0x1f0, 0x3f6); hw[0].irq = 14; - hw[0].chipset = ide_cmd640; ide_std_init_ports(&hw[1], 0x170, 0x376); hw[1].irq = 15; - hw[1].chipset = ide_cmd640; printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x" "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr); diff --git a/drivers/ide/delkin_cb.c b/drivers/ide/delkin_cb.c index f153b95619bb..a0de834a81c3 100644 --- a/drivers/ide/delkin_cb.c +++ b/drivers/ide/delkin_cb.c @@ -68,6 +68,7 @@ static const struct ide_port_info delkin_cb_port_info = { IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, .init_chipset = delkin_cb_init_chipset, + .chipset = ide_pci, }; static int __devinit @@ -97,7 +98,6 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) ide_std_init_ports(&hw, base + 0x10, base + 0x1e); hw.irq = dev->irq; hw.dev = &dev->dev; - hw.chipset = ide_pci; /* this enables IRQ sharing */ rc = ide_host_add(&delkin_cb_port_info, hws, &host); if (rc) diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c index 0e2df6755ec9..770cfa67bdc8 100644 --- a/drivers/ide/falconide.c +++ b/drivers/ide/falconide.c @@ -111,6 +111,7 @@ static const struct ide_port_info falconide_port_info = { .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, + .chipset = ide_generic, }; static void __init falconide_setup_ports(hw_regs_t *hw) @@ -128,8 +129,6 @@ static void __init falconide_setup_ports(hw_regs_t *hw) hw->irq = IRQ_MFP_IDE; hw->ack_intr = NULL; - - hw->chipset = ide_generic; } /* diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c index c7119516c5a7..71db2f9c3361 100644 --- a/drivers/ide/gayle.c +++ b/drivers/ide/gayle.c @@ -106,14 +106,13 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base, hw->irq = IRQ_AMIGA_PORTS; hw->ack_intr = ack_intr; - - hw->chipset = ide_generic; } static const struct ide_port_info gayle_port_info = { .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, + .chipset = ide_generic, }; /* diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c index 36da913cc553..6352a44ed179 100644 --- a/drivers/ide/icside.c +++ b/drivers/ide/icside.c @@ -398,11 +398,11 @@ static void icside_setup_ports(hw_regs_t *hw, void __iomem *base, hw->irq = ec->irq; hw->dev = &ec->dev; - hw->chipset = ide_acorn; } static const struct ide_port_info icside_v5_port_info = { .host_flags = IDE_HFLAG_NO_DMA, + .chipset = ide_acorn, }; static int __devinit @@ -457,6 +457,7 @@ static const struct ide_port_info icside_v6_port_info __initdata = { .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, .mwdma_mask = ATA_MWDMA2, .swdma_mask = ATA_SWDMA2, + .chipset = ide_acorn, }; static int __devinit diff --git a/drivers/ide/ide-4drives.c b/drivers/ide/ide-4drives.c index 78aca75a2c48..617ca7a5ec8a 100644 --- a/drivers/ide/ide-4drives.c +++ b/drivers/ide/ide-4drives.c @@ -25,6 +25,7 @@ static const struct ide_port_info ide_4drives_port_info = { .port_ops = &ide_4drives_port_ops, .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA | IDE_HFLAG_4DRIVES, + .chipset = ide_4drives, }; static int __init ide_4drives_init(void) @@ -52,7 +53,6 @@ static int __init ide_4drives_init(void) ide_std_init_ports(&hw, base, ctl); hw.irq = 14; - hw.chipset 
= ide_4drives; return ide_host_add(&ide_4drives_port_info, hws, NULL); } diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c index 9e47f3529d55..43d09dcae28c 100644 --- a/drivers/ide/ide-cs.c +++ b/drivers/ide/ide-cs.c @@ -155,6 +155,7 @@ static const struct ide_port_info idecs_port_info = { .port_ops = &idecs_port_ops, .host_flags = IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, + .chipset = ide_pci, }; static struct ide_host *idecs_register(unsigned long io, unsigned long ctl, @@ -181,7 +182,6 @@ static struct ide_host *idecs_register(unsigned long io, unsigned long ctl, memset(&hw, 0, sizeof(hw)); ide_std_init_ports(&hw, io, ctl); hw.irq = irq; - hw.chipset = ide_pci; hw.dev = &handle->dev; rc = ide_host_add(&idecs_port_info, hws, &host); diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c index 7812ca0be13b..0427759d0187 100644 --- a/drivers/ide/ide-generic.c +++ b/drivers/ide/ide-generic.c @@ -29,6 +29,7 @@ MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports"); static const struct ide_port_info ide_generic_port_info = { .host_flags = IDE_HFLAG_NO_DMA, + .chipset = ide_generic, }; #ifdef CONFIG_ARM @@ -132,8 +133,6 @@ static int __init ide_generic_init(void) #else hw.irq = legacy_irqs[i]; #endif - hw.chipset = ide_generic; - rc = ide_host_add(&ide_generic_port_info, hws, NULL); if (rc) { release_region(io_addr + 0x206, 1); diff --git a/drivers/ide/ide-h8300.c b/drivers/ide/ide-h8300.c index c06ebdc4a130..40eff6c9759c 100644 --- a/drivers/ide/ide-h8300.c +++ b/drivers/ide/ide-h8300.c @@ -73,12 +73,12 @@ static inline void hw_setup(hw_regs_t *hw) hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i; hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT; hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ; - hw->chipset = ide_generic; } static const struct ide_port_info h8300_port_info = { .tp_ops = &h8300_tp_ops, .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA, + .chipset = ide_generic, }; static int __init h8300_ide_init(void) diff --git a/drivers/ide/ide-legacy.c b/drivers/ide/ide-legacy.c index 8c5dcbf22547..0c5b29c56cbe 100644 --- a/drivers/ide/ide-legacy.c +++ b/drivers/ide/ide-legacy.c @@ -33,7 +33,6 @@ static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw, ide_std_init_ports(hw, base, ctl); hw->irq = irq; - hw->chipset = d->chipset; hw->config = config; hws[port_no] = hw; diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c index 6e80b774e88a..47043fda2398 100644 --- a/drivers/ide/ide-pnp.c +++ b/drivers/ide/ide-pnp.c @@ -29,6 +29,7 @@ static struct pnp_device_id idepnp_devices[] = { static const struct ide_port_info ide_pnp_port_info = { .host_flags = IDE_HFLAG_NO_DMA, + .chipset = ide_generic, }; static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) @@ -62,7 +63,6 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) memset(&hw, 0, sizeof(hw)); ide_std_init_ports(&hw, base, ctl); hw.irq = pnp_irq(dev, 0); - hw.chipset = ide_generic; rc = ide_host_add(&ide_pnp_port_info, hws, &host); if (rc) diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 727a67109ff0..f17ba1932ad6 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -1048,8 +1048,7 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port, { hwif->channel = port; - if (d->chipset) - hwif->chipset = d->chipset; + hwif->chipset = d->chipset ? 
d->chipset : ide_pci; if (d->init_iops) d->init_iops(hwif); @@ -1178,7 +1177,6 @@ static void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) { memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports)); hwif->irq = hw->irq; - hwif->chipset = hw->chipset; hwif->dev = hw->dev; hwif->gendev.parent = hw->parent ? hw->parent : hw->dev; hwif->ack_intr = hw->ack_intr; diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c index 051b4ab0f359..813653362a26 100644 --- a/drivers/ide/ide_platform.c +++ b/drivers/ide/ide_platform.c @@ -40,12 +40,11 @@ static void __devinit plat_ide_setup_ports(hw_regs_t *hw, hw->io_ports.ctl_addr = (unsigned long)ctrl; hw->irq = irq; - - hw->chipset = ide_generic; } static const struct ide_port_info platform_ide_port_info = { .host_flags = IDE_HFLAG_NO_DMA, + .chipset = ide_generic, }; static int __devinit plat_ide_probe(struct platform_device *pdev) diff --git a/drivers/ide/macide.c b/drivers/ide/macide.c index 4b1718e83283..3af9e96da617 100644 --- a/drivers/ide/macide.c +++ b/drivers/ide/macide.c @@ -76,13 +76,12 @@ static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base, hw->irq = irq; hw->ack_intr = ack_intr; - - hw->chipset = ide_generic; } static const struct ide_port_info macide_port_info = { .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, + .chipset = ide_generic, }; static const char *mac_ide_name[] = diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c index 09d813d313f4..a455c25f43cc 100644 --- a/drivers/ide/palm_bk3710.c +++ b/drivers/ide/palm_bk3710.c @@ -306,6 +306,7 @@ static struct ide_port_info __devinitdata palm_bk3710_port_info = { .host_flags = IDE_HFLAG_MMIO, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, + .chipset = ide_palm3710, }; static int __init palm_bk3710_probe(struct platform_device *pdev) @@ -363,7 +364,6 @@ static int __init palm_bk3710_probe(struct platform_device *pdev) (base + IDE_PALM_ATA_PRI_CTL_OFFSET); hw.irq = irq->start; hw.dev = &pdev->dev; - hw.chipset = ide_palm3710; palm_bk3710_port_info.udma_mask = rate < 100000000 ? 
ATA_UDMA4 : ATA_UDMA5; diff --git a/drivers/ide/q40ide.c b/drivers/ide/q40ide.c index c79346679244..7488d4ff3d7c 100644 --- a/drivers/ide/q40ide.c +++ b/drivers/ide/q40ide.c @@ -70,8 +70,6 @@ static void q40_ide_setup_ports(hw_regs_t *hw, unsigned long base, hw->irq = irq; hw->ack_intr = ack_intr; - - hw->chipset = ide_generic; } static void q40ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, @@ -119,6 +117,7 @@ static const struct ide_port_info q40ide_port_info = { .tp_ops = &q40ide_tp_ops, .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, + .chipset = ide_generic, }; /* diff --git a/drivers/ide/rapide.c b/drivers/ide/rapide.c index d5003ca69801..bd4d7a8a666c 100644 --- a/drivers/ide/rapide.c +++ b/drivers/ide/rapide.c @@ -13,6 +13,7 @@ static const struct ide_port_info rapide_port_info = { .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, + .chipset = ide_generic, }; static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base, @@ -49,7 +50,6 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id) memset(&hw, 0, sizeof(hw)); rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq); - hw.chipset = ide_generic; hw.dev = &ec->dev; ret = ide_host_add(&rapide_port_info, hws, &host); diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c index 5be41f25204f..9e3aef317332 100644 --- a/drivers/ide/scc_pata.c +++ b/drivers/ide/scc_pata.c @@ -567,7 +567,6 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev, hw.io_ports_array[i] = ports->dma + 0x20 + i * 4; hw.irq = dev->irq; hw.dev = &dev->dev; - hw.chipset = ide_pci; rc = ide_host_add(d, hws, &host); if (rc) @@ -823,6 +822,7 @@ static const struct ide_port_info scc_chipset __devinitdata = { .host_flags = IDE_HFLAG_SINGLE, .irq_flags = IRQF_SHARED, .pio_mask = ATA_PIO4, + .chipset = ide_pci, }; /** diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c index 7a3a12d6e638..82519ddc9108 100644 --- a/drivers/ide/setup-pci.c +++ b/drivers/ide/setup-pci.c @@ -344,7 +344,6 @@ static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d, memset(hw, 0, sizeof(*hw)); hw->dev = &dev->dev; - hw->chipset = d->chipset ? d->chipset : ide_pci; ide_std_init_ports(hw, base, ctl | 2); return 0; diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c index e5d2a48a84de..676d41c7add5 100644 --- a/drivers/ide/sgiioc4.c +++ b/drivers/ide/sgiioc4.c @@ -575,7 +575,6 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) memset(&hw, 0, sizeof(hw)); sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport); hw.irq = dev->irq; - hw.chipset = ide_pci; hw.dev = &dev->dev; /* Initializing chipset IRQ Registers */ -- cgit v1.2.3 From dca3983059a4481e4ae97bbf0ac4b4c21429e1a5 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sun, 17 May 2009 19:12:24 +0200 Subject: ide: pass number of ports to ide_host_{alloc,add}() (v2) Pass number of ports to ide_host_{alloc,add}() and then update all users accordingly. v2: - drop no longer needed NULL initializers in buddha.c, cmd640.c and gayle.c (noticed by Sergei) There should be no functional changes caused by this patch. 
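For a single-port host driver the conversion looks like the following sketch (registration step only; base, ctl, irq and example_port_info are placeholders standing in for the driver's own values):

	/* The hws[] array no longer needs NULL padding; its length is
	 * now passed to ide_host_add() explicitly. */
	hw_regs_t hw, *hws[] = { &hw };
	struct ide_host *host;
	int rc;

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, base, ctl);
	hw.irq = irq;

	rc = ide_host_add(&example_port_info, hws, 1, &host);
	if (rc)
		return rc;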
Acked-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/at91_ide.c | 5 ++--- drivers/ide/au1xxx-ide.c | 4 ++-- drivers/ide/buddha.c | 4 ++-- drivers/ide/cmd640.c | 5 +++-- drivers/ide/cs5520.c | 4 ++-- drivers/ide/delkin_cb.c | 4 ++-- drivers/ide/falconide.c | 4 ++-- drivers/ide/gayle.c | 4 ++-- drivers/ide/icside.c | 8 ++++---- drivers/ide/ide-4drives.c | 4 ++-- drivers/ide/ide-cs.c | 4 ++-- drivers/ide/ide-generic.c | 4 ++-- drivers/ide/ide-h8300.c | 4 ++-- drivers/ide/ide-legacy.c | 4 ++-- drivers/ide/ide-pnp.c | 4 ++-- drivers/ide/ide-probe.c | 9 +++++---- drivers/ide/ide_platform.c | 4 ++-- drivers/ide/macide.c | 4 ++-- drivers/ide/palm_bk3710.c | 4 ++-- drivers/ide/pmac.c | 4 ++-- drivers/ide/q40ide.c | 4 ++-- drivers/ide/rapide.c | 4 ++-- drivers/ide/scc_pata.c | 4 ++-- drivers/ide/setup-pci.c | 6 +++--- drivers/ide/sgiioc4.c | 4 ++-- drivers/ide/tx4938ide.c | 5 ++--- drivers/ide/tx4939ide.c | 5 ++--- 27 files changed, 61 insertions(+), 62 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c index 8d39cc9bdf92..11fe1ffdff76 100644 --- a/drivers/ide/at91_ide.c +++ b/drivers/ide/at91_ide.c @@ -247,8 +247,7 @@ irqreturn_t at91_irq_handler(int irq, void *dev_id) static int __init at91_ide_probe(struct platform_device *pdev) { int ret; - hw_regs_t hw; - hw_regs_t *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; struct ide_host *host; struct resource *res; unsigned long tf_base = 0, ctl_base = 0; @@ -307,7 +306,7 @@ static int __init at91_ide_probe(struct platform_device *pdev) hw.irq = board->irq_pin; hw.dev = &pdev->dev; - host = ide_host_alloc(&at91_ide_port_info, hws); + host = ide_host_alloc(&at91_ide_port_info, hws, 1); if (!host) { perr("failed to allocate ide host\n"); return -ENOMEM; diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c index 9b31f830e2f5..32f5be686018 100644 --- a/drivers/ide/au1xxx-ide.c +++ b/drivers/ide/au1xxx-ide.c @@ -508,7 +508,7 @@ static int au_ide_probe(struct platform_device *dev) struct resource *res; struct ide_host *host; int ret = 0; - hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) char *mode = "MWDMA2"; @@ -550,7 +550,7 @@ static int au_ide_probe(struct platform_device *dev) hw.irq = ahwif->irq; hw.dev = &dev->dev; - ret = ide_host_add(&au1xxx_port_info, hws, &host); + ret = ide_host_add(&au1xxx_port_info, hws, 1, &host); if (ret) goto out; diff --git a/drivers/ide/buddha.c b/drivers/ide/buddha.c index 9aa2cd9be310..0450652cdabb 100644 --- a/drivers/ide/buddha.c +++ b/drivers/ide/buddha.c @@ -160,7 +160,7 @@ static int __init buddha_init(void) while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { unsigned long board; - hw_regs_t hw[MAX_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL }; + hw_regs_t hw[MAX_NUM_HWIFS], *hws[MAX_NUM_HWIFS]; if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) { buddha_num_hwifs = BUDDHA_NUM_HWIFS; @@ -224,7 +224,7 @@ fail_base2: hws[i] = &hw[i]; } - ide_host_add(&buddha_port_info, hws, NULL); + ide_host_add(&buddha_port_info, hws, i, NULL); } return 0; diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c index e862a2503ab0..edb3a7a35c80 100644 --- a/drivers/ide/cmd640.c +++ b/drivers/ide/cmd640.c @@ -708,7 +708,7 @@ static int __init cmd640x_init(void) int second_port_cmd640 = 0, rc; const char *bus_type, *port2; u8 b, cfr; - hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL }; + hw_regs_t hw[2], *hws[2]; if (cmd640_vlb && 
probe_for_cmd640_vlb()) { bus_type = "VLB"; @@ -822,7 +822,8 @@ static int __init cmd640x_init(void) cmd640_dump_regs(); #endif - return ide_host_add(&cmd640_port_info, hws, NULL); + return ide_host_add(&cmd640_port_info, hws, second_port_cmd640 ? 2 : 1, + NULL); } module_param_named(probe_vlb, cmd640_vlb, bool, 0); diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c index 87987a7d36c9..a9023d7843f2 100644 --- a/drivers/ide/cs5520.c +++ b/drivers/ide/cs5520.c @@ -110,7 +110,7 @@ static const struct ide_port_info cyrix_chipset __devinitdata = { static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id) { const struct ide_port_info *d = &cyrix_chipset; - hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL }; + hw_regs_t hw[2], *hws[] = { NULL, NULL }; ide_setup_pci_noise(dev, d); @@ -136,7 +136,7 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic ide_pci_setup_ports(dev, d, &hw[0], &hws[0]); hw[0].irq = 14; - return ide_host_add(d, hws, NULL); + return ide_host_add(d, hws, 2, NULL); } static const struct pci_device_id cs5520_pci_tbl[] = { diff --git a/drivers/ide/delkin_cb.c b/drivers/ide/delkin_cb.c index a0de834a81c3..d4a76f22ed15 100644 --- a/drivers/ide/delkin_cb.c +++ b/drivers/ide/delkin_cb.c @@ -77,7 +77,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) struct ide_host *host; unsigned long base; int rc; - hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; rc = pci_enable_device(dev); if (rc) { @@ -99,7 +99,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) hw.irq = dev->irq; hw.dev = &dev->dev; - rc = ide_host_add(&delkin_cb_port_info, hws, &host); + rc = ide_host_add(&delkin_cb_port_info, hws, 1, &host); if (rc) goto out_disable; diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c index 770cfa67bdc8..adb5b0cf7626 100644 --- a/drivers/ide/falconide.c +++ b/drivers/ide/falconide.c @@ -138,7 +138,7 @@ static void __init falconide_setup_ports(hw_regs_t *hw) static int __init falconide_init(void) { struct ide_host *host; - hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; int rc; if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE)) @@ -153,7 +153,7 @@ static int __init falconide_init(void) falconide_setup_ports(&hw); - host = ide_host_alloc(&falconide_port_info, hws); + host = ide_host_alloc(&falconide_port_info, hws, 1); if (host == NULL) { rc = -ENOMEM; goto err; diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c index 71db2f9c3361..253ff34afd8f 100644 --- a/drivers/ide/gayle.c +++ b/drivers/ide/gayle.c @@ -125,7 +125,7 @@ static int __init gayle_init(void) unsigned long base, ctrlport, irqport; ide_ack_intr_t *ack_intr; int a4000, i, rc; - hw_regs_t hw[GAYLE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL }; + hw_regs_t hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS]; if (!MACH_IS_AMIGA) return -ENODEV; @@ -170,7 +170,7 @@ found: hws[i] = &hw[i]; } - rc = ide_host_add(&gayle_port_info, hws, NULL); + rc = ide_host_add(&gayle_port_info, hws, i, NULL); if (rc) release_mem_region(res_start, res_n); diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c index 6352a44ed179..6223b80beb35 100644 --- a/drivers/ide/icside.c +++ b/drivers/ide/icside.c @@ -410,7 +410,7 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec) { void __iomem *base; struct ide_host *host; - hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; int ret; base = ecardm_iomap(ec, 
ECARD_RES_MEMC, 0, 0); @@ -431,7 +431,7 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec) icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec); - host = ide_host_alloc(&icside_v5_port_info, hws); + host = ide_host_alloc(&icside_v5_port_info, hws, 1); if (host == NULL) return -ENODEV; @@ -467,7 +467,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) struct ide_host *host; unsigned int sel = 0; int ret; - hw_regs_t hw[2], *hws[] = { &hw[0], &hw[1], NULL, NULL }; + hw_regs_t hw[2], *hws[] = { &hw[0], &hw[1] }; struct ide_port_info d = icside_v6_port_info; ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); @@ -507,7 +507,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec); icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec); - host = ide_host_alloc(&d, hws); + host = ide_host_alloc(&d, hws, 2); if (host == NULL) return -ENODEV; diff --git a/drivers/ide/ide-4drives.c b/drivers/ide/ide-4drives.c index 617ca7a5ec8a..189b8bd9957e 100644 --- a/drivers/ide/ide-4drives.c +++ b/drivers/ide/ide-4drives.c @@ -31,7 +31,7 @@ static const struct ide_port_info ide_4drives_port_info = { static int __init ide_4drives_init(void) { unsigned long base = 0x1f0, ctl = 0x3f6; - hw_regs_t hw, *hws[] = { &hw, &hw, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw, &hw }; if (probe_4drives == 0) return -ENODEV; @@ -54,7 +54,7 @@ static int __init ide_4drives_init(void) ide_std_init_ports(&hw, base, ctl); hw.irq = 14; - return ide_host_add(&ide_4drives_port_info, hws, NULL); + return ide_host_add(&ide_4drives_port_info, hws, 2, NULL); } module_init(ide_4drives_init); diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c index 43d09dcae28c..63309ad04cb2 100644 --- a/drivers/ide/ide-cs.c +++ b/drivers/ide/ide-cs.c @@ -164,7 +164,7 @@ static struct ide_host *idecs_register(unsigned long io, unsigned long ctl, struct ide_host *host; ide_hwif_t *hwif; int i, rc; - hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; if (!request_region(io, 8, DRV_NAME)) { printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", @@ -184,7 +184,7 @@ static struct ide_host *idecs_register(unsigned long io, unsigned long ctl, hw.irq = irq; hw.dev = &handle->dev; - rc = ide_host_add(&idecs_port_info, hws, &host); + rc = ide_host_add(&idecs_port_info, hws, 1, &host); if (rc) goto out_release; diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c index 0427759d0187..0d40848540d4 100644 --- a/drivers/ide/ide-generic.c +++ b/drivers/ide/ide-generic.c @@ -86,7 +86,7 @@ static void ide_generic_check_pci_legacy_iobases(int *primary, int *secondary) static int __init ide_generic_init(void) { - hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; unsigned long io_addr; int i, rc = 0, primary = 0, secondary = 0; @@ -133,7 +133,7 @@ static int __init ide_generic_init(void) #else hw.irq = legacy_irqs[i]; #endif - rc = ide_host_add(&ide_generic_port_info, hws, NULL); + rc = ide_host_add(&ide_generic_port_info, hws, 1, NULL); if (rc) { release_region(io_addr + 0x206, 1); release_region(io_addr, 8); diff --git a/drivers/ide/ide-h8300.c b/drivers/ide/ide-h8300.c index 40eff6c9759c..0b5fabe2806d 100644 --- a/drivers/ide/ide-h8300.c +++ b/drivers/ide/ide-h8300.c @@ -83,7 +83,7 @@ static const struct ide_port_info h8300_port_info = { static int __init h8300_ide_init(void) { - hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; + 
hw_regs_t hw, *hws[] = { &hw }; printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n"); @@ -96,7 +96,7 @@ static int __init h8300_ide_init(void) hw_setup(&hw); - return ide_host_add(&h8300_port_info, hws, NULL); + return ide_host_add(&h8300_port_info, hws, 1, NULL); out_busy: printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n"); diff --git a/drivers/ide/ide-legacy.c b/drivers/ide/ide-legacy.c index 0c5b29c56cbe..98389e539909 100644 --- a/drivers/ide/ide-legacy.c +++ b/drivers/ide/ide-legacy.c @@ -40,7 +40,7 @@ static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw, int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config) { - hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL }; + hw_regs_t hw[2], *hws[] = { NULL, NULL }; memset(&hw, 0, sizeof(hw)); @@ -52,6 +52,6 @@ int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config) (d->host_flags & IDE_HFLAG_SINGLE)) return -ENOENT; - return ide_host_add(d, hws, NULL); + return ide_host_add(d, hws, 2, NULL); } EXPORT_SYMBOL_GPL(ide_legacy_device_add); diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c index 47043fda2398..6bca0f05ee90 100644 --- a/drivers/ide/ide-pnp.c +++ b/drivers/ide/ide-pnp.c @@ -37,7 +37,7 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) struct ide_host *host; unsigned long base, ctl; int rc; - hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n"); @@ -64,7 +64,7 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) ide_std_init_ports(&hw, base, ctl); hw.irq = pnp_irq(dev, 0); - rc = ide_host_add(&ide_pnp_port_info, hws, &host); + rc = ide_host_add(&ide_pnp_port_info, hws, 1, &host); if (rc) goto out; diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index f17ba1932ad6..6c7451a6e609 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -1261,7 +1261,8 @@ out_nomem: return -ENOMEM; } -struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws) +struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws, + unsigned int n_ports) { struct ide_host *host; struct device *dev = hws[0] ? 
hws[0]->dev : NULL; @@ -1272,7 +1273,7 @@ struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws) if (host == NULL) return NULL; - for (i = 0; i < MAX_HOST_PORTS; i++) { + for (i = 0; i < n_ports; i++) { ide_hwif_t *hwif; int idx; @@ -1443,12 +1444,12 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d, EXPORT_SYMBOL_GPL(ide_host_register); int ide_host_add(const struct ide_port_info *d, hw_regs_t **hws, - struct ide_host **hostp) + unsigned int n_ports, struct ide_host **hostp) { struct ide_host *host; int rc; - host = ide_host_alloc(d, hws); + host = ide_host_alloc(d, hws, n_ports); if (host == NULL) return -ENOMEM; diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c index 813653362a26..47413c2b5f8e 100644 --- a/drivers/ide/ide_platform.c +++ b/drivers/ide/ide_platform.c @@ -54,7 +54,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) struct pata_platform_info *pdata; struct ide_host *host; int ret = 0, mmio = 0; - hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; struct ide_port_info d = platform_ide_port_info; pdata = pdev->dev.platform_data; @@ -98,7 +98,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) if (mmio) d.host_flags |= IDE_HFLAG_MMIO; - ret = ide_host_add(&d, hws, &host); + ret = ide_host_add(&d, hws, 1, &host); if (ret) goto out; diff --git a/drivers/ide/macide.c b/drivers/ide/macide.c index 3af9e96da617..31aa27818604 100644 --- a/drivers/ide/macide.c +++ b/drivers/ide/macide.c @@ -96,7 +96,7 @@ static int __init macide_init(void) ide_ack_intr_t *ack_intr; unsigned long base; int irq; - hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; if (!MACH_IS_MAC) return -ENODEV; @@ -126,7 +126,7 @@ static int __init macide_init(void) macide_setup_ports(&hw, base, irq, ack_intr); - return ide_host_add(&macide_port_info, hws, NULL); + return ide_host_add(&macide_port_info, hws, 1, NULL); } module_init(macide_init); diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c index a455c25f43cc..4507a6d801bc 100644 --- a/drivers/ide/palm_bk3710.c +++ b/drivers/ide/palm_bk3710.c @@ -316,7 +316,7 @@ static int __init palm_bk3710_probe(struct platform_device *pdev) void __iomem *base; unsigned long rate, mem_size; int i, rc; - hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; clk = clk_get(&pdev->dev, "IDECLK"); if (IS_ERR(clk)) @@ -369,7 +369,7 @@ static int __init palm_bk3710_probe(struct platform_device *pdev) ATA_UDMA5; /* Register the IDE interface with Linux */ - rc = ide_host_add(&palm_bk3710_port_info, hws, NULL); + rc = ide_host_add(&palm_bk3710_port_info, hws, 1, NULL); if (rc) goto out; diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c index f76e4e6b408f..f4f806476e0a 100644 --- a/drivers/ide/pmac.c +++ b/drivers/ide/pmac.c @@ -1029,7 +1029,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw) const int *bidp; struct ide_host *host; ide_hwif_t *hwif; - hw_regs_t *hws[] = { hw, NULL, NULL, NULL }; + hw_regs_t *hws[] = { hw }; struct ide_port_info d = pmac_port_info; int rc; @@ -1077,7 +1077,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw) /* Make sure we have sane timings */ sanitize_timings(pmif); - host = ide_host_alloc(&d, hws); + host = ide_host_alloc(&d, hws, 1); if (host == NULL) return -ENOMEM; hwif = host->ports[0]; diff --git a/drivers/ide/q40ide.c b/drivers/ide/q40ide.c index 
7488d4ff3d7c..e46229fe5ea3 100644 --- a/drivers/ide/q40ide.c +++ b/drivers/ide/q40ide.c @@ -135,7 +135,7 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={ static int __init q40ide_init(void) { int i; - hw_regs_t hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL }; + hw_regs_t hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL }; if (!MACH_IS_Q40) return -ENODEV; @@ -162,7 +162,7 @@ static int __init q40ide_init(void) hws[i] = &hw[i]; } - return ide_host_add(&q40ide_port_info, hws, NULL); + return ide_host_add(&q40ide_port_info, hws, Q40IDE_NUM_HWIFS, NULL); } module_init(q40ide_init); diff --git a/drivers/ide/rapide.c b/drivers/ide/rapide.c index bd4d7a8a666c..c4da3dd39f5c 100644 --- a/drivers/ide/rapide.c +++ b/drivers/ide/rapide.c @@ -36,7 +36,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id) void __iomem *base; struct ide_host *host; int ret; - hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; ret = ecard_request_resources(ec); if (ret) @@ -52,7 +52,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id) rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq); hw.dev = &ec->dev; - ret = ide_host_add(&rapide_port_info, hws, &host); + ret = ide_host_add(&rapide_port_info, hws, 1, &host); if (ret) goto release; diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c index 9e3aef317332..9415f8c8a41d 100644 --- a/drivers/ide/scc_pata.c +++ b/drivers/ide/scc_pata.c @@ -559,7 +559,7 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev, { struct scc_ports *ports = pci_get_drvdata(dev); struct ide_host *host; - hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; int i, rc; memset(&hw, 0, sizeof(hw)); @@ -568,7 +568,7 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev, hw.irq = dev->irq; hw.dev = &dev->dev; - rc = ide_host_add(d, hws, &host); + rc = ide_host_add(d, hws, 1, &host); if (rc) return rc; diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c index 82519ddc9108..d78f4c994517 100644 --- a/drivers/ide/setup-pci.c +++ b/drivers/ide/setup-pci.c @@ -538,7 +538,7 @@ int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d, void *priv) { struct ide_host *host; - hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL }; + hw_regs_t hw[2], *hws[] = { NULL, NULL }; int ret; ret = ide_setup_pci_controller(dev, d, 1); @@ -547,7 +547,7 @@ int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d, ide_pci_setup_ports(dev, d, &hw[0], &hws[0]); - host = ide_host_alloc(d, hws); + host = ide_host_alloc(d, hws, 2); if (host == NULL) { ret = -ENOMEM; goto out; @@ -596,7 +596,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]); } - host = ide_host_alloc(d, hws); + host = ide_host_alloc(d, hws, 4); if (host == NULL) { ret = -ENOMEM; goto out; diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c index 676d41c7add5..3f8ee357ffb3 100644 --- a/drivers/ide/sgiioc4.c +++ b/drivers/ide/sgiioc4.c @@ -546,7 +546,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) unsigned long cmd_base, irqport; unsigned long bar0, cmd_phys_base, ctl; void __iomem *virt_base; - hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; int rc; /* Get the CmdBlk and CtrlBlk Base Registers */ @@ -580,7 +580,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) /* Initializing chipset IRQ Registers */ writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4)); - rc = 
ide_host_add(&sgiioc4_port_info, hws, NULL); + rc = ide_host_add(&sgiioc4_port_info, hws, 1, NULL); if (!rc) return 0; diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c index e33d764e2945..16adc18499fa 100644 --- a/drivers/ide/tx4938ide.c +++ b/drivers/ide/tx4938ide.c @@ -130,8 +130,7 @@ static const struct ide_port_info tx4938ide_port_info __initdata = { static int __init tx4938ide_probe(struct platform_device *pdev) { - hw_regs_t hw; - hw_regs_t *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; struct ide_host *host; struct resource *res; struct tx4938ide_platform_info *pdata = pdev->dev.platform_data; @@ -183,7 +182,7 @@ static int __init tx4938ide_probe(struct platform_device *pdev) tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, 0); else d.port_ops = NULL; - ret = ide_host_add(&d, hws, &host); + ret = ide_host_add(&d, hws, 1, &host); if (!ret) platform_set_drvdata(pdev, host); return ret; diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c index 564422d23976..fa57920d003a 100644 --- a/drivers/ide/tx4939ide.c +++ b/drivers/ide/tx4939ide.c @@ -537,8 +537,7 @@ static const struct ide_port_info tx4939ide_port_info __initdata = { static int __init tx4939ide_probe(struct platform_device *pdev) { - hw_regs_t hw; - hw_regs_t *hws[] = { &hw, NULL, NULL, NULL }; + hw_regs_t hw, *hws[] = { &hw }; struct ide_host *host; struct resource *res; int irq, ret; @@ -581,7 +580,7 @@ static int __init tx4939ide_probe(struct platform_device *pdev) hw.dev = &pdev->dev; pr_info("TX4939 IDE interface (base %#lx, irq %d)\n", mapbase, irq); - host = ide_host_alloc(&tx4939ide_port_info, hws); + host = ide_host_alloc(&tx4939ide_port_info, hws, 1); if (!host) return -ENOMEM; /* use extra_base for base address of the all registers */ -- cgit v1.2.3 From 9f36d31437922354d104a2db407f397e79e4027e Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sun, 17 May 2009 19:12:25 +0200 Subject: ide: remove hw_regs_t typedef Remove hw_regs_t typedef and rename struct hw_regs_s to struct ide_hw. There should be no functional changes caused by this patch. 
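For illustration only (not part of this patch): the conversion is purely mechanical, as in this hypothetical out-of-tree host driver sketch, where example_port_info stands in for a driver's real struct ide_port_info:

	#include <linux/ide.h>

	static int __init example_ide_init(void)
	{
		/* was: hw_regs_t hw, *hws[] = { &hw }; */
		struct ide_hw hw, *hws[] = { &hw };

		/* was: memset(&hw, 0, sizeof(hw_regs_t)); */
		memset(&hw, 0, sizeof(hw));
		ide_std_init_ports(&hw, 0x1f0, 0x3f6);
		hw.irq = 14;

		return ide_host_add(&example_port_info, hws, 1, NULL);
	}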
Acked-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/at91_ide.c | 2 +- drivers/ide/au1xxx-ide.c | 4 ++-- drivers/ide/buddha.c | 4 ++-- drivers/ide/cmd640.c | 2 +- drivers/ide/cs5520.c | 2 +- drivers/ide/delkin_cb.c | 2 +- drivers/ide/falconide.c | 4 ++-- drivers/ide/gayle.c | 4 ++-- drivers/ide/icside.c | 6 +++--- drivers/ide/ide-4drives.c | 2 +- drivers/ide/ide-cs.c | 2 +- drivers/ide/ide-generic.c | 2 +- drivers/ide/ide-h8300.c | 6 +++--- drivers/ide/ide-legacy.c | 4 ++-- drivers/ide/ide-pnp.c | 2 +- drivers/ide/ide-probe.c | 10 +++++----- drivers/ide/ide_platform.c | 4 ++-- drivers/ide/macide.c | 4 ++-- drivers/ide/palm_bk3710.c | 2 +- drivers/ide/pmac.c | 11 ++++++----- drivers/ide/q40ide.c | 6 +++--- drivers/ide/rapide.c | 4 ++-- drivers/ide/scc_pata.c | 2 +- drivers/ide/setup-pci.c | 16 ++++++++-------- drivers/ide/sgiioc4.c | 4 ++-- drivers/ide/tx4938ide.c | 2 +- drivers/ide/tx4939ide.c | 2 +- 27 files changed, 58 insertions(+), 57 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c index 11fe1ffdff76..fc0949a8cfde 100644 --- a/drivers/ide/at91_ide.c +++ b/drivers/ide/at91_ide.c @@ -247,7 +247,7 @@ irqreturn_t at91_irq_handler(int irq, void *dev_id) static int __init at91_ide_probe(struct platform_device *pdev) { int ret; - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; struct ide_host *host; struct resource *res; unsigned long tf_base = 0, ctl_base = 0; diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c index 32f5be686018..58121bd6c115 100644 --- a/drivers/ide/au1xxx-ide.c +++ b/drivers/ide/au1xxx-ide.c @@ -449,7 +449,7 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d) } #endif -static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif) +static void auide_setup_ports(struct ide_hw *hw, _auide_hwif *ahwif) { int i; unsigned long *ata_regs = hw->io_ports_array; @@ -508,7 +508,7 @@ static int au_ide_probe(struct platform_device *dev) struct resource *res; struct ide_host *host; int ret = 0; - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) char *mode = "MWDMA2"; diff --git a/drivers/ide/buddha.c b/drivers/ide/buddha.c index 0450652cdabb..e3c6a5913305 100644 --- a/drivers/ide/buddha.c +++ b/drivers/ide/buddha.c @@ -121,7 +121,7 @@ static int xsurf_ack_intr(ide_hwif_t *hwif) return 1; } -static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base, +static void __init buddha_setup_ports(struct ide_hw *hw, unsigned long base, unsigned long ctl, unsigned long irq_port, ide_ack_intr_t *ack_intr) { @@ -160,7 +160,7 @@ static int __init buddha_init(void) while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { unsigned long board; - hw_regs_t hw[MAX_NUM_HWIFS], *hws[MAX_NUM_HWIFS]; + struct ide_hw hw[MAX_NUM_HWIFS], *hws[MAX_NUM_HWIFS]; if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) { buddha_num_hwifs = BUDDHA_NUM_HWIFS; diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c index edb3a7a35c80..1683ed5c7329 100644 --- a/drivers/ide/cmd640.c +++ b/drivers/ide/cmd640.c @@ -708,7 +708,7 @@ static int __init cmd640x_init(void) int second_port_cmd640 = 0, rc; const char *bus_type, *port2; u8 b, cfr; - hw_regs_t hw[2], *hws[2]; + struct ide_hw hw[2], *hws[2]; if (cmd640_vlb && probe_for_cmd640_vlb()) { bus_type = "VLB"; diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c index a9023d7843f2..bd066bb9d611 100644 --- a/drivers/ide/cs5520.c +++ 
b/drivers/ide/cs5520.c @@ -110,7 +110,7 @@ static const struct ide_port_info cyrix_chipset __devinitdata = { static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id) { const struct ide_port_info *d = &cyrix_chipset; - hw_regs_t hw[2], *hws[] = { NULL, NULL }; + struct ide_hw hw[2], *hws[] = { NULL, NULL }; ide_setup_pci_noise(dev, d); diff --git a/drivers/ide/delkin_cb.c b/drivers/ide/delkin_cb.c index d4a76f22ed15..1e10eba62ceb 100644 --- a/drivers/ide/delkin_cb.c +++ b/drivers/ide/delkin_cb.c @@ -77,7 +77,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) struct ide_host *host; unsigned long base; int rc; - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; rc = pci_enable_device(dev); if (rc) { diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c index adb5b0cf7626..22fa27389c3b 100644 --- a/drivers/ide/falconide.c +++ b/drivers/ide/falconide.c @@ -114,7 +114,7 @@ static const struct ide_port_info falconide_port_info = { .chipset = ide_generic, }; -static void __init falconide_setup_ports(hw_regs_t *hw) +static void __init falconide_setup_ports(struct ide_hw *hw) { int i; @@ -138,7 +138,7 @@ static void __init falconide_setup_ports(hw_regs_t *hw) static int __init falconide_init(void) { struct ide_host *host; - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; int rc; if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE)) diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c index 253ff34afd8f..4451a6a5dfe0 100644 --- a/drivers/ide/gayle.c +++ b/drivers/ide/gayle.c @@ -88,7 +88,7 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif) return 1; } -static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base, +static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base, unsigned long ctl, unsigned long irq_port, ide_ack_intr_t *ack_intr) { @@ -125,7 +125,7 @@ static int __init gayle_init(void) unsigned long base, ctrlport, irqport; ide_ack_intr_t *ack_intr; int a4000, i, rc; - hw_regs_t hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS]; + struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS]; if (!MACH_IS_AMIGA) return -ENODEV; diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c index 6223b80beb35..c5269fa1f733 100644 --- a/drivers/ide/icside.c +++ b/drivers/ide/icside.c @@ -381,7 +381,7 @@ static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d) return -EOPNOTSUPP; } -static void icside_setup_ports(hw_regs_t *hw, void __iomem *base, +static void icside_setup_ports(struct ide_hw *hw, void __iomem *base, struct cardinfo *info, struct expansion_card *ec) { unsigned long port = (unsigned long)base + info->dataoffset; @@ -410,7 +410,7 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec) { void __iomem *base; struct ide_host *host; - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; int ret; base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); @@ -467,7 +467,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) struct ide_host *host; unsigned int sel = 0; int ret; - hw_regs_t hw[2], *hws[] = { &hw[0], &hw[1] }; + struct ide_hw hw[2], *hws[] = { &hw[0], &hw[1] }; struct ide_port_info d = icside_v6_port_info; ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); diff --git a/drivers/ide/ide-4drives.c b/drivers/ide/ide-4drives.c index 189b8bd9957e..979d342c338a 100644 --- a/drivers/ide/ide-4drives.c +++ b/drivers/ide/ide-4drives.c @@ -31,7 +31,7 @@ static const struct ide_port_info 
ide_4drives_port_info = { static int __init ide_4drives_init(void) { unsigned long base = 0x1f0, ctl = 0x3f6; - hw_regs_t hw, *hws[] = { &hw, &hw }; + struct ide_hw hw, *hws[] = { &hw, &hw }; if (probe_4drives == 0) return -ENODEV; diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c index 63309ad04cb2..527908ff298c 100644 --- a/drivers/ide/ide-cs.c +++ b/drivers/ide/ide-cs.c @@ -164,7 +164,7 @@ static struct ide_host *idecs_register(unsigned long io, unsigned long ctl, struct ide_host *host; ide_hwif_t *hwif; int i, rc; - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; if (!request_region(io, 8, DRV_NAME)) { printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c index 0d40848540d4..54d7c4685d23 100644 --- a/drivers/ide/ide-generic.c +++ b/drivers/ide/ide-generic.c @@ -86,7 +86,7 @@ static void ide_generic_check_pci_legacy_iobases(int *primary, int *secondary) static int __init ide_generic_init(void) { - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; unsigned long io_addr; int i, rc = 0, primary = 0, secondary = 0; diff --git a/drivers/ide/ide-h8300.c b/drivers/ide/ide-h8300.c index 0b5fabe2806d..520f42c5445a 100644 --- a/drivers/ide/ide-h8300.c +++ b/drivers/ide/ide-h8300.c @@ -64,11 +64,11 @@ static const struct ide_tp_ops h8300_tp_ops = { #define H8300_IDE_GAP (2) -static inline void hw_setup(hw_regs_t *hw) +static inline void hw_setup(struct ide_hw *hw) { int i; - memset(hw, 0, sizeof(hw_regs_t)); + memset(hw, 0, sizeof(*hw)); for (i = 0; i <= 7; i++) hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i; hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT; @@ -83,7 +83,7 @@ static const struct ide_port_info h8300_port_info = { static int __init h8300_ide_init(void) { - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n"); diff --git a/drivers/ide/ide-legacy.c b/drivers/ide/ide-legacy.c index 98389e539909..b9654a7bb7be 100644 --- a/drivers/ide/ide-legacy.c +++ b/drivers/ide/ide-legacy.c @@ -1,7 +1,7 @@ #include #include -static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw, +static void ide_legacy_init_one(struct ide_hw **hws, struct ide_hw *hw, u8 port_no, const struct ide_port_info *d, unsigned long config) { @@ -40,7 +40,7 @@ static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw, int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config) { - hw_regs_t hw[2], *hws[] = { NULL, NULL }; + struct ide_hw hw[2], *hws[] = { NULL, NULL }; memset(&hw, 0, sizeof(hw)); diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c index 6bca0f05ee90..017b1df3b805 100644 --- a/drivers/ide/ide-pnp.c +++ b/drivers/ide/ide-pnp.c @@ -37,7 +37,7 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) struct ide_host *host; unsigned long base, ctl; int rc; - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n"); diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 6c7451a6e609..29363829a3fe 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -1173,7 +1173,7 @@ static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index) ide_port_init_devices_data(hwif); } -static void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) +static void ide_init_port_hw(ide_hwif_t *hwif, struct ide_hw *hw) { memcpy(&hwif->io_ports, &hw->io_ports, 
sizeof(hwif->io_ports)); hwif->irq = hw->irq; @@ -1261,8 +1261,8 @@ out_nomem: return -ENOMEM; } -struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws, - unsigned int n_ports) +struct ide_host *ide_host_alloc(const struct ide_port_info *d, + struct ide_hw **hws, unsigned int n_ports) { struct ide_host *host; struct device *dev = hws[0] ? hws[0]->dev : NULL; @@ -1349,7 +1349,7 @@ static void ide_disable_port(ide_hwif_t *hwif) } int ide_host_register(struct ide_host *host, const struct ide_port_info *d, - hw_regs_t **hws) + struct ide_hw **hws) { ide_hwif_t *hwif, *mate = NULL; int i, j = 0; @@ -1443,7 +1443,7 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d, } EXPORT_SYMBOL_GPL(ide_host_register); -int ide_host_add(const struct ide_port_info *d, hw_regs_t **hws, +int ide_host_add(const struct ide_port_info *d, struct ide_hw **hws, unsigned int n_ports, struct ide_host **hostp) { struct ide_host *host; diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c index 47413c2b5f8e..ee9b55ecc62b 100644 --- a/drivers/ide/ide_platform.c +++ b/drivers/ide/ide_platform.c @@ -21,7 +21,7 @@ #include #include -static void __devinit plat_ide_setup_ports(hw_regs_t *hw, +static void __devinit plat_ide_setup_ports(struct ide_hw *hw, void __iomem *base, void __iomem *ctrl, struct pata_platform_info *pdata, @@ -54,7 +54,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) struct pata_platform_info *pdata; struct ide_host *host; int ret = 0, mmio = 0; - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; struct ide_port_info d = platform_ide_port_info; pdata = pdev->dev.platform_data; diff --git a/drivers/ide/macide.c b/drivers/ide/macide.c index 31aa27818604..1447c8c90565 100644 --- a/drivers/ide/macide.c +++ b/drivers/ide/macide.c @@ -62,7 +62,7 @@ int macide_ack_intr(ide_hwif_t* hwif) return 0; } -static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base, +static void __init macide_setup_ports(struct ide_hw *hw, unsigned long base, int irq, ide_ack_intr_t *ack_intr) { int i; @@ -96,7 +96,7 @@ static int __init macide_init(void) ide_ack_intr_t *ack_intr; unsigned long base; int irq; - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; if (!MACH_IS_MAC) return -ENODEV; diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c index 4507a6d801bc..3c1dc0152153 100644 --- a/drivers/ide/palm_bk3710.c +++ b/drivers/ide/palm_bk3710.c @@ -316,7 +316,7 @@ static int __init palm_bk3710_probe(struct platform_device *pdev) void __iomem *base; unsigned long rate, mem_size; int i, rc; - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; clk = clk_get(&pdev->dev, "IDECLK"); if (IS_ERR(clk)) diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c index f4f806476e0a..97642a7a79c4 100644 --- a/drivers/ide/pmac.c +++ b/drivers/ide/pmac.c @@ -1023,13 +1023,14 @@ static const struct ide_port_info pmac_port_info = { * Setup, register & probe an IDE channel driven by this driver, this is * called by one of the 2 probe functions (macio or PCI). 
*/ -static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw) +static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, + struct ide_hw *hw) { struct device_node *np = pmif->node; const int *bidp; struct ide_host *host; ide_hwif_t *hwif; - hw_regs_t *hws[] = { hw }; + struct ide_hw *hws[] = { hw }; struct ide_port_info d = pmac_port_info; int rc; @@ -1124,7 +1125,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw) return 0; } -static void __devinit pmac_ide_init_ports(hw_regs_t *hw, unsigned long base) +static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base) { int i; @@ -1144,7 +1145,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) unsigned long regbase; pmac_ide_hwif_t *pmif; int irq, rc; - hw_regs_t hw; + struct ide_hw hw; pmif = kzalloc(sizeof(*pmif), GFP_KERNEL); if (pmif == NULL) @@ -1268,7 +1269,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id) void __iomem *base; unsigned long rbase, rlen; int rc; - hw_regs_t hw; + struct ide_hw hw; np = pci_device_to_OF_node(pdev); if (np == NULL) { diff --git a/drivers/ide/q40ide.c b/drivers/ide/q40ide.c index e46229fe5ea3..ab49a97023d9 100644 --- a/drivers/ide/q40ide.c +++ b/drivers/ide/q40ide.c @@ -51,11 +51,11 @@ static int q40ide_default_irq(unsigned long base) /* * Addresses are pretranslated for Q40 ISA access. */ -static void q40_ide_setup_ports(hw_regs_t *hw, unsigned long base, +static void q40_ide_setup_ports(struct ide_hw *hw, unsigned long base, ide_ack_intr_t *ack_intr, int irq) { - memset(hw, 0, sizeof(hw_regs_t)); + memset(hw, 0, sizeof(*hw)); /* BIG FAT WARNING: assumption: only DATA port is ever used in 16 bit mode */ hw->io_ports.data_addr = Q40_ISA_IO_W(base); @@ -135,7 +135,7 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={ static int __init q40ide_init(void) { int i; - hw_regs_t hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL }; + struct ide_hw hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL }; if (!MACH_IS_Q40) return -ENODEV; diff --git a/drivers/ide/rapide.c b/drivers/ide/rapide.c index c4da3dd39f5c..00f54248f41f 100644 --- a/drivers/ide/rapide.c +++ b/drivers/ide/rapide.c @@ -16,7 +16,7 @@ static const struct ide_port_info rapide_port_info = { .chipset = ide_generic, }; -static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base, +static void rapide_setup_ports(struct ide_hw *hw, void __iomem *base, void __iomem *ctrl, unsigned int sz, int irq) { unsigned long port = (unsigned long)base; @@ -36,7 +36,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id) void __iomem *base; struct ide_host *host; int ret; - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; ret = ecard_request_resources(ec); if (ret) diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c index 9415f8c8a41d..1104bb301eb9 100644 --- a/drivers/ide/scc_pata.c +++ b/drivers/ide/scc_pata.c @@ -559,7 +559,7 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev, { struct scc_ports *ports = pci_get_drvdata(dev); struct ide_host *host; - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; int i, rc; memset(&hw, 0, sizeof(hw)); diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c index d78f4c994517..5314edffc303 100644 --- a/drivers/ide/setup-pci.c +++ b/drivers/ide/setup-pci.c @@ -301,11 +301,11 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info * } /** - * ide_hw_configure - configure a hw_regs_t 
instance + * ide_hw_configure - configure a struct ide_hw instance * @dev: PCI device holding interface * @d: IDE port info * @port: port number - * @hw: hw_regs_t instance corresponding to this port + * @hw: struct ide_hw instance corresponding to this port * * Perform the initial set up for the hardware interface structure. This * is done per interface port rather than per PCI device. There may be @@ -315,7 +315,7 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info * */ static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d, - unsigned int port, hw_regs_t *hw) + unsigned int port, struct ide_hw *hw) { unsigned long ctl = 0, base = 0; @@ -445,8 +445,8 @@ out: * ide_pci_setup_ports - configure ports/devices on PCI IDE * @dev: PCI device * @d: IDE port info - * @hw: hw_regs_t instances corresponding to this PCI IDE device - * @hws: hw_regs_t pointers table to update + * @hw: struct ide_hw instances corresponding to this PCI IDE device + * @hws: struct ide_hw pointers table to update * * Scan the interfaces attached to this device and do any * necessary per port setup. Attach the devices and ask the @@ -458,7 +458,7 @@ out: */ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, - hw_regs_t *hw, hw_regs_t **hws) + struct ide_hw *hw, struct ide_hw **hws) { int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port; u8 tmp; @@ -538,7 +538,7 @@ int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d, void *priv) { struct ide_host *host; - hw_regs_t hw[2], *hws[] = { NULL, NULL }; + struct ide_hw hw[2], *hws[] = { NULL, NULL }; int ret; ret = ide_setup_pci_controller(dev, d, 1); @@ -586,7 +586,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, struct pci_dev *pdev[] = { dev1, dev2 }; struct ide_host *host; int ret, i; - hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL }; + struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL }; for (i = 0; i < 2; i++) { ret = ide_setup_pci_controller(pdev[i], d, !i); diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c index 3f8ee357ffb3..5f37f168f944 100644 --- a/drivers/ide/sgiioc4.c +++ b/drivers/ide/sgiioc4.c @@ -91,7 +91,7 @@ typedef struct { static void -sgiioc4_init_hwif_ports(hw_regs_t * hw, unsigned long data_port, +sgiioc4_init_hwif_ports(struct ide_hw *hw, unsigned long data_port, unsigned long ctrl_port, unsigned long irq_port) { unsigned long reg = data_port; @@ -546,7 +546,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) unsigned long cmd_base, irqport; unsigned long bar0, cmd_phys_base, ctl; void __iomem *virt_base; - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; int rc; /* Get the CmdBlk and CtrlBlk Base Registers */ diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c index 16adc18499fa..ea89fddeed91 100644 --- a/drivers/ide/tx4938ide.c +++ b/drivers/ide/tx4938ide.c @@ -130,7 +130,7 @@ static const struct ide_port_info tx4938ide_port_info __initdata = { static int __init tx4938ide_probe(struct platform_device *pdev) { - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; struct ide_host *host; struct resource *res; struct tx4938ide_platform_info *pdata = pdev->dev.platform_data; diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c index fa57920d003a..9f73fd43d1f4 100644 --- a/drivers/ide/tx4939ide.c +++ b/drivers/ide/tx4939ide.c @@ -537,7 +537,7 @@ static const struct ide_port_info tx4939ide_port_info __initdata = { static int __init 
tx4939ide_probe(struct platform_device *pdev) { - hw_regs_t hw, *hws[] = { &hw }; + struct ide_hw hw, *hws[] = { &hw }; struct ide_host *host; struct resource *res; int irq, ret; -- cgit v1.2.3 From ec976d6eb021dc8f2994248c310a41540f4756bd Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Wed, 13 May 2009 22:52:24 +0100 Subject: [ARM] S3C24XX: GPIO: Move gpio functions out of <mach/hardware.h> Move all the gpio functions out of <mach/hardware.h> as this file is for defining the generic IO base addresses for the kernel IO calls. Make a new header <mach/gpio-fns.h> to take this and include it via the chain from <linux/gpio.h> which is what most of these files should be using (and will be changed as soon as possible). Note, this does make minor changes to some drivers but should not mess up any pending merges. CC: Richard Purdie Acked-by: Mark Brown CC: David Brownell Signed-off-by: Ben Dooks --- drivers/leds/leds-h1940.c | 2 ++ drivers/leds/leds-s3c24xx.c | 1 + drivers/mmc/host/s3cmci.c | 3 ++- drivers/spi/spi_s3c24xx_gpio.c | 1 + 4 files changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/leds/leds-h1940.c b/drivers/leds/leds-h1940.c index 1aa46a390a0d..173d104d9ff2 100644 --- a/drivers/leds/leds-h1940.c +++ b/drivers/leds/leds-h1940.c @@ -16,6 +16,8 @@ #include #include #include +#include + #include #include #include diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c index aa2e7ae0cdae..aa7acf3b9224 100644 --- a/drivers/leds/leds-s3c24xx.c +++ b/drivers/leds/leds-s3c24xx.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 2db166b7096f..2e7da8e853cf 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -1121,7 +1122,7 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) case MMC_POWER_OFF: default: s3c2410_gpio_setpin(S3C2410_GPE5, 0); - s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_OUTP); + s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPIO_OUTPUT); if (host->is2440) mci_con |= S3C2440_SDICON_SDRESET; diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c index f2447a5476bb..bbf9371cd284 100644 --- a/drivers/spi/spi_s3c24xx_gpio.c +++ b/drivers/spi/spi_s3c24xx_gpio.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include -- cgit v1.2.3 From 89f536ccfa8b370ff4d054f4061858ca9322c25a Mon Sep 17 00:00:00 2001 From: Stephane Chatty Date: Wed, 20 May 2009 15:41:24 +0200 Subject: HID: add new multitouch and digitizer constants Added constants to hid.h for all digitizer usages (including the new multitouch ones that are not yet in the official USB spec but are being pushed by Microsoft as described in their paper "Digitizer Drivers for Windows Touch and Pen-Based Computers"). Updated hid-debug.c to support the new MT input constants such as ABS_MT_POSITION_X.
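Note: the include/linux/hid.h half of this patch is filtered out by the "(limited to 'drivers')" view of this log. Reconstructed from the debug table below (digitizer usage page 0x0d), the new usage defines are expected to look roughly like this, alongside the existing HID_UP_DIGITIZER page code:

	#define HID_DG_CONFIDENCE	0x000d0047
	#define HID_DG_WIDTH		0x000d0048
	#define HID_DG_HEIGHT		0x000d0049
	#define HID_DG_CONTACTID	0x000d0051
	#define HID_DG_INPUTMODE	0x000d0052
	#define HID_DG_DEVICEINDEX	0x000d0053
	#define HID_DG_CONTACTCOUNT	0x000d0054
	#define HID_DG_CONTACTMAX	0x000d0055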
Signed-off-by: Stephane Chatty Signed-off-by: Jiri Kosina --- drivers/hid/hid-debug.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 47ac1a7d66e1..04359ed64b87 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -137,6 +137,14 @@ static const struct hid_usage_entry hid_usage_table[] = { {0, 0x44, "BarrelSwitch"}, {0, 0x45, "Eraser"}, {0, 0x46, "TabletPick"}, + {0, 0x47, "Confidence"}, + {0, 0x48, "Width"}, + {0, 0x49, "Height"}, + {0, 0x51, "ContactID"}, + {0, 0x52, "InputMode"}, + {0, 0x53, "DeviceIndex"}, + {0, 0x54, "ContactCount"}, + {0, 0x55, "ContactMaximumNumber"}, { 15, 0, "PhysicalInterfaceDevice" }, {0, 0x00, "Undefined"}, {0, 0x01, "Physical_Interface_Device"}, @@ -514,9 +522,11 @@ static const char *events[EV_MAX + 1] = { [EV_FF_STATUS] = "ForceFeedbackStatus", }; -static const char *syncs[2] = { +static const char *syncs[3] = { [SYN_REPORT] = "Report", [SYN_CONFIG] = "Config", + [SYN_MT_REPORT] = "MT Report", }; + static const char *keys[KEY_MAX + 1] = { [KEY_RESERVED] = "Reserved", [KEY_ESC] = "Esc", [KEY_1] = "1", [KEY_2] = "2", @@ -734,8 +744,17 @@ static const char *absolutes[ABS_MAX + 1] = { [ABS_HAT2Y] = "Hat2Y", [ABS_HAT3X] = "Hat3X", [ABS_HAT3Y] = "Hat 3Y", [ABS_PRESSURE] = "Pressure", [ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt", - [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "Tool Width", + [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "ToolWidth", [ABS_VOLUME] = "Volume", [ABS_MISC] = "Misc", + [ABS_MT_TOUCH_MAJOR] = "MTMajor", + [ABS_MT_TOUCH_MINOR] = "MTMinor", + [ABS_MT_WIDTH_MAJOR] = "MTMajorW", + [ABS_MT_WIDTH_MINOR] = "MTMinorW", + [ABS_MT_ORIENTATION] = "MTOrientation", + [ABS_MT_POSITION_X] = "MTPositionX", + [ABS_MT_POSITION_Y] = "MTPositionY", + [ABS_MT_TOOL_TYPE] = "MTToolType", + [ABS_MT_BLOB_ID] = "MTBlobID", }; static const char *misc[MSC_MAX + 1] = { -- cgit v1.2.3 From 57fd637ad9ac6b13c1c47b9a0ced4ee99bb26e76 Mon Sep 17 00:00:00 2001 From: Stephane Chatty Date: Wed, 20 May 2009 15:49:35 +0200 Subject: HID: Multitouch support for the N-Trig touchscreen Adds support for multitouch interaction on the N-Trig touchscreen, using the new ABS_MT_* input constants. Single touch support works as previously. This code was tested against two versions of the N-Trig firmware: one that supports dual pen/finger single touch, and one that supports finger multitouch but no pen at all. Copyright notices that looked wrong were removed, as it seems that there is only code written in 2009 by Rafi Rubin and Stephane Chatty in this file.
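For context (not part of the patch text): the multitouch protocol used below emits one block of ABS_MT_* events per contact, each closed with input_mt_sync(), and the input core then ends the frame with the usual sync. A minimal sketch of a two-finger frame, with x0/y0 and x1/y1 standing in for values decoded from the report:

	input_event(input, EV_ABS, ABS_MT_POSITION_X, x0);
	input_event(input, EV_ABS, ABS_MT_POSITION_Y, y0);
	input_mt_sync(input);
	input_event(input, EV_ABS, ABS_MT_POSITION_X, x1);
	input_event(input, EV_ABS, ABS_MT_POSITION_Y, y1);
	input_mt_sync(input);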
Signed-off-by: Stephane Chatty Signed-off-by: Jiri Kosina --- drivers/hid/hid-ntrig.c | 222 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 211 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c index c5b252be9c21..75ed9d2c1a36 100644 --- a/drivers/hid/hid-ntrig.c +++ b/drivers/hid/hid-ntrig.c @@ -1,13 +1,8 @@ /* - * HID driver for some ntrig "special" devices + * HID driver for N-Trig touchscreens * - * Copyright (c) 1999 Andreas Gal - * Copyright (c) 2000-2005 Vojtech Pavlik - * Copyright (c) 2005 Michael Haboustak for Concept2, Inc - * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley - * Copyright (c) 2008 Jiri Slaby * Copyright (c) 2008 Rafi Rubin + * Copyright (c) 2009 Stephane Chatty * */ @@ -29,15 +24,79 @@ #define nt_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ EV_KEY, (c)) +struct ntrig_data { + __s32 x, y, id, w, h; + char reading_a_point, found_contact_id; +}; + +/* + * this driver is aimed at two firmware versions in circulation: + * - dual pen/finger single touch + * - finger multitouch, pen not working + */ + static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { - if ((usage->hid & HID_USAGE_PAGE) == HID_UP_DIGITIZER && - (usage->hid & 0xff) == 0x47) { - nt_map_key_clear(BTN_TOOL_DOUBLETAP); - return 1; + switch (usage->hid & HID_USAGE_PAGE) { + + case HID_UP_GENDESK: + switch (usage->hid) { + case HID_GD_X: + hid_map_usage(hi, usage, bit, max, + EV_ABS, ABS_MT_POSITION_X); + input_set_abs_params(hi->input, ABS_X, + field->logical_minimum, + field->logical_maximum, 0, 0); + return 1; + case HID_GD_Y: + hid_map_usage(hi, usage, bit, max, + EV_ABS, ABS_MT_POSITION_Y); + input_set_abs_params(hi->input, ABS_Y, + field->logical_minimum, + field->logical_maximum, 0, 0); + return 1; + } + return 0; + + case HID_UP_DIGITIZER: + switch (usage->hid) { + /* we do not want to map these for now */ + case HID_DG_INVERT: /* value is always 0 */ + case HID_DG_ERASER: /* value is always 0 */ + case HID_DG_CONTACTID: /* value is useless */ + case HID_DG_BARRELSWITCH: /* doubtful */ + case HID_DG_INPUTMODE: + case HID_DG_DEVICEINDEX: + case HID_DG_CONTACTCOUNT: + case HID_DG_CONTACTMAX: + return -1; + + /* original mapping by Rafi Rubin */ + case HID_DG_CONFIDENCE: + nt_map_key_clear(BTN_TOOL_DOUBLETAP); + return 1; + + /* width/height mapped on TouchMajor/TouchMinor/Orientation */ + case HID_DG_WIDTH: + hid_map_usage(hi, usage, bit, max, + EV_ABS, ABS_MT_TOUCH_MAJOR); + return 1; + case HID_DG_HEIGHT: + hid_map_usage(hi, usage, bit, max, + EV_ABS, ABS_MT_TOUCH_MINOR); + input_set_abs_params(hi->input, ABS_MT_ORIENTATION, + 0, 1, 0, 0); + return 1; + } + return 0; + + case 0xff000000: + /* we do not want to map these: no input-oriented meaning */ + return -1; } + return 0; } @@ -51,6 +110,138 @@ static int ntrig_input_mapped(struct hid_device *hdev, struct hid_input *hi, return 0; } + +/* + * this function is called upon all reports + * so that we can filter contact point information, + * decide whether we are in multi or single touch mode + * and call input_mt_sync after each point if necessary + */ +static int ntrig_event (struct hid_device *hid, struct hid_field *field, + struct hid_usage *usage, __s32 value) +{ + struct input_dev *input = field->hidinput->input; + struct ntrig_data *nd = hid_get_drvdata(hid); + + if (hid->claimed & HID_CLAIMED_INPUT) { + switch 
(usage->hid) { + case HID_GD_X: + nd->x = value; + nd->reading_a_point = 1; + break; + case HID_GD_Y: + nd->y = value; + break; + case HID_DG_CONTACTID: + nd->id = value; + /* we receive this only when in multitouch mode */ + nd->found_contact_id = 1; + break; + case HID_DG_WIDTH: + nd->w = value; + break; + case HID_DG_HEIGHT: + nd->h = value; + /* + * when in single touch mode, this is the last + * report received in a finger event. We want + * to emit a normal (X, Y) position + */ + if (! nd->found_contact_id) { + input_event(input, EV_ABS, ABS_X, nd->x); + input_event(input, EV_ABS, ABS_Y, nd->y); + } + break; + case HID_DG_TIPPRESSURE: + /* + * when in single touch mode, this is the last + * report received in a pen event. We want + * to emit a normal (X, Y) position + */ + if (! nd->found_contact_id) { + input_event(input, EV_ABS, ABS_X, nd->x); + input_event(input, EV_ABS, ABS_Y, nd->y); + input_event(input, EV_ABS, ABS_PRESSURE, value); + } + break; + case 0xff000002: + /* + * we receive this when the device is in multitouch + * mode. The first of the three values tagged with + * this usage tells if the contact point is real + * or a placeholder + */ + if (!nd->reading_a_point || value != 1) + break; + /* emit a normal (X, Y) for the first point only */ + if (nd->id == 0) { + input_event(input, EV_ABS, ABS_X, nd->x); + input_event(input, EV_ABS, ABS_Y, nd->y); + } + input_event(input, EV_ABS, ABS_MT_POSITION_X, nd->x); + input_event(input, EV_ABS, ABS_MT_POSITION_Y, nd->y); + if (nd->w > nd->h) { + input_event(input, EV_ABS, + ABS_MT_ORIENTATION, 1); + input_event(input, EV_ABS, + ABS_MT_TOUCH_MAJOR, nd->w); + input_event(input, EV_ABS, + ABS_MT_TOUCH_MINOR, nd->h); + } else { + input_event(input, EV_ABS, + ABS_MT_ORIENTATION, 0); + input_event(input, EV_ABS, + ABS_MT_TOUCH_MAJOR, nd->h); + input_event(input, EV_ABS, + ABS_MT_TOUCH_MINOR, nd->w); + } + input_mt_sync(field->hidinput->input); + nd->reading_a_point = 0; + nd->found_contact_id = 0; + break; + + default: + /* fallback to the generic hidinput handling */ + return 0; + } + } + + /* we have handled the hidinput part, now remains hiddev */ + if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) + hid->hiddev_hid_event(hid, field, usage, value); + + return 1; +} + +static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id) +{ + int ret; + struct ntrig_data *nd; + + nd = kmalloc(sizeof(struct ntrig_data), GFP_KERNEL); + if (!nd) { + dev_err(&hdev->dev, "cannot allocate N-Trig data\n"); + return -ENOMEM; + } + nd->reading_a_point = 0; + nd->found_contact_id = 0; + hid_set_drvdata(hdev, nd); + + ret = hid_parse(hdev); + if (!ret) + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + + if (ret) + kfree (nd); + return ret; +} + +static void ntrig_remove(struct hid_device *hdev) +{ + hid_hw_stop(hdev); + kfree(hid_get_drvdata(hdev)); +} + static const struct hid_device_id ntrig_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN), .driver_data = NTRIG_DUPLICATE_USAGES }, @@ -58,11 +249,20 @@ static const struct hid_device_id ntrig_devices[] = { }; MODULE_DEVICE_TABLE(hid, ntrig_devices); +static const struct hid_usage_id ntrig_grabbed_usages[] = { + { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, + { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} +}; + static struct hid_driver ntrig_driver = { .name = "ntrig", .id_table = ntrig_devices, + .probe = ntrig_probe, + .remove = ntrig_remove, .input_mapping = ntrig_input_mapping, .input_mapped = ntrig_input_mapped, + .usage_table = 
ntrig_grabbed_usages, + .event = ntrig_event, }; static int ntrig_init(void) -- cgit v1.2.3 From 0003b795c310da83501fcf0329f6be7a0984647d Mon Sep 17 00:00:00 2001 From: Eric Lammerts Date: Tue, 19 May 2009 20:53:20 -0400 Subject: fix oops when using console=ttymxcN with N > 0 Signed-off-by: Eric Lammerts Signed-off-by: Sascha Hauer --- drivers/serial/imx.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c index 3f5d5a200481..738c8a5f64f2 100644 --- a/drivers/serial/imx.c +++ b/drivers/serial/imx.c @@ -1024,6 +1024,8 @@ imx_console_setup(struct console *co, char *options) if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports)) co->index = 0; sport = imx_ports[co->index]; + if(sport == NULL) + return -ENODEV; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); -- cgit v1.2.3 From d53b48d512ef477c939aba09c7e258b8dc331b6a Mon Sep 17 00:00:00 2001 From: Seokmann Ju Date: Mon, 6 Apr 2009 22:33:37 -0700 Subject: [SCSI] qla2xxx: Correct bus-reset behaviour with recent ISPs. The short-circuit to skip the non-applicable 'full-login-lip' process on 81xx ISPs was nested too deeply in the 'bus-reset' routine, as the code in qla2x00_loop_reset() should skip the whole enable_lip_full_login process. The original code could cause device tear-down due to the qla2x00_wait_for_loop_ready() call taking a large amount of time. Signed-off-by: Seokmann Ju Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_mbx.c | 3 --- drivers/scsi/qla2xxx/qla_os.c | 3 ++- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index e67c1660bf46..14584380ad26 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1864,9 +1864,6 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - if (IS_QLA81XX(vha->hw)) - return QLA_SUCCESS; - DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", vha->host_no)); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e4fdcdad80d0..29234ba42b42 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1037,7 +1037,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) struct fc_port *fcport; struct qla_hw_data *ha = vha->hw; - if (ha->flags.enable_lip_full_login && !vha->vp_idx) { + if (ha->flags.enable_lip_full_login && !vha->vp_idx && + !IS_QLA81XX(ha)) { ret = qla2x00_full_login_lip(vha); if (ret != QLA_SUCCESS) { DEBUG2_3(printk("%s(%ld): failed: " -- cgit v1.2.3 From bad7001c200458c24864df6f2b1b66548bca7c75 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Mon, 6 Apr 2009 22:33:38 -0700 Subject: [SCSI] qla2xxx: Export additional FCoE attributes for application support. Cull and export VN_Port MAC address and VLAN_ID information on supported FCoE ISPs. 
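As a usage sketch (not part of the patch), the new attributes surface under the scsi_host sysfs class; a hypothetical userspace reader, assuming host0 is a supported FCoE ISP:

	#include <stdio.h>

	int main(void)
	{
		char buf[64];
		FILE *f = fopen("/sys/class/scsi_host/host0/vlan_id", "r");

		if (f && fgets(buf, sizeof(buf), f))
			printf("FCoE VLAN ID: %s", buf);
		if (f)
			fclose(f);
		return 0;
	}

The companion vn_port_mac_address attribute in the same directory reads back as a colon-separated MAC string.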
Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 32 ++++++++++++++++++++++++++++++++ drivers/scsi/qla2xxx/qla_def.h | 5 +++++ drivers/scsi/qla2xxx/qla_mbx.c | 14 +++++++++++++- 3 files changed, 50 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index b09993a06576..5d44e3e6488c 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1088,6 +1088,33 @@ qla2x00_flash_block_size_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); } +static ssize_t +qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + if (!IS_QLA81XX(vha->hw)) + return snprintf(buf, PAGE_SIZE, "\n"); + + return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id); +} + +static ssize_t +qla2x00_vn_port_mac_address_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + if (!IS_QLA81XX(vha->hw)) + return snprintf(buf, PAGE_SIZE, "\n"); + + return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n", + vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4], + vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2], + vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]); +} + static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); @@ -1116,6 +1143,9 @@ static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL); static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL); static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show, NULL); +static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL); +static DEVICE_ATTR(vn_port_mac_address, S_IRUGO, + qla2x00_vn_port_mac_address_show, NULL); struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_driver_version, @@ -1138,6 +1168,8 @@ struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_mpi_version, &dev_attr_phy_version, &dev_attr_flash_block_size, + &dev_attr_vlan_id, + &dev_attr_vn_port_mac_address, NULL, }; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 714ee67567e1..645cfd9e6cf6 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2618,6 +2618,11 @@ typedef struct scsi_qla_host { uint8_t node_name[WWN_SIZE]; uint8_t port_name[WWN_SIZE]; uint8_t fabric_node_name[WWN_SIZE]; + + uint16_t fcoe_vlan_id; + uint16_t fcoe_fcf_idx; + uint8_t fcoe_vn_port_mac[6]; + uint32_t vp_abort_cnt; struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 14584380ad26..dc5a1fe19210 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -931,6 +931,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_0; mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + if (IS_QLA81XX(vha->hw)) + mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -952,9 +954,19 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): 
failed=%x.\n", vha->host_no, rval)); } else { - /*EMPTY*/ DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", vha->host_no)); + + if (IS_QLA81XX(vha->hw)) { + vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; + vha->fcoe_fcf_idx = mcp->mb[10]; + vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8; + vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff; + vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8; + vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff; + vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8; + vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff; + } } return rval; -- cgit v1.2.3 From 7640335ea5b1a2da0d64303e6003012c619ae01a Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Mon, 6 Apr 2009 22:33:39 -0700 Subject: [SCSI] qla2xxx: Correct compilation failures when DEBUG'n' options are enabled. Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_init.c | 10 +++++----- drivers/scsi/qla2xxx/qla_mbx.c | 2 +- drivers/scsi/qla2xxx/qla_sup.c | 19 +++++++++++-------- 3 files changed, 17 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index bd7dd84c0648..9a343ec67567 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -634,7 +634,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) goto chip_diag_failed; DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n", - ha->host_no)); + vha->host_no)); /* Reset RISC processor. */ WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); @@ -655,7 +655,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) goto chip_diag_failed; /* Check product ID of chip */ - DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", ha->host_no)); + DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no)); mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); @@ -2110,7 +2110,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) goto cleanup_allocation; DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", - ha->host_no, entries)); + vha->host_no, entries)); DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, entries * sizeof(struct gid_list_info))); @@ -3587,7 +3587,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) chksum += le32_to_cpu(*dptr++); - DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); + DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); /* Bad NVRAM data, set defaults parameters. */ @@ -4305,7 +4305,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) chksum += le32_to_cpu(*dptr++); - DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); + DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); /* Bad NVRAM data, set defaults parameters. 
*/ diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index dc5a1fe19210..4f7e94c4daaa 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3393,7 +3393,7 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr, DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, vha->host_no, rval, mcp->mb[0])); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); + DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } return rval; diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 152ecfc26cd2..81187a0246cd 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -219,8 +219,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data) wait_cnt = NVR_WAIT_CNT; do { if (!--wait_cnt) { - DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n", - __func__, vha->host_no)); + DEBUG9_10(qla_printk(KERN_WARNING, ha, + "NVRAM didn't go ready...\n")); break; } NVRAM_DELAY(); @@ -349,7 +349,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha) wait_cnt = NVR_WAIT_CNT; do { if (!--wait_cnt) { - DEBUG9_10(qla_printk( + DEBUG9_10(qla_printk(KERN_WARNING, ha, "NVRAM didn't go ready...\n")); break; } @@ -408,7 +408,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat) wait_cnt = NVR_WAIT_CNT; do { if (!--wait_cnt) { - DEBUG9_10(qla_printk("NVRAM didn't go ready...\n")); + DEBUG9_10(qla_printk(KERN_WARNING, ha, + "NVRAM didn't go ready...\n")); break; } NVRAM_DELAY(); @@ -1079,8 +1080,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, 0xff0000) | ((fdata >> 16) & 0xff)); ret = qla24xx_erase_sector(vha, fdata); if (ret != QLA_SUCCESS) { - DEBUG9(qla_printk("Unable to erase sector: " - "address=%x.\n", faddr)); + DEBUG9(qla_printk(KERN_WARNING, ha, + "Unable to erase sector: address=%x.\n", + faddr)); break; } } @@ -1240,8 +1242,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, ret = qla24xx_write_flash_dword(ha, nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr)); if (ret != QLA_SUCCESS) { - DEBUG9(qla_printk("Unable to program nvram address=%x " - "data=%x.\n", naddr, *dwptr)); + DEBUG9(qla_printk(KERN_WARNING, ha, + "Unable to program nvram address=%x data=%x.\n", + naddr, *dwptr)); break; } } -- cgit v1.2.3 From 2afa19a9377ca61b9489e44bf50029574fbe63be Mon Sep 17 00:00:00 2001 From: Anirban Chakraborty Date: Mon, 6 Apr 2009 22:33:40 -0700 Subject: [SCSI] qla2xxx: Add QoS support. Set the number of request queues to the module parameter ql2xmaxqueues. Each vport gets a request queue. The QoS value set on the request queues determines priority control for queued IOs. If no QoS value is specified, the vports use the default queue 0.
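The queue selection this patch adds to qla24xx_vport_create() is easier to follow outside the diff. The sketch below is illustrative only: the names come from the patch, but the WWN match is abbreviated to memcmp() over the port and node names, and the warning printks are dropped.

/*
 * Illustrative sketch of the per-vport queue selection (not the
 * literal patch code; WWN matching and error reporting simplified).
 */
static void qla_vport_pick_req_que(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t qos = 0;
	int cnt, id = 0;

	/* Flex ports and single-queue adapters stay on default queue 0. */
	if (ha->cur_vport_count <= ha->flex_port_count ||
	    ha->max_req_queues == 1 || !ha->npiv_info)
		goto out;

	/* Look up this vport's QoS value in the NPIV flash table. */
	for (cnt = ha->flex_port_count; cnt < ha->nvram_npiv_size; cnt++) {
		if (!memcmp(ha->npiv_info[cnt].port_name, vha->port_name,
		    WWN_SIZE) &&
		    !memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
		    WWN_SIZE)) {
			qos = ha->npiv_info[cnt].q_qos;
			break;
		}
	}

	/* A non-zero QoS value buys the vport a dedicated request queue. */
	if (qos)
		id = qla25xx_create_req_que(ha, 0, vha->vp_idx, 0, 0, qos);
out:
	vha->req = ha->req_q_map[id];	/* id 0 falls back to the default queue */
}

For example, loading the driver with ql2xmaxqueues=4 makes up to four request queues available; the probe path below caps the value at QLA_MQ_SIZE and by the MSI-X vectors the board reports.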
Signed-off-by: Anirban Chakraborty Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 47 +++++--- drivers/scsi/qla2xxx/qla_dbg.c | 7 +- drivers/scsi/qla2xxx/qla_def.h | 23 ++-- drivers/scsi/qla2xxx/qla_gbl.h | 27 +++-- drivers/scsi/qla2xxx/qla_init.c | 30 +++-- drivers/scsi/qla2xxx/qla_iocb.c | 19 ++-- drivers/scsi/qla2xxx/qla_isr.c | 242 ++++++++++++++++------------------------ drivers/scsi/qla2xxx/qla_mbx.c | 63 +++++++---- drivers/scsi/qla2xxx/qla_mid.c | 107 ++++++++---------- drivers/scsi/qla2xxx/qla_os.c | 135 +++++++++++----------- drivers/scsi/qla2xxx/qla_sup.c | 1 + 11 files changed, 333 insertions(+), 368 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 5d44e3e6488c..bda6658d4fbf 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1469,11 +1469,12 @@ static int qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) { int ret = 0; - int cnt = 0; - uint8_t qos = QLA_DEFAULT_QUE_QOS; + uint8_t qos = 0; scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); scsi_qla_host_t *vha = NULL; struct qla_hw_data *ha = base_vha->hw; + uint16_t options = 0; + int cnt; ret = qla24xx_vport_create_req_sanity_check(fc_vport); if (ret) { @@ -1529,23 +1530,35 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) qla24xx_vport_disable(fc_vport, disable); - /* Create a queue pair for the vport */ - if (ha->mqenable) { - if (ha->npiv_info) { - for (; cnt < ha->nvram_npiv_size; cnt++) { - if (ha->npiv_info[cnt].port_name == - vha->port_name && - ha->npiv_info[cnt].node_name == - vha->node_name) { - qos = ha->npiv_info[cnt].q_qos; - break; - } - } + ret = 0; + if (ha->cur_vport_count <= ha->flex_port_count + || ha->max_req_queues == 1 || !ha->npiv_info) + goto vport_queue; + /* Create a request queue in QoS mode for the vport */ + for (cnt = ha->flex_port_count; cnt < ha->nvram_npiv_size; cnt++) { + if (ha->npiv_info[cnt].port_name == vha->port_name && + ha->npiv_info[cnt].node_name == vha->node_name) { + qos = ha->npiv_info[cnt].q_qos; + break; } - qla25xx_create_queues(vha, qos); + } + if (qos) { + ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0, + qos); + if (!ret) + qla_printk(KERN_WARNING, ha, + "Can't create request queue for vp_idx:%d\n", + vha->vp_idx); + else + DEBUG2(qla_printk(KERN_INFO, ha, + "Request Que:%d created for vp_idx:%d\n", + ret, vha->vp_idx)); } +vport_queue: + vha->req = ha->req_q_map[ret]; return 0; + vport_create_failed_2: qla24xx_disable_vp(vha); qla24xx_deallocate_vp_id(vha); @@ -1586,8 +1599,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) vha->host_no, vha->vp_idx, vha)); } - if (ha->mqenable) { - if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS) + if (vha->req->id) { + if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) qla_printk(KERN_WARNING, ha, "Queue delete failed.\n"); } diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 34760f8d4f17..68671a2b8b7f 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -351,7 +351,7 @@ static inline void * qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) { uint32_t cnt, que_idx; - uint8_t req_cnt, rsp_cnt, que_cnt; + uint8_t que_cnt; struct qla2xxx_mq_chain *mq = ptr; struct device_reg_25xxmq __iomem *reg; @@ -363,9 +363,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) mq->type = __constant_htonl(DUMP_CHAIN_MQ); mq->chain_size 
= __constant_htonl(sizeof(struct qla2xxx_mq_chain)); - req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues); - rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); - que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt; + que_cnt = ha->max_req_queues > ha->max_rsp_queues ? + ha->max_req_queues : ha->max_rsp_queues; mq->count = htonl(que_cnt); for (cnt = 0; cnt < que_cnt; cnt++) { reg = (struct device_reg_25xxmq *) ((void *) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 645cfd9e6cf6..57d659cf99ee 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -93,6 +93,7 @@ #define LSD(x) ((uint32_t)((uint64_t)(x))) #define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) +#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y))) /* * I/O register @@ -179,6 +180,7 @@ #define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */ #define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ #define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ +#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ struct req_que; @@ -2008,7 +2010,8 @@ typedef struct vport_params { #define VP_RET_CODE_NOT_FOUND 6 struct qla_hw_data; - +struct req_que; +struct rsp_que; /* * ISP operations */ @@ -2030,10 +2033,9 @@ struct isp_operations { void (*enable_intrs) (struct qla_hw_data *); void (*disable_intrs) (struct qla_hw_data *); - int (*abort_command) (struct scsi_qla_host *, srb_t *, - struct req_que *); - int (*target_reset) (struct fc_port *, unsigned int); - int (*lun_reset) (struct fc_port *, unsigned int); + int (*abort_command) (srb_t *); + int (*target_reset) (struct fc_port *, unsigned int, int); + int (*lun_reset) (struct fc_port *, unsigned int, int); int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, uint8_t, uint8_t, uint16_t *, uint8_t); int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t, @@ -2079,7 +2081,6 @@ struct isp_operations { #define QLA_PCI_MSIX_CONTROL 0xa2 struct scsi_qla_host; -struct rsp_que; struct qla_msix_entry { int have_irq; @@ -2140,7 +2141,6 @@ struct qla_statistics { #define MBC_INITIALIZE_MULTIQ 0x1f #define QLA_QUE_PAGE 0X1000 #define QLA_MQ_SIZE 32 -#define QLA_MAX_HOST_QUES 16 #define QLA_MAX_QUEUES 256 #define ISP_QUE_REG(ha, id) \ ((ha->mqenable) ? \ @@ -2170,6 +2170,7 @@ struct rsp_que { struct qla_hw_data *hw; struct qla_msix_entry *msix; struct req_que *req; + srb_t *status_srb; /* status continuation entry */ }; /* Request queue data structure */ @@ -2246,7 +2247,8 @@ struct qla_hw_data { struct rsp_que **rsp_q_map; unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; - uint16_t max_queues; + uint8_t max_req_queues; + uint8_t max_rsp_queues; struct qla_npiv_entry *npiv_info; uint16_t nvram_npiv_size; @@ -2532,6 +2534,7 @@ struct qla_hw_data { uint16_t num_vsans; /* number of vsan created */ uint16_t max_npiv_vports; /* 63 or 125 per topoloty */ int cur_vport_count; + uint16_t flex_port_count; struct qla_chip_state_84xx *cs84xx; struct qla_statistics qla_stats; @@ -2591,8 +2594,6 @@ typedef struct scsi_qla_host { #define SWITCH_FOUND BIT_0 #define DFLG_NO_CABLE BIT_1 - srb_t *status_srb; /* Status continuation entry. */ - /* ISP configuration data. 
*/ uint16_t loop_id; /* Host adapter loop id */ @@ -2648,7 +2649,7 @@ typedef struct scsi_qla_host { #define VP_ERR_FAB_LOGOUT 4 #define VP_ERR_ADAP_NORESOURCES 5 struct qla_hw_data *hw; - int req_ques[QLA_MAX_HOST_QUES]; + struct req_que *req; } scsi_qla_host_t; /* diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 528913f6bed9..b12de0176246 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -67,6 +67,7 @@ extern int ql2xextended_error_logging; extern int ql2xqfullrampup; extern int ql2xiidmaenable; extern int ql2xmaxqueues; +extern int ql2xmultique_tag; extern int qla2x00_loop_reset(scsi_qla_host_t *); extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); @@ -165,13 +166,13 @@ extern int qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); extern int -qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); +qla2x00_abort_command(srb_t *); extern int -qla2x00_abort_target(struct fc_port *, unsigned int); +qla2x00_abort_target(struct fc_port *, unsigned int, int); extern int -qla2x00_lun_reset(struct fc_port *, unsigned int); +qla2x00_lun_reset(struct fc_port *, unsigned int, int); extern int qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, @@ -236,9 +237,11 @@ extern int qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, dma_addr_t); -extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); -extern int qla24xx_abort_target(struct fc_port *, unsigned int); -extern int qla24xx_lun_reset(struct fc_port *, unsigned int); +extern int qla24xx_abort_command(srb_t *); +extern int +qla24xx_abort_target(struct fc_port *, unsigned int, int); +extern int +qla24xx_lun_reset(struct fc_port *, unsigned int, int); extern int qla2x00_system_error(scsi_qla_host_t *); @@ -295,8 +298,8 @@ extern irqreturn_t qla2100_intr_handler(int, void *); extern irqreturn_t qla2300_intr_handler(int, void *); extern irqreturn_t qla24xx_intr_handler(int, void *); extern void qla2x00_process_response_queue(struct rsp_que *); -extern void qla24xx_process_response_queue(struct rsp_que *); - +extern void +qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *); extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *); extern void qla2x00_free_irqs(scsi_qla_host_t *); @@ -401,19 +404,21 @@ extern int qla25xx_request_irq(struct rsp_que *); extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, - uint16_t, uint8_t, uint8_t); + uint16_t, int, uint8_t); extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, - uint16_t); + uint16_t, int); extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t); extern void qla2x00_init_response_q_entries(struct rsp_que *); extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *); extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *); extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t); -extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t); +extern int qla25xx_delete_queues(struct scsi_qla_host *); extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t); extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t); extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void 
qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); +extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *); + #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 9a343ec67567..059909c9f29b 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -786,7 +786,6 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) sizeof(uint32_t); if (ha->mqenable) mq_size = sizeof(struct qla2xxx_mq_chain); - /* Allocate memory for Fibre Channel Event Buffer. */ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) goto try_eft; @@ -850,8 +849,7 @@ cont_alloc: rsp_q_size = rsp->length * sizeof(response_t); dump_size = offsetof(struct qla2xxx_fw_dump, isp); - dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + - eft_size; + dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size; ha->chain_offset = dump_size; dump_size += mq_size + fce_size; @@ -1013,12 +1011,14 @@ qla2x00_init_response_q_entries(struct rsp_que *rsp) uint16_t cnt; response_t *pkt; + rsp->ring_ptr = rsp->ring; + rsp->ring_index = 0; + rsp->status_srb = NULL; pkt = rsp->ring_ptr; for (cnt = 0; cnt < rsp->length; cnt++) { pkt->signature = RESPONSE_PROCESSED; pkt++; } - } /** @@ -1176,7 +1176,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha) if (ha->flags.msix_enabled) { msix = &ha->msix_entries[1]; DEBUG2_17(printk(KERN_INFO - "Reistering vector 0x%x for base que\n", msix->entry)); + "Registering vector 0x%x for base que\n", msix->entry)); icb->msix = cpu_to_le16(msix->entry); } /* Use alternate PCI bus number */ @@ -1230,14 +1230,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha) spin_lock_irqsave(&ha->hardware_lock, flags); /* Clear outstanding commands array. */ - for (que = 0; que < ha->max_queues; que++) { + for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; if (!req) continue; - for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) + for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) req->outstanding_cmds[cnt] = NULL; - req->current_outstanding_cmd = 0; + req->current_outstanding_cmd = 1; /* Initialize firmware. */ req->ring_ptr = req->ring; @@ -1245,13 +1245,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha) req->cnt = req->length; } - for (que = 0; que < ha->max_queues; que++) { + for (que = 0; que < ha->max_rsp_queues; que++) { rsp = ha->rsp_q_map[que]; if (!rsp) continue; - rsp->ring_ptr = rsp->ring; - rsp->ring_index = 0; - /* Initialize response queue entries */ qla2x00_init_response_q_entries(rsp); } @@ -3180,8 +3177,7 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; uint32_t wait_time; - struct qla_hw_data *ha = vha->hw; - struct req_que *req = ha->req_q_map[vha->req_ques[0]]; + struct req_que *req = vha->req; struct rsp_que *rsp = req->rsp; atomic_set(&vha->loop_state, LOOP_UPDATE); @@ -3448,7 +3444,7 @@ qla25xx_init_queues(struct qla_hw_data *ha) int ret = -1; int i; - for (i = 1; i < ha->max_queues; i++) { + for (i = 1; i < ha->max_rsp_queues; i++) { rsp = ha->rsp_q_map[i]; if (rsp) { rsp->options &= ~BIT_0; @@ -3462,6 +3458,8 @@ qla25xx_init_queues(struct qla_hw_data *ha) "%s Rsp que:%d inited\n", __func__, rsp->id)); } + } + for (i = 1; i < ha->max_req_queues; i++) { req = ha->req_q_map[i]; if (req) { /* Clear outstanding commands array. 
*/ @@ -4165,7 +4163,7 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha) uint16_t mb[MAILBOX_REGISTER_COUNT]; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - struct req_que *req = ha->req_q_map[vha->req_ques[0]]; + struct req_que *req = vha->req; struct rsp_que *rsp = req->rsp; if (!vha->vp_idx) diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index a8abbb95730d..94b69d86482d 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -453,6 +453,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, mrk24->lun[2] = MSB(lun); host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); mrk24->vp_index = vha->vp_idx; + mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle); } else { SET_TARGET_ID(ha, mrk->target, loop_id); mrk->lun = cpu_to_le16(lun); @@ -531,9 +532,6 @@ qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req, for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++) *dword_ptr++ = 0; - /* Set system defined field. */ - pkt->sys_define = (uint8_t)req->ring_index; - /* Set entry count. */ pkt->entry_count = 1; @@ -724,19 +722,14 @@ qla24xx_start_scsi(srb_t *sp) struct scsi_cmnd *cmd = sp->cmd; struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; - uint16_t que_id; /* Setup device pointers. */ ret = 0; - que_id = vha->req_ques[0]; - req = ha->req_q_map[que_id]; + req = vha->req; + rsp = ha->rsp_q_map[0]; sp->que = req; - if (req->rsp) - rsp = req->rsp; - else - rsp = ha->rsp_q_map[que_id]; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; @@ -794,7 +787,7 @@ qla24xx_start_scsi(srb_t *sp) req->cnt -= req_cnt; cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; - cmd_pkt->handle = handle; + cmd_pkt->handle = MAKE_HANDLE(req->id, handle); /* Zero out remaining portion of packet. */ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ @@ -823,6 +816,8 @@ qla24xx_start_scsi(srb_t *sp) /* Set total data segment count. */ cmd_pkt->entry_count = (uint8_t)req_cnt; + /* Specify response queue number where completion should happen */ + cmd_pkt->entry_status = (uint8_t) rsp->id; wmb(); /* Adjust ring index. */ @@ -842,7 +837,7 @@ qla24xx_start_scsi(srb_t *sp) /* Manage unprocessed RIO/ZIO commands in response queue. */ if (vha->flags.process_response_queue && rsp->ring_ptr->signature != RESPONSE_PROCESSED) - qla24xx_process_response_queue(rsp); + qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index d04981848e56..c8e906c702a1 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -13,10 +13,9 @@ static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); static void qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *, uint32_t); static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); -static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); +static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, sts_entry_t *); -static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *); /** * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 
@@ -51,7 +50,7 @@ qla2100_intr_handler(int irq, void *dev_id) status = 0; spin_lock(&ha->hardware_lock); - vha = qla2x00_get_rsp_host(rsp); + vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { hccr = RD_REG_WORD(®->hccr); if (hccr & HCCR_RISC_PAUSE) { @@ -147,7 +146,7 @@ qla2300_intr_handler(int irq, void *dev_id) status = 0; spin_lock(&ha->hardware_lock); - vha = qla2x00_get_rsp_host(rsp); + vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(®->u.isp2300.host_status); if (stat & HSR_RISC_PAUSED) { @@ -685,7 +684,7 @@ skip_rio: vha->host_no)); if (IS_FWI2_CAPABLE(ha)) - qla24xx_process_response_queue(rsp); + qla24xx_process_response_queue(vha, rsp); else qla2x00_process_response_queue(rsp); break; @@ -766,7 +765,7 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data) struct qla_hw_data *ha = vha->hw; struct req_que *req = NULL; - req = ha->req_q_map[vha->req_ques[0]]; + req = vha->req; if (!req) return; if (req->max_q_depth <= sdev->queue_depth) @@ -858,8 +857,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, qla2x00_ramp_up_queue_depth(vha, req, sp); qla2x00_sp_compl(ha, sp); } else { - DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", - vha->host_no)); + DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion" + " handle(%d)\n", vha->host_no, req->id, index)); qla_printk(KERN_WARNING, ha, "Invalid ISP SCSI completion handle\n"); @@ -881,7 +880,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp) uint16_t handle_cnt; uint16_t cnt; - vha = qla2x00_get_rsp_host(rsp); + vha = pci_get_drvdata(ha->pdev); if (!vha->flags.online) return; @@ -926,7 +925,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp) } break; case STATUS_CONT_TYPE: - qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); + qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); break; default: /* Type Not Supported. */ @@ -945,7 +944,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp) } static inline void -qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len) +qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len, + struct rsp_que *rsp) { struct scsi_cmnd *cp = sp->cmd; @@ -962,7 +962,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len) sp->request_sense_ptr += sense_len; sp->request_sense_length -= sense_len; if (sp->request_sense_length != 0) - sp->fcport->vha->status_srb = sp; + rsp->status_srb = sp; DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no, @@ -992,7 +992,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; uint8_t *rsp_info, *sense_data; struct qla_hw_data *ha = vha->hw; - struct req_que *req = rsp->req; + uint32_t handle; + uint16_t que; + struct req_que *req; sts = (sts_entry_t *) pkt; sts24 = (struct sts_entry_24xx *) pkt; @@ -1003,18 +1005,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) comp_status = le16_to_cpu(sts->comp_status); scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; } - + handle = (uint32_t) LSW(sts->handle); + que = MSW(sts->handle); + req = ha->req_q_map[que]; /* Fast path completion. */ if (comp_status == CS_COMPLETE && scsi_status == 0) { - qla2x00_process_completed_request(vha, req, sts->handle); + qla2x00_process_completed_request(vha, req, handle); return; } /* Validate handle. 
*/ - if (sts->handle < MAX_OUTSTANDING_COMMANDS) { - sp = req->outstanding_cmds[sts->handle]; - req->outstanding_cmds[sts->handle] = NULL; + if (handle < MAX_OUTSTANDING_COMMANDS) { + sp = req->outstanding_cmds[handle]; + req->outstanding_cmds[handle] = NULL; } else sp = NULL; @@ -1030,7 +1034,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) cp = sp->cmd; if (cp == NULL) { DEBUG2(printk("scsi(%ld): Command already returned back to OS " - "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp)); + "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp)); qla_printk(KERN_WARNING, ha, "Command is NULL: already returned to OS (sp=%p)\n", sp); @@ -1133,7 +1137,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) if (!(scsi_status & SS_SENSE_LEN_VALID)) break; - qla2x00_handle_sense(sp, sense_data, sense_len); + qla2x00_handle_sense(sp, sense_data, sense_len, rsp); break; case CS_DATA_UNDERRUN: @@ -1192,7 +1196,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) if (!(scsi_status & SS_SENSE_LEN_VALID)) break; - qla2x00_handle_sense(sp, sense_data, sense_len); + qla2x00_handle_sense(sp, sense_data, sense_len, rsp); } else { /* * If RISC reports underrun and target does not report @@ -1334,7 +1338,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) } /* Place command on done queue. */ - if (vha->status_srb == NULL) + if (rsp->status_srb == NULL) qla2x00_sp_compl(ha, sp); } @@ -1346,11 +1350,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) * Extended sense data. */ static void -qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) +qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) { uint8_t sense_sz = 0; - struct qla_hw_data *ha = vha->hw; - srb_t *sp = vha->status_srb; + struct qla_hw_data *ha = rsp->hw; + srb_t *sp = rsp->status_srb; struct scsi_cmnd *cp; if (sp != NULL && sp->request_sense_length != 0) { @@ -1362,7 +1366,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) "cmd is NULL: already returned to OS (sp=%p)\n", sp); - vha->status_srb = NULL; + rsp->status_srb = NULL; return; } @@ -1383,7 +1387,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) /* Place command on done queue. */ if (sp->request_sense_length == 0) { - vha->status_srb = NULL; + rsp->status_srb = NULL; qla2x00_sp_compl(ha, sp); } } @@ -1399,7 +1403,9 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) { srb_t *sp; struct qla_hw_data *ha = vha->hw; - struct req_que *req = rsp->req; + uint32_t handle = LSW(pkt->handle); + uint16_t que = MSW(pkt->handle); + struct req_que *req = ha->req_q_map[que]; #if defined(QL_DEBUG_LEVEL_2) if (pkt->entry_status & RF_INV_E_ORDER) qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); @@ -1417,14 +1423,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) #endif /* Validate handle. */ - if (pkt->handle < MAX_OUTSTANDING_COMMANDS) - sp = req->outstanding_cmds[pkt->handle]; + if (handle < MAX_OUTSTANDING_COMMANDS) + sp = req->outstanding_cmds[handle]; else sp = NULL; if (sp) { /* Free outstanding command slot. 
*/ - req->outstanding_cmds[pkt->handle] = NULL; + req->outstanding_cmds[handle] = NULL; /* Bad payload or header */ if (pkt->entry_status & @@ -1486,13 +1492,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) * qla24xx_process_response_queue() - Process response queue entries. * @ha: SCSI driver HA context */ -void -qla24xx_process_response_queue(struct rsp_que *rsp) +void qla24xx_process_response_queue(struct scsi_qla_host *vha, + struct rsp_que *rsp) { struct sts_entry_24xx *pkt; - struct scsi_qla_host *vha; - - vha = qla2x00_get_rsp_host(rsp); if (!vha->flags.online) return; @@ -1523,7 +1526,7 @@ qla24xx_process_response_queue(struct rsp_que *rsp) qla2x00_status_entry(vha, rsp, pkt); break; case STATUS_CONT_TYPE: - qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); + qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); break; case VP_RPT_ID_IOCB_TYPE: qla24xx_report_id_acquisition(vha, @@ -1626,7 +1629,7 @@ qla24xx_intr_handler(int irq, void *dev_id) status = 0; spin_lock(&ha->hardware_lock); - vha = qla2x00_get_rsp_host(rsp); + vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(®->host_status); if (stat & HSRX_RISC_PAUSED) { @@ -1664,7 +1667,7 @@ qla24xx_intr_handler(int irq, void *dev_id) break; case 0x13: case 0x14: - qla24xx_process_response_queue(rsp); + qla24xx_process_response_queue(vha, rsp); break; default: DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " @@ -1692,6 +1695,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; + struct scsi_qla_host *vha; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -1704,7 +1708,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) spin_lock_irq(&ha->hardware_lock); - qla24xx_process_response_queue(rsp); + vha = qla25xx_get_host(rsp); + qla24xx_process_response_queue(vha, rsp); WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); spin_unlock_irq(&ha->hardware_lock); @@ -1712,31 +1717,6 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) return IRQ_HANDLED; } -static irqreturn_t -qla25xx_msix_rsp_q(int irq, void *dev_id) -{ - struct qla_hw_data *ha; - struct rsp_que *rsp; - struct device_reg_24xx __iomem *reg; - - rsp = (struct rsp_que *) dev_id; - if (!rsp) { - printk(KERN_INFO - "%s(): NULL response queue pointer\n", __func__); - return IRQ_NONE; - } - ha = rsp->hw; - reg = &ha->iobase->isp24; - - spin_lock_irq(&ha->hardware_lock); - - qla24xx_process_response_queue(rsp); - - spin_unlock_irq(&ha->hardware_lock); - - return IRQ_HANDLED; -} - static irqreturn_t qla24xx_msix_default(int irq, void *dev_id) { @@ -1760,7 +1740,7 @@ qla24xx_msix_default(int irq, void *dev_id) status = 0; spin_lock_irq(&ha->hardware_lock); - vha = qla2x00_get_rsp_host(rsp); + vha = pci_get_drvdata(ha->pdev); do { stat = RD_REG_DWORD(®->host_status); if (stat & HSRX_RISC_PAUSED) { @@ -1798,7 +1778,7 @@ qla24xx_msix_default(int irq, void *dev_id) break; case 0x13: case 0x14: - qla24xx_process_response_queue(rsp); + qla24xx_process_response_queue(vha, rsp); break; default: DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " @@ -1822,31 +1802,13 @@ qla24xx_msix_default(int irq, void *dev_id) /* Interrupt handling helpers. 
*/ struct qla_init_msix_entry { - uint16_t entry; - uint16_t index; const char *name; irq_handler_t handler; }; -static struct qla_init_msix_entry base_queue = { - .entry = 0, - .index = 0, - .name = "qla2xxx (default)", - .handler = qla24xx_msix_default, -}; - -static struct qla_init_msix_entry base_rsp_queue = { - .entry = 1, - .index = 1, - .name = "qla2xxx (rsp_q)", - .handler = qla24xx_msix_rsp_q, -}; - -static struct qla_init_msix_entry multi_rsp_queue = { - .entry = 1, - .index = 1, - .name = "qla2xxx (multi_q)", - .handler = qla25xx_msix_rsp_q, +static struct qla_init_msix_entry msix_entries[2] = { + { "qla2xxx (default)", qla24xx_msix_default }, + { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, }; static void @@ -1873,7 +1835,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) int i, ret; struct msix_entry *entries; struct qla_msix_entry *qentry; - struct qla_init_msix_entry *msix_queue; entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, GFP_KERNEL); @@ -1900,7 +1861,7 @@ msix_failed: ha->msix_count, ret); goto msix_out; } - ha->max_queues = ha->msix_count - 1; + ha->max_rsp_queues = ha->msix_count - 1; } ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * ha->msix_count, GFP_KERNEL); @@ -1918,45 +1879,27 @@ msix_failed: qentry->rsp = NULL; } - /* Enable MSI-X for AENs for queue 0 */ - qentry = &ha->msix_entries[0]; - ret = request_irq(qentry->vector, base_queue.handler, 0, - base_queue.name, rsp); - if (ret) { - qla_printk(KERN_WARNING, ha, + /* Enable MSI-X vectors for the base queue */ + for (i = 0; i < 2; i++) { + qentry = &ha->msix_entries[i]; + ret = request_irq(qentry->vector, msix_entries[i].handler, + 0, msix_entries[i].name, rsp); + if (ret) { + qla_printk(KERN_WARNING, ha, "MSI-X: Unable to register handler -- %x/%d.\n", qentry->vector, ret); - qla24xx_disable_msix(ha); - goto msix_out; + qla24xx_disable_msix(ha); + ha->mqenable = 0; + goto msix_out; + } + qentry->have_irq = 1; + qentry->rsp = rsp; + rsp->msix = qentry; } - qentry->have_irq = 1; - qentry->rsp = rsp; /* Enable MSI-X vector for response queue update for queue 0 */ - if (ha->max_queues > 1 && ha->mqiobase) { + if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) ha->mqenable = 1; - msix_queue = &multi_rsp_queue; - qla_printk(KERN_INFO, ha, - "MQ enabled, Number of Queue Resources: %d \n", - ha->max_queues); - } else { - ha->mqenable = 0; - msix_queue = &base_rsp_queue; - } - - qentry = &ha->msix_entries[1]; - ret = request_irq(qentry->vector, msix_queue->handler, 0, - msix_queue->name, rsp); - if (ret) { - qla_printk(KERN_WARNING, ha, - "MSI-X: Unable to register handler -- %x/%d.\n", - qentry->vector, ret); - qla24xx_disable_msix(ha); - ha->mqenable = 0; - goto msix_out; - } - qentry->have_irq = 1; - qentry->rsp = rsp; msix_out: kfree(entries); @@ -2063,35 +2006,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha) } } -static struct scsi_qla_host * -qla2x00_get_rsp_host(struct rsp_que *rsp) -{ - srb_t *sp; - struct qla_hw_data *ha = rsp->hw; - struct scsi_qla_host *vha = NULL; - struct sts_entry_24xx *pkt; - struct req_que *req; - - if (rsp->id) { - pkt = (struct sts_entry_24xx *) rsp->ring_ptr; - req = rsp->req; - if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) { - sp = req->outstanding_cmds[pkt->handle]; - if (sp) - vha = sp->fcport->vha; - } - } - if (!vha) - /* handle it in base queue */ - vha = pci_get_drvdata(ha->pdev); - - return vha; -} int qla25xx_request_irq(struct rsp_que *rsp) { struct qla_hw_data *ha = rsp->hw; - struct qla_init_msix_entry *intr = 
&multi_rsp_queue; + struct qla_init_msix_entry *intr = &msix_entries[2]; struct qla_msix_entry *msix = rsp->msix; int ret; @@ -2106,3 +2025,30 @@ int qla25xx_request_irq(struct rsp_que *rsp) msix->rsp = rsp; return ret; } + +struct scsi_qla_host * +qla25xx_get_host(struct rsp_que *rsp) +{ + srb_t *sp; + struct qla_hw_data *ha = rsp->hw; + struct scsi_qla_host *vha = NULL; + struct sts_entry_24xx *pkt; + struct req_que *req; + uint16_t que; + uint32_t handle; + + pkt = (struct sts_entry_24xx *) rsp->ring_ptr; + que = MSW(pkt->handle); + handle = (uint32_t) LSW(pkt->handle); + req = ha->req_q_map[que]; + if (handle < MAX_OUTSTANDING_COMMANDS) { + sp = req->outstanding_cmds[handle]; + if (sp) + return sp->fcport->vha; + else + goto base_que; + } +base_que: + vha = pci_get_drvdata(ha->pdev); + return vha; +} diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 4f7e94c4daaa..bfdc89f8569b 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -748,20 +748,20 @@ qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr, * Kernel context. */ int -qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) +qla2x00_abort_command(srb_t *sp) { unsigned long flags = 0; - fc_port_t *fcport; int rval; uint32_t handle = 0; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + fc_port_t *fcport = sp->fcport; + scsi_qla_host_t *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; + struct req_que *req = vha->req; DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); - fcport = sp->fcport; - spin_lock_irqsave(&ha->hardware_lock, flags); for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { if (req->outstanding_cmds[handle] == sp) @@ -800,7 +800,7 @@ qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) } int -qla2x00_abort_target(struct fc_port *fcport, unsigned int l) +qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag) { int rval, rval2; mbx_cmd_t mc; @@ -813,8 +813,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l) l = l; vha = fcport->vha; - req = vha->hw->req_q_map[0]; - rsp = vha->hw->rsp_q_map[0]; + req = vha->hw->req_q_map[tag]; + rsp = vha->hw->rsp_q_map[tag]; mcp->mb[0] = MBC_ABORT_TARGET; mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) { @@ -850,7 +850,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l) } int -qla2x00_lun_reset(struct fc_port *fcport, unsigned int l) +qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag) { int rval, rval2; mbx_cmd_t mc; @@ -862,8 +862,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l) DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); vha = fcport->vha; - req = vha->hw->req_q_map[0]; - rsp = vha->hw->rsp_q_map[0]; + req = vha->hw->req_q_map[tag]; + rsp = vha->hw->rsp_q_map[tag]; mcp->mb[0] = MBC_LUN_RESET; mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) @@ -1492,9 +1492,14 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, dma_addr_t lg_dma; uint32_t iop[2]; struct qla_hw_data *ha = vha->hw; + struct req_que *req; + struct rsp_que *rsp; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + req = vha->req; + rsp = req->rsp; + lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); if (lg == NULL) { DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", @@ -1505,6 +1510,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; + lg->handle = MAKE_HANDLE(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI); if (opt & BIT_0) @@ -1753,6 +1759,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, struct logio_entry_24xx *lg; dma_addr_t lg_dma; struct qla_hw_data *ha = vha->hw; + struct req_que *req; + struct rsp_que *rsp; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); @@ -1764,8 +1772,14 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, } memset(lg, 0, sizeof(struct logio_entry_24xx)); + if (ql2xmaxqueues > 1) + req = ha->req_q_map[0]; + else + req = vha->req; + rsp = req->rsp; lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; + lg->handle = MAKE_HANDLE(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); @@ -2204,21 +2218,21 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, } int -qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) +qla24xx_abort_command(srb_t *sp) { int rval; - fc_port_t *fcport; unsigned long flags = 0; struct abort_entry_24xx *abt; dma_addr_t abt_dma; uint32_t handle; + fc_port_t *fcport = sp->fcport; + struct scsi_qla_host *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; + struct req_que *req = sp->que; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - fcport = sp->fcport; - spin_lock_irqsave(&ha->hardware_lock, flags); for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { if (req->outstanding_cmds[handle] == sp) @@ -2240,6 +2254,7 @@ qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) abt->entry_type = ABORT_IOCB_TYPE; abt->entry_count = 1; + abt->handle = MAKE_HANDLE(req->id, abt->handle); abt->nport_handle = cpu_to_le16(fcport->loop_id); abt->handle_to_abort = handle; abt->port_id[0] = fcport->d_id.b.al_pa; @@ -2281,7 +2296,7 @@ struct tsk_mgmt_cmd { static int __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, - unsigned int l) + unsigned int l, int tag) { int rval, rval2; struct tsk_mgmt_cmd *tsk; @@ -2295,8 +2310,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, vha = fcport->vha; ha = vha->hw; - req = ha->req_q_map[0]; - rsp = ha->rsp_q_map[0]; + req = vha->req; + rsp = req->rsp; tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); if (tsk == NULL) { DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " @@ -2307,6 +2322,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; tsk->p.tsk.entry_count = 1; + tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle); tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); tsk->p.tsk.control_flags = cpu_to_le32(type); @@ -2353,15 +2369,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, } int -qla24xx_abort_target(struct fc_port *fcport, unsigned int l) +qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag) { - return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l); + return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); } int -qla24xx_lun_reset(struct fc_port *fcport, unsigned int l) +qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag) { - return 
__qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l); + return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); } int @@ -3150,6 +3166,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) WRT_REG_DWORD(®->req_q_in, 0); WRT_REG_DWORD(®->req_q_out, 0); } + req->req_q_in = ®->req_q_in; + req->req_q_out = ®->req_q_out; spin_unlock_irqrestore(&ha->hardware_lock, flags); rval = qla2x00_mailbox_command(vha, mcp); @@ -3176,7 +3194,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) mcp->mb[6] = MSW(MSD(rsp->dma)); mcp->mb[7] = LSW(MSD(rsp->dma)); mcp->mb[5] = rsp->length; - mcp->mb[11] = rsp->vp_idx; mcp->mb[14] = rsp->msix->entry; mcp->mb[13] = rsp->rid; @@ -3188,7 +3205,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) mcp->mb[8] = 0; /* que out ptr index */ mcp->mb[9] = 0; - mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7 + mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->flags = MBX_DMA_OUT; diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 51716c7e3008..9c08479c3e1b 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -398,9 +398,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); - memset(vha->req_ques, 0, sizeof(vha->req_ques)); - vha->req_ques[0] = ha->req_q_map[0]->id; - host->can_queue = ha->req_q_map[0]->length + 128; + vha->req = base_vha->req; + host->can_queue = base_vha->req->length + 128; host->this_id = 255; host->cmd_per_lun = 3; host->max_cmd_len = MAX_CMDSZ; @@ -515,76 +514,53 @@ int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos) /* Delete all queues for a given vhost */ int -qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no) +qla25xx_delete_queues(struct scsi_qla_host *vha) { int cnt, ret = 0; struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct qla_hw_data *ha = vha->hw; - if (que_no) { - /* Delete request queue */ - req = ha->req_q_map[que_no]; + /* Delete request queues */ + for (cnt = 1; cnt < ha->max_req_queues; cnt++) { + req = ha->req_q_map[cnt]; if (req) { - rsp = req->rsp; ret = qla25xx_delete_req_que(vha, req); if (ret != QLA_SUCCESS) { qla_printk(KERN_WARNING, ha, - "Couldn't delete req que %d\n", req->id); + "Couldn't delete req que %d\n", + req->id); return ret; } - /* Delete associated response queue */ - if (rsp) { - ret = qla25xx_delete_rsp_que(vha, rsp); - if (ret != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Couldn't delete rsp que %d\n", - rsp->id); - return ret; - } - } } - } else { /* delete all queues of this host */ - for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) { - /* Delete request queues */ - req = ha->req_q_map[vha->req_ques[cnt]]; - if (req && req->id) { - rsp = req->rsp; - ret = qla25xx_delete_req_que(vha, req); - if (ret != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Couldn't delete req que %d\n", - vha->req_ques[cnt]); - return ret; - } - vha->req_ques[cnt] = ha->req_q_map[0]->id; - /* Delete associated response queue */ - if (rsp && rsp->id) { - ret = qla25xx_delete_rsp_que(vha, rsp); - if (ret != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Couldn't delete rsp que %d\n", - rsp->id); - return ret; - } - } + } + + /* Delete response queues */ + for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { + rsp = ha->rsp_q_map[cnt]; + if (rsp) { + ret = qla25xx_delete_rsp_que(vha, rsp); + if (ret != QLA_SUCCESS) 
{ + qla_printk(KERN_WARNING, ha, + "Couldn't delete rsp que %d\n", + rsp->id); + return ret; } } } - qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n", - vha->vp_idx); return ret; } int qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, - uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos) + uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos) { int ret = 0; struct req_que *req = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); uint16_t que_id = 0; device_reg_t __iomem *reg; + uint32_t cnt; req = kzalloc(sizeof(struct req_que), GFP_KERNEL); if (req == NULL) { @@ -604,8 +580,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, } mutex_lock(&ha->vport_lock); - que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues); - if (que_id >= ha->max_queues) { + que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); + if (que_id >= ha->max_req_queues) { mutex_unlock(&ha->vport_lock); qla_printk(KERN_INFO, ha, "No resources to create " "additional request queue\n"); @@ -617,10 +593,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, req->vp_idx = vp_idx; req->qos = qos; - if (ha->rsp_q_map[rsp_que]) { + if (rsp_que < 0) + req->rsp = NULL; + else req->rsp = ha->rsp_q_map[rsp_que]; - req->rsp->req = req; - } /* Use alternate PCI bus number */ if (MSB(req->rid)) options |= BIT_4; @@ -628,13 +604,16 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, if (LSB(req->rid)) options |= BIT_5; req->options = options; + + for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) + req->outstanding_cmds[cnt] = NULL; + req->current_outstanding_cmd = 1; + req->ring_ptr = req->ring; req->ring_index = 0; req->cnt = req->length; req->id = que_id; reg = ISP_QUE_REG(ha, que_id); - req->req_q_in = ®->isp25mq.req_q_in; - req->req_q_out = ®->isp25mq.req_q_out; req->max_q_depth = ha->req_q_map[0]->max_q_depth; mutex_unlock(&ha->vport_lock); @@ -657,7 +636,7 @@ que_failed: /* create response queue */ int qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, - uint8_t vp_idx, uint16_t rid) + uint8_t vp_idx, uint16_t rid, int req) { int ret = 0; struct rsp_que *rsp = NULL; @@ -672,7 +651,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, goto que_failed; } - rsp->length = RESPONSE_ENTRY_CNT_2300; + rsp->length = RESPONSE_ENTRY_CNT_MQ; rsp->ring = dma_alloc_coherent(&ha->pdev->dev, (rsp->length + 1) * sizeof(response_t), &rsp->dma, GFP_KERNEL); @@ -683,8 +662,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, } mutex_lock(&ha->vport_lock); - que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); - if (que_id >= ha->max_queues) { + que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues); + if (que_id >= ha->max_rsp_queues) { mutex_unlock(&ha->vport_lock); qla_printk(KERN_INFO, ha, "No resources to create " "additional response queue\n"); @@ -708,8 +687,6 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, if (LSB(rsp->rid)) options |= BIT_5; rsp->options = options; - rsp->ring_ptr = rsp->ring; - rsp->ring_index = 0; rsp->id = que_id; reg = ISP_QUE_REG(ha, que_id); rsp->rsp_q_in = ®->isp25mq.rsp_q_in; @@ -728,9 +705,12 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, mutex_unlock(&ha->vport_lock); goto que_failed; } + if (req >= 0) + rsp->req = ha->req_q_map[req]; + else + rsp->req = NULL; qla2x00_init_response_q_entries(rsp); - return rsp->id; que_failed: @@ -744,14 +724,16 @@ qla25xx_create_queues(struct scsi_qla_host 
*vha, uint8_t qos) uint16_t options = 0; uint8_t ret = 0; struct qla_hw_data *ha = vha->hw; + struct rsp_que *rsp; options |= BIT_1; - ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0); + ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0, -1); if (!ret) { qla_printk(KERN_WARNING, ha, "Response Que create failed\n"); return ret; } else qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret); + rsp = ha->rsp_q_map[ret]; options = 0; if (qos & BIT_7) @@ -759,10 +741,11 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos) ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret, qos & ~BIT_7); if (ret) { - vha->req_ques[0] = ret; + vha->req = ha->req_q_map[ret]; qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret); } else qla_printk(KERN_WARNING, ha, "Request Que create failed\n"); + rsp->req = ha->req_q_map[ret]; return ret; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 29234ba42b42..e2647e02dac9 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -187,7 +187,7 @@ static void qla2x00_sp_free_dma(srb_t *); /* -------------------------------------------------------------------------- */ static int qla2x00_alloc_queues(struct qla_hw_data *ha) { - ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues, + ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, GFP_KERNEL); if (!ha->req_q_map) { qla_printk(KERN_WARNING, ha, @@ -195,7 +195,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha) goto fail_req_map; } - ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues, + ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues, GFP_KERNEL); if (!ha->rsp_q_map) { qla_printk(KERN_WARNING, ha, @@ -213,16 +213,8 @@ fail_req_map: return -ENOMEM; } -static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req, - struct rsp_que *rsp) +static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) { - if (rsp && rsp->ring) - dma_free_coherent(&ha->pdev->dev, - (rsp->length + 1) * sizeof(response_t), - rsp->ring, rsp->dma); - - kfree(rsp); - rsp = NULL; if (req && req->ring) dma_free_coherent(&ha->pdev->dev, (req->length + 1) * sizeof(request_t), @@ -232,22 +224,36 @@ static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req, req = NULL; } +static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) +{ + if (rsp && rsp->ring) + dma_free_coherent(&ha->pdev->dev, + (rsp->length + 1) * sizeof(response_t), + rsp->ring, rsp->dma); + + kfree(rsp); + rsp = NULL; +} + static void qla2x00_free_queues(struct qla_hw_data *ha) { struct req_que *req; struct rsp_que *rsp; int cnt; - for (cnt = 0; cnt < ha->max_queues; cnt++) { - rsp = ha->rsp_q_map[cnt]; + for (cnt = 0; cnt < ha->max_req_queues; cnt++) { req = ha->req_q_map[cnt]; - qla2x00_free_que(ha, req, rsp); + qla2x00_free_req_que(ha, req); } - kfree(ha->rsp_q_map); - ha->rsp_q_map = NULL; - kfree(ha->req_q_map); ha->req_q_map = NULL; + + for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { + rsp = ha->rsp_q_map[cnt]; + qla2x00_free_rsp_que(ha, rsp); + } + kfree(ha->rsp_q_map); + ha->rsp_q_map = NULL; } static char * @@ -612,7 +618,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha) void qla2x00_abort_fcport_cmds(fc_port_t *fcport) { - int cnt, que, id; + int cnt; unsigned long flags; srb_t *sp; scsi_qla_host_t *vha = fcport->vha; @@ -620,32 +626,27 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport) struct req_que *req; 
spin_lock_irqsave(&ha->hardware_lock, flags); - for (que = 0; que < QLA_MAX_HOST_QUES; que++) { - id = vha->req_ques[que]; - req = ha->req_q_map[id]; - if (!req) + req = vha->req; + for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { + sp = req->outstanding_cmds[cnt]; + if (!sp) + continue; + if (sp->fcport != fcport) continue; - for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { - sp = req->outstanding_cmds[cnt]; - if (!sp) - continue; - if (sp->fcport != fcport) - continue; - spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (ha->isp_ops->abort_command(vha, sp, req)) { + spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (ha->isp_ops->abort_command(sp)) { + DEBUG2(qla_printk(KERN_WARNING, ha, + "Abort failed -- %lx\n", + sp->cmd->serial_number)); + } else { + if (qla2x00_eh_wait_on_command(sp->cmd) != + QLA_SUCCESS) DEBUG2(qla_printk(KERN_WARNING, ha, - "Abort failed -- %lx\n", + "Abort failed while waiting -- %lx\n", sp->cmd->serial_number)); - } else { - if (qla2x00_eh_wait_on_command(sp->cmd) != - QLA_SUCCESS) - DEBUG2(qla_printk(KERN_WARNING, ha, - "Abort failed while waiting -- %lx\n", - sp->cmd->serial_number)); - } - spin_lock_irqsave(&ha->hardware_lock, flags); } + spin_lock_irqsave(&ha->hardware_lock, flags); } spin_unlock_irqrestore(&ha->hardware_lock, flags); } @@ -726,7 +727,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) " pid=%ld.\n", __func__, vha->host_no, sp, serial)); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (ha->isp_ops->abort_command(vha, sp, req)) { + if (ha->isp_ops->abort_command(sp)) { DEBUG2(printk("%s(%ld): abort_command " "mbx failed.\n", __func__, vha->host_no)); ret = FAILED; @@ -820,7 +821,7 @@ static char *reset_errors[] = { static int __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, - struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) + struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int)) { scsi_qla_host_t *vha = shost_priv(cmd->device->host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; @@ -841,7 +842,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) goto eh_reset_failed; err = 2; - if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) + if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1) + != QLA_SUCCESS) goto eh_reset_failed; err = 3; if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, @@ -1065,7 +1067,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) if (fcport->port_type != FCT_TARGET) continue; - ret = ha->isp_ops->target_reset(fcport, 0); + ret = ha->isp_ops->target_reset(fcport, 0, 0); if (ret != QLA_SUCCESS) { DEBUG2_3(printk("%s(%ld): bus_reset failed: " "target_reset=%d d_id=%x.\n", __func__, @@ -1089,7 +1091,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) struct req_que *req; spin_lock_irqsave(&ha->hardware_lock, flags); - for (que = 0; que < ha->max_queues; que++) { + for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; if (!req) continue; @@ -1124,7 +1126,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev) scsi_qla_host_t *vha = shost_priv(sdev->host); struct qla_hw_data *ha = vha->hw; struct fc_rport *rport = starget_to_rport(sdev->sdev_target); - struct req_que *req = ha->req_q_map[vha->req_ques[0]]; + struct req_que *req = vha->req; if (sdev->tagged_supported) scsi_activate_tcq(sdev, req->max_q_depth); @@ -1572,8 +1574,9 @@ skip_pio: } /* Determine queue resources */ - ha->max_queues = 1; - if 
(ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) + ha->max_req_queues = ha->max_rsp_queues = 1; + if (ql2xmaxqueues <= 1 && + (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) goto mqiobase_exit; ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), pci_resource_len(ha->pdev, 3)); @@ -1581,20 +1584,17 @@ skip_pio: /* Read MSIX vector size of the board */ pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); ha->msix_count = msix; - /* Max queues are bounded by available msix vectors */ - /* queue 0 uses two msix vectors */ - if (ha->msix_count - 1 < ql2xmaxqueues) - ha->max_queues = ha->msix_count - 1; - else if (ql2xmaxqueues > QLA_MQ_SIZE) - ha->max_queues = QLA_MQ_SIZE; - else - ha->max_queues = ql2xmaxqueues; - qla_printk(KERN_INFO, ha, - "MSI-X vector count: %d\n", msix); - } + if (ql2xmaxqueues > 1) { + ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? + QLA_MQ_SIZE : ql2xmaxqueues; + DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no" + " of request queues:%d\n", ha->max_req_queues)); + } + } else + qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n"); mqiobase_exit: - ha->msix_count = ha->max_queues + 1; + ha->msix_count = ha->max_rsp_queues + 1; return (0); iospace_error_exit: @@ -1804,14 +1804,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ret = -ENOMEM; qla2x00_mem_free(ha); - qla2x00_free_que(ha, req, rsp); + qla2x00_free_req_que(ha, req); + qla2x00_free_rsp_que(ha, rsp); goto probe_hw_failed; } pci_set_drvdata(pdev, base_vha); host = base_vha->host; - base_vha->req_ques[0] = req->id; + base_vha->req = req; host->can_queue = req->length + 128; if (IS_QLA2XXX_MIDTYPE(ha)) base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; @@ -1842,7 +1843,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) } ha->rsp_q_map[0] = rsp; ha->req_q_map[0] = req; - + rsp->req = req; + req->rsp = rsp; + set_bit(0, ha->req_qid_map); + set_bit(0, ha->rsp_qid_map); /* FWI2-capable only. */ req->req_q_in = &ha->iobase->isp24.req_q_in; req->req_q_out = &ha->iobase->isp24.req_q_out; @@ -1918,8 +1922,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) return 0; probe_init_failed: - qla2x00_free_que(ha, req, rsp); - ha->max_queues = 0; + qla2x00_free_req_que(ha, req); + qla2x00_free_rsp_que(ha, rsp); + ha->max_req_queues = ha->max_rsp_queues = 0; probe_failed: if (base_vha->timer_active) @@ -2018,6 +2023,8 @@ qla2x00_free_device(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; + qla25xx_delete_queues(vha); + if (ha->flags.fce_enabled) qla2x00_disable_fce_trace(vha, NULL, NULL); diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 81187a0246cd..2a9b3f83ba67 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -920,6 +920,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) entry = data + sizeof(struct qla_npiv_header); cnt = le16_to_cpu(hdr.entries); + ha->flex_port_count = cnt; for (i = 0; cnt; cnt--, entry++, i++) { uint16_t flags; struct fc_vport_identifiers vid; -- cgit v1.2.3 From 68ca949cdb04b4dc71451a999148fbc5f187a220 Mon Sep 17 00:00:00 2001 From: Anirban Chakraborty Date: Mon, 6 Apr 2009 22:33:41 -0700 Subject: [SCSI] qla2xxx: Add CPU affinity support. Set the module parameter ql2xmultique_tag to 1 to enable this feature. In this mode, the total number of response queues created is equal to the number of online cpus. 
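In sketch form, the submission half of that mapping is the qla25xx_set_que() helper added to qla_iocb.c below (names as in the patch, bounds handling simplified): the CPU that issued the command picks the response queue.

/* Simplified sketch of the qla25xx_set_que() helper added below. */
static void qla25xx_set_que(srb_t *sp, struct req_que **req,
	struct rsp_que **rsp)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	int cpu = sp->cmd->request->cpu;	/* filled in by the block layer */

	if (ql2xmultique_tag && cpu >= 0 && cpu < ha->max_rsp_queues - 1) {
		/* Response queue 0 is the base queue, so CPU n uses n + 1. */
		*rsp = ha->rsp_q_map[cpu + 1];
		*req = ha->req_q_map[1];
	} else {
		/* Feature off or CPU out of range: default queues. */
		*req = vha->req;
		*rsp = ha->rsp_q_map[0];
	}
}

On the completion side, qla25xx_msix_rsp_q() hands the interrupt off with queue_work_on(rsp->id - 1, ...), so each response queue is drained on the CPU it is affined to.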
Turning on the block layer's rq_affinity mode routes requests to the proper cpu and, at the same time, lets the IO complete on a response queue affined to the cpu in the request path. Signed-off-by: Anirban Chakraborty Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 4 +-- drivers/scsi/qla2xxx/qla_def.h | 2 ++ drivers/scsi/qla2xxx/qla_iocb.c | 22 ++++++++++-- drivers/scsi/qla2xxx/qla_isr.c | 22 +++++++++++- drivers/scsi/qla2xxx/qla_mbx.c | 10 ++++-- drivers/scsi/qla2xxx/qla_mid.c | 11 ++++++ drivers/scsi/qla2xxx/qla_os.c | 78 +++++++++++++++++++++++++++++++++++++++-- 7 files changed, 140 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index bda6658d4fbf..f3536e56dce4 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1531,7 +1531,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) qla24xx_vport_disable(fc_vport, disable); ret = 0; - if (ha->cur_vport_count <= ha->flex_port_count + if (ha->cur_vport_count <= ha->flex_port_count || ql2xmultique_tag || ha->max_req_queues == 1 || !ha->npiv_info) goto vport_queue; /* Create a request queue in QoS mode for the vport */ @@ -1599,7 +1599,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) vha->host_no, vha->vp_idx, vha)); } - if (vha->req->id) { + if (vha->req->id && !ql2xmultique_tag) { if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) qla_printk(KERN_WARNING, ha, "Queue delete failed.\n"); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 57d659cf99ee..09190ba411fd 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2171,6 +2171,7 @@ struct rsp_que { struct qla_msix_entry *msix; struct req_que *req; srb_t *status_srb; /* status continuation entry */ + struct work_struct q_work; }; /* Request queue data structure */ @@ -2539,6 +2540,7 @@ struct qla_hw_data { struct qla_chip_state_84xx *cs84xx; struct qla_statistics qla_stats; struct isp_operations *isp_ops; + struct workqueue_struct *wq; }; /* diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 94b69d86482d..7b15ded991cb 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -15,6 +15,7 @@ static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *, struct rsp_que *rsp); static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *); +static void qla25xx_set_que(srb_t *, struct req_que **, struct rsp_que **); /** * qla2x00_get_cmd_direction() - Determine control_flag data direction. * @cmd: SCSI command @@ -726,8 +727,7 @@ qla24xx_start_scsi(srb_t *sp) /* Setup device pointers. 
*/ ret = 0; - req = vha->req; - rsp = ha->rsp_q_map[0]; + qla25xx_set_que(sp, &req, &rsp); sp->que = req; /* So we know we haven't pci_map'ed anything yet */ @@ -850,3 +850,21 @@ queuing_error: return QLA_FUNCTION_FAILED; } + +static void qla25xx_set_que(srb_t *sp, struct req_que **req, + struct rsp_que **rsp) +{ + struct scsi_cmnd *cmd = sp->cmd; + struct scsi_qla_host *vha = sp->fcport->vha; + struct qla_hw_data *ha = sp->fcport->vha->hw; + int affinity = cmd->request->cpu; + + if (ql2xmultique_tag && affinity >= 0 && + affinity < ha->max_rsp_queues - 1) { + *rsp = ha->rsp_q_map[affinity + 1]; + *req = ha->req_q_map[1]; + } else { + *req = vha->req; + *rsp = ha->rsp_q_map[0]; + } +} diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index c8e906c702a1..41e50c2bec0f 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1717,6 +1717,25 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) return IRQ_HANDLED; } +static irqreturn_t +qla25xx_msix_rsp_q(int irq, void *dev_id) +{ + struct qla_hw_data *ha; + struct rsp_que *rsp; + + rsp = (struct rsp_que *) dev_id; + if (!rsp) { + printk(KERN_INFO + "%s(): NULL response queue pointer\n", __func__); + return IRQ_NONE; + } + ha = rsp->hw; + + queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); + + return IRQ_HANDLED; +} + static irqreturn_t qla24xx_msix_default(int irq, void *dev_id) { @@ -1806,9 +1825,10 @@ struct qla_init_msix_entry { irq_handler_t handler; }; -static struct qla_init_msix_entry msix_entries[2] = { +static struct qla_init_msix_entry msix_entries[3] = { { "qla2xxx (default)", qla24xx_msix_default }, { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, + { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, }; static void diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index bfdc89f8569b..366522e8a766 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1497,7 +1497,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - req = vha->req; + if (ql2xmultique_tag) + req = ha->req_q_map[0]; + else + req = vha->req; rsp = req->rsp; lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); @@ -2311,7 +2314,10 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, vha = fcport->vha; ha = vha->hw; req = vha->req; - rsp = req->rsp; + if (ql2xmultique_tag) + rsp = ha->rsp_q_map[tag + 1]; + else + rsp = req->rsp; tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); if (tsk == NULL) { DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 9c08479c3e1b..650bcef08f2a 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -633,6 +633,15 @@ que_failed: return 0; } +static void qla_do_work(struct work_struct *work) +{ + struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); + struct scsi_qla_host *vha; + + vha = qla25xx_get_host(rsp); + qla24xx_process_response_queue(vha, rsp); +} + /* create response queue */ int qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, @@ -711,6 +720,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, rsp->req = NULL; qla2x00_init_response_q_entries(rsp); + if (rsp->hw->wq) + INIT_WORK(&rsp->q_work, qla_do_work); return rsp->id; que_failed: diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e2647e02dac9..d6817df95e30 
100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -96,6 +96,13 @@ MODULE_PARM_DESC(ql2xmaxqueues, "Enables MQ settings " "Default is 1 for single queue. Set it to number \ of queues in MQ mode."); + +int ql2xmultique_tag; +module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR); +MODULE_PARM_DESC(ql2xmultique_tag, + "Enables CPU affinity settings for the driver " + "Default is 0 for no affinity of request and response IO. " + "Set it to 1 to turn on the cpu affinity."); /* * SCSI host template entry points */ @@ -256,6 +263,47 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) ha->rsp_q_map = NULL; } +static int qla25xx_setup_mode(struct scsi_qla_host *vha) +{ + uint16_t options = 0; + int ques, req, ret; + struct qla_hw_data *ha = vha->hw; + + if (ql2xmultique_tag) { + /* CPU affinity mode */ + ha->wq = create_workqueue("qla2xxx_wq"); + /* create a request queue for IO */ + options |= BIT_7; + req = qla25xx_create_req_que(ha, options, 0, 0, -1, + QLA_DEFAULT_QUE_QOS); + if (!req) { + qla_printk(KERN_WARNING, ha, + "Can't create request queue\n"); + goto fail; + } + vha->req = ha->req_q_map[req]; + options |= BIT_1; + for (ques = 1; ques < ha->max_rsp_queues; ques++) { + ret = qla25xx_create_rsp_que(ha, options, 0, 0, req); + if (!ret) { + qla_printk(KERN_WARNING, ha, + "Response Queue create failed\n"); + goto fail2; + } + } + DEBUG2(qla_printk(KERN_INFO, ha, + "CPU affinity mode enabled, no. of response" + " queues:%d, no. of request queues:%d\n", + ha->max_rsp_queues, ha->max_req_queues)); + } + return 0; +fail2: + qla25xx_delete_queues(vha); +fail: + ha->mqenable = 0; + return 1; +} + static char * qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str) { @@ -998,6 +1046,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) if (qla2x00_vp_abort_isp(vha)) goto eh_host_reset_lock; } else { + if (ha->wq) + flush_workqueue(ha->wq); + set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); if (qla2x00_abort_isp(base_vha)) { clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); @@ -1521,6 +1572,7 @@ qla2x00_iospace_config(struct qla_hw_data *ha) { resource_size_t pio; uint16_t msix; + int cpus; if (pci_request_selected_regions(ha->pdev, ha->bars, QLA2XXX_DRIVER_NAME)) { @@ -1575,7 +1627,7 @@ skip_pio: /* Determine queue resources */ ha->max_req_queues = ha->max_rsp_queues = 1; - if (ql2xmaxqueues <= 1 && + if ((ql2xmaxqueues <= 1 || ql2xmultique_tag < 1) && (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) goto mqiobase_exit; ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), @@ -1584,12 +1636,21 @@ skip_pio: /* Read MSIX vector size of the board */ pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); ha->msix_count = msix; - if (ql2xmaxqueues > 1) { + /* Max queues are bounded by available msix vectors */ + /* queue 0 uses two msix vectors */ + if (ql2xmultique_tag) { + cpus = num_online_cpus(); + ha->max_rsp_queues = (ha->msix_count - 1 - cpus) ? + (cpus + 1) : (ha->msix_count - 1); + ha->max_req_queues = 2; + } else if (ql2xmaxqueues > 1) { ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? 
QLA_MQ_SIZE : ql2xmaxqueues; DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no" " of request queues:%d\n", ha->max_req_queues)); } + qla_printk(KERN_INFO, ha, + "MSI-X vector count: %d\n", msix); } else qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n"); @@ -1871,6 +1932,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) goto probe_failed; } + if (ha->mqenable) + if (qla25xx_setup_mode(base_vha)) + qla_printk(KERN_WARNING, ha, + "Can't create queues, falling back to single" + " queue mode\n"); + /* * Startup the kernel thread for this host adapter */ @@ -1982,6 +2049,13 @@ qla2x00_remove_one(struct pci_dev *pdev) base_vha->flags.online = 0; + /* Flush the work queue and remove it */ + if (ha->wq) { + flush_workqueue(ha->wq); + destroy_workqueue(ha->wq); + ha->wq = NULL; + } + /* Kill the kernel thread for this host */ if (ha->dpc_thread) { struct task_struct *t = ha->dpc_thread; -- cgit v1.2.3 From 67c2e93ae7465a3e279503ceddd7bd153d74bcf8 Mon Sep 17 00:00:00 2001 From: Anirban Chakraborty Date: Mon, 6 Apr 2009 22:33:42 -0700 Subject: [SCSI] qla2xxx: Remove reference to request queue from scsi request block. Each srb used to maintain a reference to the request queue on which it was enqueued. This is no longer required as the request queue pointer is now maintained in the scsi host that issues the srb. Signed-off-by: Anirban Chakraborty Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_def.h | 2 -- drivers/scsi/qla2xxx/qla_init.c | 20 ++++++++++++++++---- drivers/scsi/qla2xxx/qla_iocb.c | 22 ++++++++--------------- drivers/scsi/qla2xxx/qla_mbx.c | 2 +- drivers/scsi/qla2xxx/qla_os.c | 6 ++---- 5 files changed, 27 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 09190ba411fd..6911b9b32feb 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -188,7 +188,6 @@ struct req_que; * SCSI Request Block */ typedef struct srb { - struct req_que *que; struct fc_port *fcport; struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ @@ -2010,7 +2009,6 @@ typedef struct vport_params { #define VP_RET_CODE_NOT_FOUND 6 struct qla_hw_data; -struct req_que; struct rsp_que; /* * ISP operations diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 059909c9f29b..4c14cde3295f 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3177,8 +3177,14 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; uint32_t wait_time; - struct req_que *req = vha->req; - struct rsp_que *rsp = req->rsp; + struct req_que *req; + struct rsp_que *rsp; + + if (ql2xmultique_tag) + req = vha->hw->req_q_map[0]; + else + req = vha->req; + rsp = req->rsp; atomic_set(&vha->loop_state, LOOP_UPDATE); clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); @@ -4163,13 +4169,19 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha) uint16_t mb[MAILBOX_REGISTER_COUNT]; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - struct req_que *req = vha->req; - struct rsp_que *rsp = req->rsp; + struct req_que *req; + struct rsp_que *rsp; if (!vha->vp_idx) return -EINVAL; rval = qla2x00_fw_ready(base_vha); + if (ql2xmultique_tag) + req = ha->req_q_map[0]; + else + req = vha->req; + rsp = req->rsp; + if (rval == QLA_SUCCESS) { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); diff --git a/drivers/scsi/qla2xxx/qla_iocb.c 
b/drivers/scsi/qla2xxx/qla_iocb.c index 7b15ded991cb..b4c6010ee5fa 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -93,9 +93,10 @@ qla2x00_calc_iocbs_64(uint16_t dsds) * Returns a pointer to the Continuation Type 0 IOCB packet. */ static inline cont_entry_t * -qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha) +qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha) { cont_entry_t *cont_pkt; + struct req_que *req = vha->req; /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { @@ -121,10 +122,11 @@ qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha) * Returns a pointer to the continuation type 1 IOCB packet. */ static inline cont_a64_entry_t * -qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha) +qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha) { cont_a64_entry_t *cont_pkt; + struct req_que *req = vha->req; /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { @@ -160,7 +162,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, struct scsi_cmnd *cmd; struct scatterlist *sg; int i; - struct req_que *req; cmd = sp->cmd; @@ -175,8 +176,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, } vha = sp->fcport->vha; - req = sp->que; - cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); /* Three DSDs are available in the Command Type 2 IOCB */ @@ -193,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, * Seven DSDs are available in the Continuation * Type 0 IOCB. */ - cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha); + cont_pkt = qla2x00_prep_cont_type0_iocb(vha); cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; avail_dsds = 7; } @@ -221,7 +220,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, struct scsi_cmnd *cmd; struct scatterlist *sg; int i; - struct req_que *req; cmd = sp->cmd; @@ -236,8 +234,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, } vha = sp->fcport->vha; - req = sp->que; - cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); /* Two DSDs are available in the Command Type 3 IOCB */ @@ -255,7 +251,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, * Five DSDs are available in the Continuation * Type 1 IOCB. */ - cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); + cont_pkt = qla2x00_prep_cont_type1_iocb(vha); cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; avail_dsds = 5; } @@ -354,7 +350,6 @@ qla2x00_start_scsi(srb_t *sp) /* Build command packet */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; - sp->que = req; sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; @@ -655,7 +650,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, } vha = sp->fcport->vha; - req = sp->que; + req = vha->req; /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { @@ -686,7 +681,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, * Five DSDs are available in the Continuation * Type 1 IOCB. 
*/ - cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); + cont_pkt = qla2x00_prep_cont_type1_iocb(vha); cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; avail_dsds = 5; } @@ -728,7 +723,6 @@ qla24xx_start_scsi(srb_t *sp) ret = 0; qla25xx_set_que(sp, &req, &rsp); - sp->que = req; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 366522e8a766..258c39d8f448 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2232,7 +2232,7 @@ qla24xx_abort_command(srb_t *sp) fc_port_t *fcport = sp->fcport; struct scsi_qla_host *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; - struct req_que *req = sp->que; + struct req_que *req = vha->req; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index d6817df95e30..94e53a5fd9aa 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -441,7 +441,6 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport, sp->fcport = fcport; sp->cmd = cmd; - sp->que = ha->req_q_map[0]; sp->flags = 0; CMD_SP(cmd) = (void *)sp; cmd->scsi_done = done; @@ -742,7 +741,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) unsigned long flags; int wait = 0; struct qla_hw_data *ha = vha->hw; - struct req_que *req; + struct req_que *req = vha->req; srb_t *spt; qla2x00_block_error_handler(cmd); @@ -758,7 +757,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) spt = (srb_t *) CMD_SP(cmd); if (!spt) return SUCCESS; - req = spt->que; /* Check active list for command command. */ spin_lock_irqsave(&ha->hardware_lock, flags); @@ -826,7 +824,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, return status; spin_lock_irqsave(&ha->hardware_lock, flags); - req = sp->que; + req = vha->req; for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { sp = req->outstanding_cmds[cnt]; -- cgit v1.2.3 From 8f979751367b9975fe606bce6a64b9d871dcfcfa Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Mon, 6 Apr 2009 22:33:43 -0700 Subject: [SCSI] qla2xxx: Correct typo in read_nvram() callback. Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index f3536e56dce4..fc30f8e2f467 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -97,7 +97,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj, return 0; if (IS_NOCACHE_VPD_TYPE(ha)) - ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_nvram << 2, + ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2, ha->nvram_size); return memory_read_from_buffer(buf, count, &off, ha->nvram, ha->nvram_size); -- cgit v1.2.3 From fc3ea9bcb86a1c5126807f747291563e08405944 Mon Sep 17 00:00:00 2001 From: Harish Zunjarrao Date: Mon, 6 Apr 2009 22:33:44 -0700 Subject: [SCSI] qla2xxx: Correct hard-coded address of a second-port's NVRAM. 
Signed-off-by: Harish Zunjarrao Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_fw.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 96ccb9642ba0..f389f3da0bab 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1711,7 +1711,7 @@ struct ex_init_cb_81xx { #define FA_VPD0_ADDR_81 0xD0000 #define FA_VPD1_ADDR_81 0xD0400 #define FA_NVRAM0_ADDR_81 0xD0080 -#define FA_NVRAM1_ADDR_81 0xD0480 +#define FA_NVRAM1_ADDR_81 0xD0180 #define FA_FEATURE_ADDR_81 0xD4000 #define FA_FLASH_DESCR_ADDR_81 0xD8000 #define FA_FLASH_LAYOUT_ADDR_81 0xD8400 -- cgit v1.2.3 From 7d0dba174af217c73931532adf6bffb91d16c40f Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Mon, 6 Apr 2009 22:33:45 -0700 Subject: [SCSI] qla2xxx: Restrict model-name/description device-table usage. Information present in static table is only valid for pre-ISP25xx adapters. Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_init.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 4c14cde3295f..a4a6a146fccd 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1538,6 +1538,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, char *st, *en; uint16_t index; struct qla_hw_data *ha = vha->hw; + int use_tbl = !IS_QLA25XX(ha) && IS_QLA81XX(ha); if (memcmp(model, BINZERO, len) != 0) { strncpy(ha->model_number, model, len); @@ -1550,14 +1551,16 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, } index = (ha->pdev->subsystem_device & 0xff); - if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && + if (use_tbl && + ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && index < QLA_MODEL_NAMES) strncpy(ha->model_desc, qla2x00_model_name[index * 2 + 1], sizeof(ha->model_desc) - 1); } else { index = (ha->pdev->subsystem_device & 0xff); - if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && + if (use_tbl && + ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && index < QLA_MODEL_NAMES) { strcpy(ha->model_number, qla2x00_model_name[index * 2]); -- cgit v1.2.3 From e1f916035f149540e5090207ceafca9ba779084e Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Mon, 6 Apr 2009 22:33:46 -0700 Subject: [SCSI] qla2xxx: Perform an implicit login to the Management Server. Set the conditional plogi option bit whenever logging in the fabric management server (if it is already logged in, it does not need an explicit login; an implicit login suffices). 
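For illustration, a minimal stand-alone sketch of what a conditional login means; the names here (fabric_target, send_plogi, OPT_COND_PLOGI) are hypothetical, and only the BIT_1|BIT_0 mailbox option in the diff below comes from the driver itself:

#include <stdbool.h>
#include <stdio.h>

#define OPT_COND_PLOGI (1u << 0)	/* hypothetical flag mirroring the BIT_0 set below */

struct fabric_target {
	bool logged_in;			/* target already has a session with us? */
};

static int send_plogi(struct fabric_target *t)
{
	t->logged_in = true;
	puts("explicit PLOGI sent");
	return 0;
}

/* With the conditional option set, an already-logged-in target keeps its
 * existing session: the implicit login suffices and no explicit PLOGI
 * goes out on the wire. */
static int fabric_login(struct fabric_target *t, unsigned int opts)
{
	if ((opts & OPT_COND_PLOGI) && t->logged_in)
		return 0;
	return send_plogi(t);
}

int main(void)
{
	struct fabric_target mgmt_svr = { .logged_in = true };

	fabric_login(&mgmt_svr, OPT_COND_PLOGI);	/* prints nothing: implicit login */
	return 0;
}
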
Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_gs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 557f58d5bf88..3dbb9e73e80a 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -1107,7 +1107,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha) return ret; ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, - mb, BIT_1); + mb, BIT_1|BIT_0); if (mb[0] != MBS_COMMAND_COMPLETE) { DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n", -- cgit v1.2.3 From a13d8ac057705c479b8bf15e5303f18f899502f9 Mon Sep 17 00:00:00 2001 From: Michael Reed Date: Mon, 6 Apr 2009 22:33:47 -0700 Subject: [SCSI] qla2xxx: Conditionally disable automatic queue full tracking. Changing a lun's queue depth (/sys/block/sdX/device/queue_depth) isn't sticky when the device is connected via a QLogic fibre channel adapter. The QLogic qla2xxx fibre channel driver dynamically adjusts a lun's queue depth. If a user has a specific need to limit the number of commands issued to a lun (say a tape drive, or a shared raid where the total commands issued to all luns is limited at the controller level, for example) and writes a limiting value to /sys/block/sdX/device/queue_depth, the qla2xxx driver will silently and gradually increase the queue depth back to the driver limit of ql2xmaxqdepth. While reducing this value (a module parameter) or increasing the interval between ramp ups (ql2xqfullrampup) offers a potential workaround, it would be better to have the option of just disabling the dynamic adjustment of queue depth. This patch implements an "off switch" as a module parameter. Signed-off-by: Michael Reed Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_gbl.h | 1 + drivers/scsi/qla2xxx/qla_isr.c | 10 ++++++++++ drivers/scsi/qla2xxx/qla_os.c | 8 ++++++++ 3 files changed, 19 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index b12de0176246..5347e35e7d61 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -65,6 +65,7 @@ extern int ql2xfdmienable; extern int ql2xallocfwdump; extern int ql2xextended_error_logging; extern int ql2xqfullrampup; +extern int ql2xqfulltracking; extern int ql2xiidmaenable; extern int ql2xmaxqueues; extern int ql2xmultique_tag; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 41e50c2bec0f..eb35d2050f7a 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -765,6 +765,9 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data) struct qla_hw_data *ha = vha->hw; struct req_que *req = NULL; + if (!ql2xqfulltracking) + return; + req = vha->req; if (!req) return; @@ -807,6 +810,9 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req, fc_port_t *fcport; struct scsi_device *sdev; + if (!ql2xqfulltracking) + return; + sdev = sp->cmd->device; if (sdev->queue_depth >= req->max_q_depth) return; @@ -1125,6 +1131,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) scsi_status)); /* Adjust queue depth for all luns on the port. 
*/ + if (!ql2xqfulltracking) + break; fcport->last_queue_full = jiffies; starget_for_each_device(cp->device->sdev_target, fcport, qla2x00_adjust_sdev_qdepth_down); @@ -1183,6 +1191,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) * Adjust queue depth for all luns on the * port. */ + if (!ql2xqfulltracking) + break; fcport->last_queue_full = jiffies; starget_for_each_device( cp->device->sdev_target, fcport, diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 94e53a5fd9aa..155a204ed8e5 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -77,6 +77,14 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xmaxqdepth, "Maximum queue depth to report for target devices."); +int ql2xqfulltracking = 1; +module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xqfulltracking, + "Controls whether the driver tracks queue full status " + "returns and dynamically adjusts a scsi device's queue " + "depth. Default is 1, perform tracking. Set to 0 to " + "disable dynamic tracking and adjustment of queue depth."); + int ql2xqfullrampup = 120; module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xqfullrampup, -- cgit v1.2.3 From b469a7cbe9bf68939c90f4ac6bc2bb99e47d7229 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Mon, 6 Apr 2009 22:33:48 -0700 Subject: [SCSI] qla2xxx: Don't try to 'stop' firmware if already in ROM code. Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_init.c | 2 +- drivers/scsi/qla2xxx/qla_mbx.c | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index a4a6a146fccd..138ae88e9e4d 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -4153,7 +4153,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) ret = qla2x00_stop_firmware(vha); for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && - retries ; retries--) { + ret != QLA_INVALID_COMMAND && retries ; retries--) { ha->isp_ops->reset_chip(vha); if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) continue; diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 258c39d8f448..7d0eeec9ba57 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2477,6 +2477,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha) if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, vha->host_no, rval)); + if (mcp->mb[0] == MBS_INVALID_COMMAND) + rval = QLA_INVALID_COMMAND; } else { DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } -- cgit v1.2.3 From e337d9070e5821e7c8e5973679bdd34376263bd1 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Mon, 6 Apr 2009 22:33:49 -0700 Subject: [SCSI] qla2xxx: Add an override option to specify ISP firmware load semantics. As it may be useful during debugging to use a specific firmware image. 
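A minimal sketch of the override logic this adds, using hypothetical stand-alone names (the real entry points are qla24xx_load_risc() and qla81xx_load_risc() in the diff that follows); the value meanings match the new MODULE_PARM_DESC below, 0 = default semantics, 1 = flash, 2 = request_firmware():

#include <stdio.h>

static int fwloadbin;	/* stands in for the ql2xfwloadbin module parameter */

static int load_from_flash(void)
{
	puts("loading firmware from flash");
	return 0;
}

static int load_via_hotplug(void)
{
	puts("loading firmware via request_firmware()");
	return 0;
}

/* An ISP whose default is the request_firmware() path can be forced
 * onto the flash path with fwloadbin == 1 ... */
static int load_risc_default_hotplug(void)
{
	if (fwloadbin == 1)
		return load_from_flash();
	return load_via_hotplug();	/* default semantics */
}

/* ... and an ISP whose default is flash can be forced onto the
 * request_firmware() path with fwloadbin == 2. */
static int load_risc_default_flash(void)
{
	if (fwloadbin == 2)
		return load_via_hotplug();
	return load_from_flash();	/* default semantics */
}

int main(void)
{
	fwloadbin = 1;
	load_risc_default_hotplug();	/* override: flash image wins */
	fwloadbin = 0;
	load_risc_default_flash();	/* default: flash for this ISP */
	return 0;
}
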
Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_gbl.h | 1 + drivers/scsi/qla2xxx/qla_init.c | 6 ++++++ drivers/scsi/qla2xxx/qla_os.c | 10 ++++++++++ 3 files changed, 17 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 5347e35e7d61..f17d525897a0 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -69,6 +69,7 @@ extern int ql2xqfulltracking; extern int ql2xiidmaenable; extern int ql2xmaxqueues; extern int ql2xmultique_tag; +extern int ql2xfwloadbin; extern int qla2x00_loop_reset(scsi_qla_host_t *); extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 138ae88e9e4d..4952fc19aa20 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -4111,6 +4111,9 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval; + if (ql2xfwloadbin == 1) + return qla81xx_load_risc(vha, srisc_addr); + /* * FW Load priority: * 1) Firmware via request-firmware interface (.bin file). @@ -4128,6 +4131,9 @@ qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval; + if (ql2xfwloadbin == 2) + return qla24xx_load_risc(vha, srisc_addr); + /* * FW Load priority: * 1) Firmware residing in flash. diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 155a204ed8e5..88a75d0a8d7e 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -111,6 +111,16 @@ MODULE_PARM_DESC(ql2xmultique_tag, "Enables CPU affinity settings for the driver " "Default is 0 for no affinity of request and response IO. " "Set it to 1 to turn on the cpu affinity."); + +int ql2xfwloadbin; +module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR); +MODULE_PARM_DESC(ql2xfwloadbin, + "Option to specify location from which to load ISP firmware:\n" + " 2 -- load firmware via the request_firmware() (hotplug)\n" + " interface.\n" + " 1 -- load firmware from flash.\n" + " 0 -- use default semantics.\n"); + /* * SCSI host template entry points */ -- cgit v1.2.3 From e5b68a61e15ca8e200c60cfd4dbe1818e6beb4e1 Mon Sep 17 00:00:00 2001 From: Anirban Chakraborty Date: Mon, 6 Apr 2009 22:33:50 -0700 Subject: [SCSI] qla2xxx: Use port number to compute nvram/vpd parameter offsets. Read adapter's physical port number from interrupt pin register and use it instead of pci function number to offset into the nvram to obtain the port's configuration parameters. Signed-off-by: Anirban Chakraborty Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_def.h | 4 ++++ drivers/scsi/qla2xxx/qla_init.c | 19 ++++++++++--------- drivers/scsi/qla2xxx/qla_os.c | 7 +++++++ drivers/scsi/qla2xxx/qla_sup.c | 18 +++++++++--------- 4 files changed, 30 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 6911b9b32feb..721bae94e437 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2222,6 +2222,7 @@ struct qla_hw_data { uint32_t fce_enabled :1; uint32_t fac_supported :1; uint32_t chip_reset_done :1; + uint32_t port0 :1; } flags; /* This spinlock is used to protect "io transactions", you must @@ -2256,6 +2257,9 @@ struct qla_hw_data { #define FLOGI_MID_SUPPORT BIT_10 #define FLOGI_VSAN_SUPPORT BIT_12 #define FLOGI_SP_SUPPORT BIT_13 + + uint8_t port_no; /* Physical port of adapter */ + /* Timeout timers. 
*/ uint8_t loop_down_abort_time; /* port down timer */ atomic_t loop_down_timer; /* loop down timer */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 4952fc19aa20..33e924810666 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3573,14 +3573,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) nv = ha->nvram; /* Determine NVRAM starting address. */ - ha->nvram_size = sizeof(struct nvram_24xx); - ha->nvram_base = FA_NVRAM_FUNC0_ADDR; - ha->vpd_size = FA_NVRAM_VPD_SIZE; - ha->vpd_base = FA_NVRAM_VPD0_ADDR; - if (PCI_FUNC(ha->pdev->devfn)) { + if (ha->flags.port0) { + ha->nvram_base = FA_NVRAM_FUNC0_ADDR; + ha->vpd_base = FA_NVRAM_VPD0_ADDR; + } else { ha->nvram_base = FA_NVRAM_FUNC1_ADDR; ha->vpd_base = FA_NVRAM_VPD1_ADDR; } + ha->nvram_size = sizeof(struct nvram_24xx); + ha->vpd_size = FA_NVRAM_VPD_SIZE; /* Get VPD data into cache */ ha->vpd = ha->nvram + VPD_OFFSET; @@ -3619,7 +3620,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) nv->exchange_count = __constant_cpu_to_le16(0); nv->hard_address = __constant_cpu_to_le16(124); nv->port_name[0] = 0x21; - nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); + nv->port_name[1] = 0x00 + ha->port_no; nv->port_name[2] = 0x00; nv->port_name[3] = 0xe0; nv->port_name[4] = 0x8b; @@ -4348,7 +4349,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); nv->exchange_count = __constant_cpu_to_le16(0); nv->port_name[0] = 0x21; - nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); + nv->port_name[1] = 0x00 + ha->port_no; nv->port_name[2] = 0x00; nv->port_name[3] = 0xe0; nv->port_name[4] = 0x8b; @@ -4382,7 +4383,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) nv->enode_mac[2] = 0x03; nv->enode_mac[3] = 0x04; nv->enode_mac[4] = 0x05; - nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); + nv->enode_mac[5] = 0x06 + ha->port_no; rval = 1; } @@ -4415,7 +4416,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) icb->enode_mac[2] = 0x03; icb->enode_mac[3] = 0x04; icb->enode_mac[4] = 0x05; - icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); + icb->enode_mac[5] = 0x06 + ha->port_no; } /* Use extended-initialization control block. */ diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 88a75d0a8d7e..f4f535536952 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1581,6 +1581,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; } + + /* Get adapter physical port no from interrupt pin register. 
*/ + pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no); + if (ha->port_no & 1) + ha->flags.port0 = 1; + else + ha->flags.port0 = 0; } static int diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 2a9b3f83ba67..22f97eb50cf9 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -702,30 +702,30 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) break; case FLT_REG_VPD_0: ha->flt_region_vpd_nvram = start; - if (!(PCI_FUNC(ha->pdev->devfn) & 1)) + if (ha->flags.port0) ha->flt_region_vpd = start; break; case FLT_REG_VPD_1: - if (PCI_FUNC(ha->pdev->devfn) & 1) + if (!ha->flags.port0) ha->flt_region_vpd = start; break; case FLT_REG_NVRAM_0: - if (!(PCI_FUNC(ha->pdev->devfn) & 1)) + if (ha->flags.port0) ha->flt_region_nvram = start; break; case FLT_REG_NVRAM_1: - if (PCI_FUNC(ha->pdev->devfn) & 1) + if (!ha->flags.port0) ha->flt_region_nvram = start; break; case FLT_REG_FDT: ha->flt_region_fdt = start; break; case FLT_REG_NPIV_CONF_0: - if (!(PCI_FUNC(ha->pdev->devfn) & 1)) + if (ha->flags.port0) ha->flt_region_npiv_conf = start; break; case FLT_REG_NPIV_CONF_1: - if (PCI_FUNC(ha->pdev->devfn) & 1) + if (!ha->flags.port0) ha->flt_region_npiv_conf = start; break; } @@ -745,12 +745,12 @@ no_flash_data: ha->flt_region_fw = def_fw[def]; ha->flt_region_boot = def_boot[def]; ha->flt_region_vpd_nvram = def_vpd_nvram[def]; - ha->flt_region_vpd = !(PCI_FUNC(ha->pdev->devfn) & 1) ? + ha->flt_region_vpd = ha->flags.port0 ? def_vpd0[def]: def_vpd1[def]; - ha->flt_region_nvram = !(PCI_FUNC(ha->pdev->devfn) & 1) ? + ha->flt_region_nvram = ha->flags.port0 ? def_nvram0[def]: def_nvram1[def]; ha->flt_region_fdt = def_fdt[def]; - ha->flt_region_npiv_conf = !(PCI_FUNC(ha->pdev->devfn) & 1) ? + ha->flt_region_npiv_conf = ha->flags.port0 ? def_npiv_conf0[def]: def_npiv_conf1[def]; done: DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " -- cgit v1.2.3 From a03706017e443ced6e354d434142989c9e8653d7 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Mon, 6 Apr 2009 22:33:51 -0700 Subject: [SCSI] qla2xxx: Update version number to 8.03.01-k2. Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 19d1afc3a343..a1094e7d2b44 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.03.01-k1" +#define QLA2XXX_VERSION "8.03.01-k2" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 3 -- cgit v1.2.3 From 2ad52f473bbc1aa5b33c4a329b8a359f125e19d1 Mon Sep 17 00:00:00 2001 From: Jeff Garzik Date: Fri, 8 May 2009 16:35:37 -0400 Subject: [SCSI] mvsas: move into new directory drivers/scsi/mvsas/ Zero functional changes, just file movement. This commit prepares for the upcoming integration of the Marvell-provided driver update that splits the driver into support for both 64xx and 94xx chip families. 
Signed-off-by: Jeff Garzik Signed-off-by: James Bottomley --- drivers/scsi/Kconfig | 11 +- drivers/scsi/Makefile | 2 +- drivers/scsi/mvsas.c | 3222 ------------------------------------------- drivers/scsi/mvsas/Kconfig | 35 + drivers/scsi/mvsas/Makefile | 26 + drivers/scsi/mvsas/mv_sas.c | 3222 +++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 3285 insertions(+), 3233 deletions(-) delete mode 100644 drivers/scsi/mvsas.c create mode 100644 drivers/scsi/mvsas/Kconfig create mode 100644 drivers/scsi/mvsas/Makefile create mode 100644 drivers/scsi/mvsas/mv_sas.c (limited to 'drivers') diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index fb2740789b68..6e8106a70b3d 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -508,6 +508,7 @@ config SCSI_AIC7XXX_OLD source "drivers/scsi/aic7xxx/Kconfig.aic79xx" source "drivers/scsi/aic94xx/Kconfig" +source "drivers/scsi/mvsas/Kconfig" config SCSI_DPT_I2O tristate "Adaptec I2O RAID support " @@ -1050,16 +1051,6 @@ config SCSI_IZIP_SLOW_CTR Generally, saying N is fine. -config SCSI_MVSAS - tristate "Marvell 88SE6440 SAS/SATA support" - depends on PCI && SCSI - select SCSI_SAS_LIBSAS - help - This driver supports Marvell SAS/SATA PCI devices. - - To compiler this driver as a module, choose M here: the module - will be called mvsas. - config SCSI_NCR53C406A tristate "NCR53c406a SCSI support" depends on ISA && SCSI diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index a5049cfb40ed..8795c309963e 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -126,7 +126,7 @@ obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/ obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/ obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o obj-$(CONFIG_SCSI_STEX) += stex.o -obj-$(CONFIG_SCSI_MVSAS) += mvsas.o +obj-$(CONFIG_SCSI_MVSAS) += mvsas/ obj-$(CONFIG_PS3_ROM) += ps3rom.o obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c deleted file mode 100644 index e4acebd10d1b..000000000000 --- a/drivers/scsi/mvsas.c +++ /dev/null @@ -1,3222 +0,0 @@ -/* - mvsas.c - Marvell 88SE6440 SAS/SATA support - - Copyright 2007 Red Hat, Inc. - Copyright 2008 Marvell. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2, - or (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public - License along with this program; see the file COPYING. If not, - write to the Free Software Foundation, 675 Mass Ave, Cambridge, - MA 02139, USA. - - --------------------------------------------------------------- - - Random notes: - * hardware supports controlling the endian-ness of data - structures. this permits elimination of all the le32_to_cpu() - and cpu_to_le32() conversions. 
- - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DRV_NAME "mvsas" -#define DRV_VERSION "0.5.2" -#define _MV_DUMP 0 -#define MVS_DISABLE_NVRAM -#define MVS_DISABLE_MSI - -#define mr32(reg) readl(regs + MVS_##reg) -#define mw32(reg,val) writel((val), regs + MVS_##reg) -#define mw32_f(reg,val) do { \ - writel((val), regs + MVS_##reg); \ - readl(regs + MVS_##reg); \ - } while (0) - -#define MVS_ID_NOT_MAPPED 0x7f -#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) - -/* offset for D2H FIS in the Received FIS List Structure */ -#define SATA_RECEIVED_D2H_FIS(reg_set) \ - ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40) -#define SATA_RECEIVED_PIO_FIS(reg_set) \ - ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20) -#define UNASSOC_D2H_FIS(id) \ - ((void *) mvi->rx_fis + 0x100 * id) - -#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \ - for ((__mc) = (__lseq_mask), (__lseq) = 0; \ - (__mc) != 0 && __rest; \ - (++__lseq), (__mc) >>= 1) - -/* driver compile-time configuration */ -enum driver_configuration { - MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ - MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ - /* software requires power-of-2 - ring size */ - - MVS_SLOTS = 512, /* command slots */ - MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */ - MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ - MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ - MVS_OAF_SZ = 64, /* Open address frame buffer size */ - - MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */ - - MVS_QUEUE_SIZE = 30, /* Support Queue depth */ - MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */ -}; - -/* unchangeable hardware details */ -enum hardware_details { - MVS_MAX_PHYS = 8, /* max. possible phys */ - MVS_MAX_PORTS = 8, /* max. 
possible ports */ - MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100), -}; - -/* peripheral registers (BAR2) */ -enum peripheral_registers { - SPI_CTL = 0x10, /* EEPROM control */ - SPI_CMD = 0x14, /* EEPROM command */ - SPI_DATA = 0x18, /* EEPROM data */ -}; - -enum peripheral_register_bits { - TWSI_RDY = (1U << 7), /* EEPROM interface ready */ - TWSI_RD = (1U << 4), /* EEPROM read access */ - - SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */ -}; - -/* enhanced mode registers (BAR4) */ -enum hw_registers { - MVS_GBL_CTL = 0x04, /* global control */ - MVS_GBL_INT_STAT = 0x08, /* global irq status */ - MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ - MVS_GBL_PORT_TYPE = 0xa0, /* port type */ - - MVS_CTL = 0x100, /* SAS/SATA port configuration */ - MVS_PCS = 0x104, /* SAS/SATA port control/status */ - MVS_CMD_LIST_LO = 0x108, /* cmd list addr */ - MVS_CMD_LIST_HI = 0x10C, - MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */ - MVS_RX_FIS_HI = 0x114, - - MVS_TX_CFG = 0x120, /* TX configuration */ - MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ - MVS_TX_HI = 0x128, - - MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */ - MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */ - MVS_RX_CFG = 0x134, /* RX configuration */ - MVS_RX_LO = 0x138, /* RX (completion) ring addr */ - MVS_RX_HI = 0x13C, - MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */ - - MVS_INT_COAL = 0x148, /* Int coalescing config */ - MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ - MVS_INT_STAT = 0x150, /* Central int status */ - MVS_INT_MASK = 0x154, /* Central int enable */ - MVS_INT_STAT_SRS = 0x158, /* SATA register set status */ - MVS_INT_MASK_SRS = 0x15C, - - /* ports 1-3 follow after this */ - MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */ - MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */ - MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */ - MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */ - - /* ports 1-3 follow after this */ - MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */ - MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */ - - MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */ - MVS_CMD_DATA = 0x1BC, /* Command register port (data) */ - - /* ports 1-3 follow after this */ - MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */ - MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */ - MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */ - MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */ - - /* ports 1-3 follow after this */ - MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */ - MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */ - MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */ - MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */ -}; - -enum hw_register_bits { - /* MVS_GBL_CTL */ - INT_EN = (1U << 1), /* Global int enable */ - HBA_RST = (1U << 0), /* HBA reset */ - - /* MVS_GBL_INT_STAT */ - INT_XOR = (1U << 4), /* XOR engine event */ - INT_SAS_SATA = (1U << 0), /* SAS/SATA event */ - - /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */ - SATA_TARGET = (1U << 16), /* port0 SATA target enable */ - MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */ - MODE_AUTO_DET_PORT6 = (1U << 14), - MODE_AUTO_DET_PORT5 = (1U << 13), - MODE_AUTO_DET_PORT4 = (1U << 12), - MODE_AUTO_DET_PORT3 = (1U << 11), - MODE_AUTO_DET_PORT2 = (1U << 10), - MODE_AUTO_DET_PORT1 = (1U << 9), - MODE_AUTO_DET_PORT0 = (1U << 8), - MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 | - MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 | - MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 
| - MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7, - MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */ - MODE_SAS_PORT6_MASK = (1U << 6), - MODE_SAS_PORT5_MASK = (1U << 5), - MODE_SAS_PORT4_MASK = (1U << 4), - MODE_SAS_PORT3_MASK = (1U << 3), - MODE_SAS_PORT2_MASK = (1U << 2), - MODE_SAS_PORT1_MASK = (1U << 1), - MODE_SAS_PORT0_MASK = (1U << 0), - MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK | - MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK | - MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK | - MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK, - - /* SAS_MODE value may be - * dictated (in hw) by values - * of SATA_TARGET & AUTO_DET - */ - - /* MVS_TX_CFG */ - TX_EN = (1U << 16), /* Enable TX */ - TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */ - - /* MVS_RX_CFG */ - RX_EN = (1U << 16), /* Enable RX */ - RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */ - - /* MVS_INT_COAL */ - COAL_EN = (1U << 16), /* Enable int coalescing */ - - /* MVS_INT_STAT, MVS_INT_MASK */ - CINT_I2C = (1U << 31), /* I2C event */ - CINT_SW0 = (1U << 30), /* software event 0 */ - CINT_SW1 = (1U << 29), /* software event 1 */ - CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */ - CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */ - CINT_MEM = (1U << 26), /* int mem parity err */ - CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */ - CINT_SRS = (1U << 3), /* SRS event */ - CINT_CI_STOP = (1U << 1), /* cmd issue stopped */ - CINT_DONE = (1U << 0), /* cmd completion */ - - /* shl for ports 1-3 */ - CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */ - CINT_PORT = (1U << 8), /* port0 event */ - CINT_PORT_MASK_OFFSET = 8, - CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET), - - /* TX (delivery) ring bits */ - TXQ_CMD_SHIFT = 29, - TXQ_CMD_SSP = 1, /* SSP protocol */ - TXQ_CMD_SMP = 2, /* SMP protocol */ - TXQ_CMD_STP = 3, /* STP/SATA protocol */ - TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ - TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ - TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ - TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */ - TXQ_SRS_SHIFT = 20, /* SATA register set */ - TXQ_SRS_MASK = 0x7f, - TXQ_PHY_SHIFT = 12, /* PHY bitmap */ - TXQ_PHY_MASK = 0xff, - TXQ_SLOT_MASK = 0xfff, /* slot number */ - - /* RX (completion) ring bits */ - RXQ_GOOD = (1U << 23), /* Response good */ - RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */ - RXQ_CMD_RX = (1U << 20), /* target cmd received */ - RXQ_ATTN = (1U << 19), /* attention */ - RXQ_RSP = (1U << 18), /* response frame xfer'd */ - RXQ_ERR = (1U << 17), /* err info rec xfer'd */ - RXQ_DONE = (1U << 16), /* cmd complete */ - RXQ_SLOT_MASK = 0xfff, /* slot number */ - - /* mvs_cmd_hdr bits */ - MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */ - MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */ - - /* SSP initiator only */ - MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */ - - /* SSP initiator or target */ - MCH_SSP_FR_TASK = 0x1, /* TASK frame */ - - /* SSP target only */ - MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */ - MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */ - MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */ - MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */ - - MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */ - MCH_FBURST = (1U << 11), /* first burst (SSP) */ - MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */ - MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */ - MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */ - MCH_RESET = (1U << 7), /* Reset (STP/SATA) */ - MCH_FPDMA = (1U << 
6), /* First party DMA (STP/SATA) */ - MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */ - MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */ - MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/ - - CCTL_RST = (1U << 5), /* port logic reset */ - - /* 0(LSB first), 1(MSB first) */ - CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */ - CCTL_ENDIAN_RSP = (1U << 2), /* response frame */ - CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */ - CCTL_ENDIAN_CMD = (1U << 0), /* command table */ - - /* MVS_Px_SER_CTLSTAT (per-phy control) */ - PHY_SSP_RST = (1U << 3), /* reset SSP link layer */ - PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */ - PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */ - PHY_RST = (1U << 0), /* phy reset */ - PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8), - PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12), - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), - PHY_NEG_SPP_PHYS_LINK_RATE_MASK = - (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), - PHY_READY_MASK = (1U << 20), - - /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */ - PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */ - PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */ - PHYEV_AN = (1U << 18), /* SATA async notification */ - PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */ - PHYEV_SIG_FIS = (1U << 16), /* signature FIS */ - PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */ - PHYEV_IU_BIG = (1U << 11), /* IU too long err */ - PHYEV_IU_SMALL = (1U << 10), /* IU too short err */ - PHYEV_UNK_TAG = (1U << 9), /* unknown tag */ - PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */ - PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */ - PHYEV_PORT_SEL = (1U << 6), /* port selector present */ - PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */ - PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */ - PHYEV_ID_FAIL = (1U << 3), /* identify failed */ - PHYEV_ID_DONE = (1U << 2), /* identify done */ - PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */ - PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */ - - /* MVS_PCS */ - PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */ - PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */ - PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */ - PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ - PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ - PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ - PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ - PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ - PCS_CMD_RST = (1U << 1), /* reset cmd issue */ - PCS_CMD_EN = (1U << 0), /* enable cmd issue */ - - /* Port n Attached Device Info */ - PORT_DEV_SSP_TRGT = (1U << 19), - PORT_DEV_SMP_TRGT = (1U << 18), - PORT_DEV_STP_TRGT = (1U << 17), - PORT_DEV_SSP_INIT = (1U << 11), - PORT_DEV_SMP_INIT = (1U << 10), - PORT_DEV_STP_INIT = (1U << 9), - PORT_PHY_ID_MASK = (0xFFU << 24), - PORT_DEV_TRGT_MASK = (0x7U << 17), - PORT_DEV_INIT_MASK = (0x7U << 9), - PORT_DEV_TYPE_MASK = (0x7U << 0), - - /* Port n PHY Status */ - PHY_RDY = (1U << 2), - PHY_DW_SYNC = (1U << 1), - PHY_OOB_DTCTD = (1U << 0), - - /* VSR */ - /* PHYMODE 6 (CDB) */ - PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */ - PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */ - PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/ - PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */ - PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */ - PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */ - PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select 
(final) */ - PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */ - PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */ - PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */ - PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */ - PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */ - PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */ - PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */ -}; - -enum mvs_info_flags { - MVF_MSI = (1U << 0), /* MSI is enabled */ - MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ -}; - -enum sas_cmd_port_registers { - CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */ - CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */ - CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */ - CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */ - CMD_OOB_SPACE = 0x110, /* OOB space control register */ - CMD_OOB_BURST = 0x114, /* OOB burst control register */ - CMD_PHY_TIMER = 0x118, /* PHY timer control register */ - CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */ - CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */ - CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */ - CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */ - CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */ - CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */ - CMD_ID_TEST = 0x134, /* ID test register */ - CMD_PL_TIMER = 0x138, /* PL timer register */ - CMD_WD_TIMER = 0x13c, /* WD timer register */ - CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */ - CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */ - CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */ - CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */ - CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */ - CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */ - CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */ - CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */ - CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */ - CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memroy BIST Status */ - CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */ - CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */ - CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */ - CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */ - CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */ - CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */ - CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */ - CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */ - CMD_RESET_COUNT = 0x188, /* Reset Count */ - CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */ - CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */ - CMD_PHY_CTL = 0x194, /* PHY Control and Status */ - CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */ - CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */ - CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */ - CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */ - CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */ - CMD_HOST_CTL = 0x1AC, /* Host Control Status */ - CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */ - CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */ - CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */ - CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */ - CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */ - CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */ -}; - -/* SAS/SATA configuration port registers, aka phy registers */ -enum 
sas_sata_config_port_regs { - PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */ - PHYR_ADDR_LO = 0x04, /* my SAS address (low) */ - PHYR_ADDR_HI = 0x08, /* my SAS address (high) */ - PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */ - PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */ - PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */ - PHYR_SATA_CTL = 0x18, /* SATA control */ - PHYR_PHY_STAT = 0x1C, /* PHY status */ - PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */ - PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */ - PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */ - PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */ - PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */ - PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */ - PHYR_WIDE_PORT = 0x38, /* wide port participating */ - PHYR_CURRENT0 = 0x80, /* current connection info 0 */ - PHYR_CURRENT1 = 0x84, /* current connection info 1 */ - PHYR_CURRENT2 = 0x88, /* current connection info 2 */ -}; - -/* SAS/SATA Vendor Specific Port Registers */ -enum sas_sata_vsp_regs { - VSR_PHY_STAT = 0x00, /* Phy Status */ - VSR_PHY_MODE1 = 0x01, /* phy tx */ - VSR_PHY_MODE2 = 0x02, /* tx scc */ - VSR_PHY_MODE3 = 0x03, /* pll */ - VSR_PHY_MODE4 = 0x04, /* VCO */ - VSR_PHY_MODE5 = 0x05, /* Rx */ - VSR_PHY_MODE6 = 0x06, /* CDR */ - VSR_PHY_MODE7 = 0x07, /* Impedance */ - VSR_PHY_MODE8 = 0x08, /* Voltage */ - VSR_PHY_MODE9 = 0x09, /* Test */ - VSR_PHY_MODE10 = 0x0A, /* Power */ - VSR_PHY_MODE11 = 0x0B, /* Phy Mode */ - VSR_PHY_VS0 = 0x0C, /* Vednor Specific 0 */ - VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */ -}; - -enum pci_cfg_registers { - PCR_PHY_CTL = 0x40, - PCR_PHY_CTL2 = 0x90, - PCR_DEV_CTRL = 0xE8, -}; - -enum pci_cfg_register_bits { - PCTL_PWR_ON = (0xFU << 24), - PCTL_OFF = (0xFU << 12), - PRD_REQ_SIZE = (0x4000), - PRD_REQ_MASK = (0x00007000), -}; - -enum nvram_layout_offsets { - NVR_SIG = 0x00, /* 0xAA, 0x55 */ - NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */ -}; - -enum chip_flavors { - chip_6320, - chip_6440, - chip_6480, -}; - -enum port_type { - PORT_TYPE_SAS = (1L << 1), - PORT_TYPE_SATA = (1L << 0), -}; - -/* Command Table Format */ -enum ct_format { - /* SSP */ - SSP_F_H = 0x00, - SSP_F_IU = 0x18, - SSP_F_MAX = 0x4D, - /* STP */ - STP_CMD_FIS = 0x00, - STP_ATAPI_CMD = 0x40, - STP_F_MAX = 0x10, - /* SMP */ - SMP_F_T = 0x00, - SMP_F_DEP = 0x01, - SMP_F_MAX = 0x101, -}; - -enum status_buffer { - SB_EIR_OFF = 0x00, /* Error Information Record */ - SB_RFB_OFF = 0x08, /* Response Frame Buffer */ - SB_RFB_MAX = 0x400, /* RFB size*/ -}; - -enum error_info_rec { - CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */ - CMD_PI_ERR = (1U << 30), /* Protection info error. 
see flags2 */ - RSP_OVER = (1U << 29), /* rsp buffer overflow */ - RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */ - UNK_FIS = (1U << 27), /* unknown FIS */ - DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */ - SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */ - TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */ - R_ERR = (1U << 23), /* SATA returned R_ERR prim */ - RD_OFS = (1U << 20), /* Read DATA frame invalid offset */ - XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */ - UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */ - DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */ - INTERLOCK = (1U << 15), /* interlock error */ - NAK = (1U << 14), /* NAK rx'd */ - ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */ - CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */ - OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */ - PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */ - NO_DEST = (1U << 9), /* I_T nexus lost, no destination */ - STP_RES_BSY = (1U << 8), /* STP resources busy */ - BREAK = (1U << 7), /* break received */ - BAD_DEST = (1U << 6), /* bad destination */ - BAD_PROTO = (1U << 5), /* protocol not supported */ - BAD_RATE = (1U << 4), /* cxn rate not supported */ - WRONG_DEST = (1U << 3), /* wrong destination error */ - CREDIT_TO = (1U << 2), /* credit timeout */ - WDOG_TO = (1U << 1), /* watchdog timeout */ - BUF_PAR = (1U << 0), /* buffer parity error */ -}; - -enum error_info_rec_2 { - SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */ - GRD_CHK_ERR = (1U << 14), /* Guard Check Error */ - APP_CHK_ERR = (1U << 13), /* Application Check error */ - REF_CHK_ERR = (1U << 12), /* Reference Check Error */ - USR_BLK_NM = (1U << 0), /* User Block Number */ -}; - -struct mvs_chip_info { - u32 n_phy; - u32 srs_sz; - u32 slot_width; -}; - -struct mvs_err_info { - __le32 flags; - __le32 flags2; -}; - -struct mvs_prd { - __le64 addr; /* 64-bit buffer address */ - __le32 reserved; - __le32 len; /* 16-bit length */ -}; - -struct mvs_cmd_hdr { - __le32 flags; /* PRD tbl len; SAS, SATA ctl */ - __le32 lens; /* cmd, max resp frame len */ - __le32 tags; /* targ port xfer tag; tag */ - __le32 data_len; /* data xfer len */ - __le64 cmd_tbl; /* command table address */ - __le64 open_frame; /* open addr frame address */ - __le64 status_buf; /* status buffer address */ - __le64 prd_tbl; /* PRD tbl address */ - __le32 reserved[4]; -}; - -struct mvs_port { - struct asd_sas_port sas_port; - u8 port_attached; - u8 taskfileset; - u8 wide_port_phymap; - struct list_head list; -}; - -struct mvs_phy { - struct mvs_port *port; - struct asd_sas_phy sas_phy; - struct sas_identify identify; - struct scsi_device *sdev; - u64 dev_sas_addr; - u64 att_dev_sas_addr; - u32 att_dev_info; - u32 dev_info; - u32 phy_type; - u32 phy_status; - u32 irq_status; - u32 frame_rcvd_size; - u8 frame_rcvd[32]; - u8 phy_attached; - enum sas_linkrate minimum_linkrate; - enum sas_linkrate maximum_linkrate; -}; - -struct mvs_slot_info { - struct list_head list; - struct sas_task *task; - u32 n_elem; - u32 tx; - - /* DMA buffer for storing cmd tbl, open addr frame, status buffer, - * and PRD table - */ - void *buf; - dma_addr_t buf_dma; -#if _MV_DUMP - u32 cmd_size; -#endif - - void *response; - struct mvs_port *port; -}; - -struct mvs_info { - unsigned long flags; - - spinlock_t lock; /* host-wide lock */ - struct pci_dev *pdev; /* our device */ - void __iomem *regs; /* enhanced mode registers */ - void __iomem *peri_regs; /* peripheral registers */ - - u8 
sas_addr[SAS_ADDR_SIZE]; - struct sas_ha_struct sas; /* SCSI/SAS glue */ - struct Scsi_Host *shost; - - __le32 *tx; /* TX (delivery) DMA ring */ - dma_addr_t tx_dma; - u32 tx_prod; /* cached next-producer idx */ - - __le32 *rx; /* RX (completion) DMA ring */ - dma_addr_t rx_dma; - u32 rx_cons; /* RX consumer idx */ - - __le32 *rx_fis; /* RX'd FIS area */ - dma_addr_t rx_fis_dma; - - struct mvs_cmd_hdr *slot; /* DMA command header slots */ - dma_addr_t slot_dma; - - const struct mvs_chip_info *chip; - - u8 tags[MVS_SLOTS]; - struct mvs_slot_info slot_info[MVS_SLOTS]; - /* further per-slot information */ - struct mvs_phy phy[MVS_MAX_PHYS]; - struct mvs_port port[MVS_MAX_PHYS]; -#ifdef MVS_USE_TASKLET - struct tasklet_struct tasklet; -#endif -}; - -static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, - void *funcdata); -static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port); -static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val); -static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port); -static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val); -static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val); -static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port); - -static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i); -static void mvs_detect_porttype(struct mvs_info *mvi, int i); -static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); -static void mvs_release_task(struct mvs_info *mvi, int phy_no); - -static int mvs_scan_finished(struct Scsi_Host *, unsigned long); -static void mvs_scan_start(struct Scsi_Host *); -static int mvs_slave_configure(struct scsi_device *sdev); - -static struct scsi_transport_template *mvs_stt; - -static const struct mvs_chip_info mvs_chips[] = { - [chip_6320] = { 2, 16, 9 }, - [chip_6440] = { 4, 16, 9 }, - [chip_6480] = { 8, 32, 10 }, -}; - -static struct scsi_host_template mvs_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .queuecommand = sas_queuecommand, - .target_alloc = sas_target_alloc, - .slave_configure = mvs_slave_configure, - .slave_destroy = sas_slave_destroy, - .scan_finished = mvs_scan_finished, - .scan_start = mvs_scan_start, - .change_queue_depth = sas_change_queue_depth, - .change_queue_type = sas_change_queue_type, - .bios_param = sas_bios_param, - .can_queue = 1, - .cmd_per_lun = 1, - .this_id = -1, - .sg_tablesize = SG_ALL, - .max_sectors = SCSI_DEFAULT_MAX_SECTORS, - .use_clustering = ENABLE_CLUSTERING, - .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_bus_reset_handler = sas_eh_bus_reset_handler, - .slave_alloc = sas_slave_alloc, - .target_destroy = sas_target_destroy, - .ioctl = sas_ioctl, -}; - -static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) -{ - u32 i; - u32 run; - u32 offset; - - offset = 0; - while (size) { - printk("%08X : ", baseaddr + offset); - if (size >= 16) - run = 16; - else - run = size; - size -= run; - for (i = 0; i < 16; i++) { - if (i < run) - printk("%02X ", (u32)data[i]); - else - printk(" "); - } - printk(": "); - for (i = 0; i < run; i++) - printk("%c", isalnum(data[i]) ? 
data[i] : '.'); - printk("\n"); - data = &data[16]; - offset += run; - } - printk("\n"); -} - -#if _MV_DUMP -static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, - enum sas_protocol proto) -{ - u32 offset; - struct pci_dev *pdev = mvi->pdev; - struct mvs_slot_info *slot = &mvi->slot_info[tag]; - - offset = slot->cmd_size + MVS_OAF_SZ + - sizeof(struct mvs_prd) * slot->n_elem; - dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n", - tag); - mvs_hexdump(32, (u8 *) slot->response, - (u32) slot->buf_dma + offset); -} -#endif - -static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag, - enum sas_protocol proto) -{ -#if _MV_DUMP - u32 sz, w_ptr; - u64 addr; - void __iomem *regs = mvi->regs; - struct pci_dev *pdev = mvi->pdev; - struct mvs_slot_info *slot = &mvi->slot_info[tag]; - - /*Delivery Queue */ - sz = mr32(TX_CFG) & TX_RING_SZ_MASK; - w_ptr = slot->tx; - addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO); - dev_printk(KERN_DEBUG, &pdev->dev, - "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr); - dev_printk(KERN_DEBUG, &pdev->dev, - "Delivery Queue Base Address=0x%llX (PA)" - "(tx_dma=0x%llX), Entry=%04d\n", - addr, mvi->tx_dma, w_ptr); - mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]), - (u32) mvi->tx_dma + sizeof(u32) * w_ptr); - /*Command List */ - addr = mvi->slot_dma; - dev_printk(KERN_DEBUG, &pdev->dev, - "Command List Base Address=0x%llX (PA)" - "(slot_dma=0x%llX), Header=%03d\n", - addr, slot->buf_dma, tag); - dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag); - /*mvs_cmd_hdr */ - mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]), - (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr)); - /*1.command table area */ - dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n"); - mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma); - /*2.open address frame area */ - dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n"); - mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size, - (u32) slot->buf_dma + slot->cmd_size); - /*3.status buffer */ - mvs_hba_sb_dump(mvi, tag, proto); - /*4.PRD table */ - dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n"); - mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem, - (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ, - (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ); -#endif -} - -static void mvs_hba_cq_dump(struct mvs_info *mvi) -{ -#if (_MV_DUMP > 2) - u64 addr; - void __iomem *regs = mvi->regs; - struct pci_dev *pdev = mvi->pdev; - u32 entry = mvi->rx_cons + 1; - u32 rx_desc = le32_to_cpu(mvi->rx[entry]); - - /*Completion Queue */ - addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO); - dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n", - mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task); - dev_printk(KERN_DEBUG, &pdev->dev, - "Completion List Base Address=0x%llX (PA), " - "CQ_Entry=%04d, CQ_WP=0x%08X\n", - addr, entry - 1, mvi->rx[0]); - mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc), - mvi->rx_dma + sizeof(u32) * entry); -#endif -} - -static void mvs_hba_interrupt_enable(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - - tmp = mr32(GBL_CTL); - - mw32(GBL_CTL, tmp | INT_EN); -} - -static void mvs_hba_interrupt_disable(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - - tmp = mr32(GBL_CTL); - - mw32(GBL_CTL, tmp & ~INT_EN); -} - -static int mvs_int_rx(struct mvs_info *mvi, bool self_clear); - -/* move to PCI layer or libata core? 
*/ -static int pci_go_64(struct pci_dev *pdev) -{ - int rc; - - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (rc) { - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "64-bit DMA enable failed\n"); - return rc; - } - } - } else { - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "32-bit DMA enable failed\n"); - return rc; - } - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "32-bit consistent DMA enable failed\n"); - return rc; - } - } - - return rc; -} - -static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) -{ - if (task->lldd_task) { - struct mvs_slot_info *slot; - slot = (struct mvs_slot_info *) task->lldd_task; - *tag = slot - mvi->slot_info; - return 1; - } - return 0; -} - -static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) -{ - void *bitmap = (void *) &mvi->tags; - clear_bit(tag, bitmap); -} - -static void mvs_tag_free(struct mvs_info *mvi, u32 tag) -{ - mvs_tag_clear(mvi, tag); -} - -static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) -{ - void *bitmap = (void *) &mvi->tags; - set_bit(tag, bitmap); -} - -static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) -{ - unsigned int index, tag; - void *bitmap = (void *) &mvi->tags; - - index = find_first_zero_bit(bitmap, MVS_SLOTS); - tag = index; - if (tag >= MVS_SLOTS) - return -SAS_QUEUE_FULL; - mvs_tag_set(mvi, tag); - *tag_out = tag; - return 0; -} - -static void mvs_tag_init(struct mvs_info *mvi) -{ - int i; - for (i = 0; i < MVS_SLOTS; ++i) - mvs_tag_clear(mvi, i); -} - -#ifndef MVS_DISABLE_NVRAM -static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data) -{ - int timeout = 1000; - - if (addr & ~SPI_ADDR_MASK) - return -EINVAL; - - writel(addr, regs + SPI_CMD); - writel(TWSI_RD, regs + SPI_CTL); - - while (timeout-- > 0) { - if (readl(regs + SPI_CTL) & TWSI_RDY) { - *data = readl(regs + SPI_DATA); - return 0; - } - - udelay(10); - } - - return -EBUSY; -} - -static int mvs_eep_read_buf(void __iomem *regs, u32 addr, - void *buf, u32 buflen) -{ - u32 addr_end, tmp_addr, i, j; - u32 tmp = 0; - int rc; - u8 *tmp8, *buf8 = buf; - - addr_end = addr + buflen; - tmp_addr = ALIGN(addr, 4); - if (addr > 0xff) - return -EINVAL; - - j = addr & 0x3; - if (j) { - rc = mvs_eep_read(regs, tmp_addr, &tmp); - if (rc) - return rc; - - tmp8 = (u8 *)&tmp; - for (i = j; i < 4; i++) - *buf8++ = tmp8[i]; - - tmp_addr += 4; - } - - for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) { - rc = mvs_eep_read(regs, tmp_addr, &tmp); - if (rc) - return rc; - - memcpy(buf8, &tmp, 4); - buf8 += 4; - } - - if (tmp_addr < addr_end) { - rc = mvs_eep_read(regs, tmp_addr, &tmp); - if (rc) - return rc; - - tmp8 = (u8 *)&tmp; - j = addr_end - tmp_addr; - for (i = 0; i < j; i++) - *buf8++ = tmp8[i]; - - tmp_addr += 4; - } - - return 0; -} -#endif - -static int mvs_nvram_read(struct mvs_info *mvi, u32 addr, - void *buf, u32 buflen) -{ -#ifndef MVS_DISABLE_NVRAM - void __iomem *regs = mvi->regs; - int rc, i; - u32 sum; - u8 hdr[2], *tmp; - const char *msg; - - rc = mvs_eep_read_buf(regs, addr, &hdr, 2); - if (rc) { - msg = "nvram hdr read failed"; - goto err_out; - } - rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen); - if (rc) { - msg = "nvram read failed"; - goto err_out; - } - - if (hdr[0] != 0x5A) { - /* entry id */ - msg = "invalid nvram entry id"; - rc = -ENOENT; - goto 
err_out; - } - - tmp = buf; - sum = ((u32)hdr[0]) + ((u32)hdr[1]); - for (i = 0; i < buflen; i++) - sum += ((u32)tmp[i]); - - if (sum) { - msg = "nvram checksum failure"; - rc = -EILSEQ; - goto err_out; - } - - return 0; - -err_out: - dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg); - return rc; -#else - /* FIXME, for SAS target mode */ - memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8); - return 0; -#endif -} - -static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) -{ - struct mvs_phy *phy = &mvi->phy[i]; - struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; - - if (!phy->phy_attached) - return; - - if (sas_phy->phy) { - struct sas_phy *sphy = sas_phy->phy; - - sphy->negotiated_linkrate = sas_phy->linkrate; - sphy->minimum_linkrate = phy->minimum_linkrate; - sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; - sphy->maximum_linkrate = phy->maximum_linkrate; - sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; - } - - if (phy->phy_type & PORT_TYPE_SAS) { - struct sas_identify_frame *id; - - id = (struct sas_identify_frame *)phy->frame_rcvd; - id->dev_type = phy->identify.device_type; - id->initiator_bits = SAS_PROTOCOL_ALL; - id->target_bits = phy->identify.target_port_protocols; - } else if (phy->phy_type & PORT_TYPE_SATA) { - /* TODO */ - } - mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size; - mvi->sas.notify_port_event(mvi->sas.sas_phy[i], - PORTE_BYTES_DMAED); -} - -static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) -{ - /* give the phy enabling interrupt event time to come in (1s - * is empirically about all it takes) */ - if (time < HZ) - return 0; - /* Wait for discovery to finish */ - scsi_flush_work(shost); - return 1; -} - -static void mvs_scan_start(struct Scsi_Host *shost) -{ - int i; - struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha; - - for (i = 0; i < mvi->chip->n_phy; ++i) { - mvs_bytes_dmaed(mvi, i); - } -} - -static int mvs_slave_configure(struct scsi_device *sdev) -{ - struct domain_device *dev = sdev_to_domain_dev(sdev); - int ret = sas_slave_configure(sdev); - - if (ret) - return ret; - - if (dev_is_sata(dev)) { - /* struct ata_port *ap = dev->sata_dev.ap; */ - /* struct ata_device *adev = ap->link.device; */ - - /* clamp at no NCQ for the time being */ - /* adev->flags |= ATA_DFLAG_NCQ_OFF; */ - scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); - } - return 0; -} - -static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) -{ - struct pci_dev *pdev = mvi->pdev; - struct sas_ha_struct *sas_ha = &mvi->sas; - struct mvs_phy *phy = &mvi->phy[phy_no]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - - phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no); - /* - * events are port events; - * we need to check the interrupt status on a per-port basis. 
- */ - dev_printk(KERN_DEBUG, &pdev->dev, - "Port %d Event = %X\n", - phy_no, phy->irq_status); - - if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) { - mvs_release_task(mvi, phy_no); - if (!mvs_is_phy_ready(mvi, phy_no)) { - sas_phy_disconnected(sas_phy); - sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); - dev_printk(KERN_INFO, &pdev->dev, - "Port %d Unplug Notice\n", phy_no); - - } else - mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL); - } - if (!(phy->irq_status & PHYEV_DEC_ERR)) { - if (phy->irq_status & PHYEV_COMWAKE) { - u32 tmp = mvs_read_port_irq_mask(mvi, phy_no); - mvs_write_port_irq_mask(mvi, phy_no, - tmp | PHYEV_SIG_FIS); - } - if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { - phy->phy_status = mvs_is_phy_ready(mvi, phy_no); - if (phy->phy_status) { - mvs_detect_porttype(mvi, phy_no); - - if (phy->phy_type & PORT_TYPE_SATA) { - u32 tmp = mvs_read_port_irq_mask(mvi, - phy_no); - tmp &= ~PHYEV_SIG_FIS; - mvs_write_port_irq_mask(mvi, - phy_no, tmp); - } - - mvs_update_phyinfo(mvi, phy_no, 0); - sas_ha->notify_phy_event(sas_phy, - PHYE_OOB_DONE); - mvs_bytes_dmaed(mvi, phy_no); - } else { - dev_printk(KERN_DEBUG, &pdev->dev, - "plugin interrupt but phy is gone\n"); - mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, - NULL); - } - } else if (phy->irq_status & PHYEV_BROAD_CH) { - mvs_release_task(mvi, phy_no); - sas_ha->notify_port_event(sas_phy, - PORTE_BROADCAST_RCVD); - } - } - mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status); -} - -static void mvs_int_sata(struct mvs_info *mvi) -{ - u32 tmp; - void __iomem *regs = mvi->regs; - tmp = mr32(INT_STAT_SRS); - mw32(INT_STAT_SRS, tmp & 0xFFFF); -} - -static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, - u32 slot_idx) -{ - void __iomem *regs = mvi->regs; - struct domain_device *dev = task->dev; - struct asd_sas_port *sas_port = dev->port; - struct mvs_port *port = mvi->slot_info[slot_idx].port; - u32 reg_set, phy_mask; - - if (!sas_protocol_ata(task->task_proto)) { - reg_set = 0; - phy_mask = (port->wide_port_phymap) ? 
port->wide_port_phymap : - sas_port->phy_mask; - } else { - reg_set = port->taskfileset; - phy_mask = sas_port->phy_mask; - } - mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx | - (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) | - (phy_mask << TXQ_PHY_SHIFT) | - (reg_set << TXQ_SRS_SHIFT)); - - mw32(TX_PROD_IDX, mvi->tx_prod); - mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); -} - -static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, - u32 slot_idx, int err) -{ - struct mvs_port *port = mvi->slot_info[slot_idx].port; - struct task_status_struct *tstat = &task->task_status; - struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; - int stat = SAM_GOOD; - - resp->frame_len = sizeof(struct dev_to_host_fis); - memcpy(&resp->ending_fis[0], - SATA_RECEIVED_D2H_FIS(port->taskfileset), - sizeof(struct dev_to_host_fis)); - tstat->buf_valid_size = sizeof(*resp); - if (unlikely(err)) - stat = SAS_PROTO_RESPONSE; - return stat; -} - -static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) -{ - u32 slot_idx = rx_desc & RXQ_SLOT_MASK; - mvs_tag_clear(mvi, slot_idx); -} - -static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, - struct mvs_slot_info *slot, u32 slot_idx) -{ - if (!sas_protocol_ata(task->task_proto)) - if (slot->n_elem) - pci_unmap_sg(mvi->pdev, task->scatter, - slot->n_elem, task->data_dir); - - switch (task->task_proto) { - case SAS_PROTOCOL_SMP: - pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1, - PCI_DMA_FROMDEVICE); - pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1, - PCI_DMA_TODEVICE); - break; - - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SSP: - default: - /* do nothing */ - break; - } - list_del(&slot->list); - task->lldd_task = NULL; - slot->task = NULL; - slot->port = NULL; -} - -static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, - u32 slot_idx) -{ - struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; - u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); - u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4)); - int stat = SAM_CHECK_COND; - - if (err_dw1 & SLOT_BSY_ERR) { - stat = SAS_QUEUE_FULL; - mvs_slot_reset(mvi, task, slot_idx); - } - switch (task->task_proto) { - case SAS_PROTOCOL_SSP: - break; - case SAS_PROTOCOL_SMP: - break; - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: - if (err_dw0 & TFILE_ERR) - stat = mvs_sata_done(mvi, task, slot_idx, 1); - break; - default: - break; - } - - mvs_hexdump(16, (u8 *) slot->response, 0); - return stat; -} - -static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) -{ - u32 slot_idx = rx_desc & RXQ_SLOT_MASK; - struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; - struct sas_task *task = slot->task; - struct task_status_struct *tstat; - struct mvs_port *port; - bool aborted; - void *to; - - if (unlikely(!task || !task->lldd_task)) - return -1; - - mvs_hba_cq_dump(mvi); - - spin_lock(&task->task_state_lock); - aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; - if (!aborted) { - task->task_state_flags &= - ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); - task->task_state_flags |= SAS_TASK_STATE_DONE; - } - spin_unlock(&task->task_state_lock); - - if (aborted) { - mvs_slot_task_free(mvi, task, slot, slot_idx); - mvs_slot_free(mvi, rx_desc); - return -1; - } - - port = slot->port; - tstat = &task->task_status; - memset(tstat, 0, sizeof(*tstat)); - tstat->resp = SAS_TASK_COMPLETE; - - if (unlikely(!port->port_attached || flags)) { - 
mvs_slot_err(mvi, task, slot_idx); - if (!sas_protocol_ata(task->task_proto)) - tstat->stat = SAS_PHY_DOWN; - goto out; - } - - /* error info record present */ - if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { - tstat->stat = mvs_slot_err(mvi, task, slot_idx); - goto out; - } - - switch (task->task_proto) { - case SAS_PROTOCOL_SSP: - /* hw says status == 0, datapres == 0 */ - if (rx_desc & RXQ_GOOD) { - tstat->stat = SAM_GOOD; - tstat->resp = SAS_TASK_COMPLETE; - } - /* response frame present */ - else if (rx_desc & RXQ_RSP) { - struct ssp_response_iu *iu = - slot->response + sizeof(struct mvs_err_info); - sas_ssp_task_response(&mvi->pdev->dev, task, iu); - } - - /* should never happen? */ - else - tstat->stat = SAM_CHECK_COND; - break; - - case SAS_PROTOCOL_SMP: { - struct scatterlist *sg_resp = &task->smp_task.smp_resp; - tstat->stat = SAM_GOOD; - to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); - memcpy(to + sg_resp->offset, - slot->response + sizeof(struct mvs_err_info), - sg_dma_len(sg_resp)); - kunmap_atomic(to, KM_IRQ0); - break; - } - - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { - tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); - break; - } - - default: - tstat->stat = SAM_CHECK_COND; - break; - } - -out: - mvs_slot_task_free(mvi, task, slot, slot_idx); - if (unlikely(tstat->stat != SAS_QUEUE_FULL)) - mvs_slot_free(mvi, rx_desc); - - spin_unlock(&mvi->lock); - task->task_done(task); - spin_lock(&mvi->lock); - return tstat->stat; -} - -static void mvs_release_task(struct mvs_info *mvi, int phy_no) -{ - struct list_head *pos, *n; - struct mvs_slot_info *slot; - struct mvs_phy *phy = &mvi->phy[phy_no]; - struct mvs_port *port = phy->port; - u32 rx_desc; - - if (!port) - return; - - list_for_each_safe(pos, n, &port->list) { - slot = container_of(pos, struct mvs_slot_info, list); - rx_desc = (u32) (slot - mvi->slot_info); - mvs_slot_complete(mvi, rx_desc, 1); - } -} - -static void mvs_int_full(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp, stat; - int i; - - stat = mr32(INT_STAT); - - mvs_int_rx(mvi, false); - - for (i = 0; i < MVS_MAX_PORTS; i++) { - tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); - if (tmp) - mvs_int_port(mvi, i, tmp); - } - - if (stat & CINT_SRS) - mvs_int_sata(mvi); - - mw32(INT_STAT, stat); -} - -static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) -{ - void __iomem *regs = mvi->regs; - u32 rx_prod_idx, rx_desc; - bool attn = false; - struct pci_dev *pdev = mvi->pdev; - - /* the first dword in the RX ring is special: it contains - * a mirror of the hardware's RX producer index, so that - * we don't have to stall the CPU reading that register. - * The actual RX ring is offset by one dword, due to this. 
- */ - rx_prod_idx = mvi->rx_cons; - mvi->rx_cons = le32_to_cpu(mvi->rx[0]); - if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ - return 0; - - /* The CMPL_Q may come late, read from register and try again - * note: if coalescing is enabled, - * it will need to read from register every time for sure - */ - if (mvi->rx_cons == rx_prod_idx) - mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK; - - if (mvi->rx_cons == rx_prod_idx) - return 0; - - while (mvi->rx_cons != rx_prod_idx) { - - /* increment our internal RX consumer pointer */ - rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); - - rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); - - if (likely(rx_desc & RXQ_DONE)) - mvs_slot_complete(mvi, rx_desc, 0); - if (rx_desc & RXQ_ATTN) { - attn = true; - dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n", - rx_desc); - } else if (rx_desc & RXQ_ERR) { - if (!(rx_desc & RXQ_DONE)) - mvs_slot_complete(mvi, rx_desc, 0); - dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n", - rx_desc); - } else if (rx_desc & RXQ_SLOT_RESET) { - dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n", - rx_desc); - mvs_slot_free(mvi, rx_desc); - } - } - - if (attn && self_clear) - mvs_int_full(mvi); - - return 0; -} - -#ifdef MVS_USE_TASKLET -static void mvs_tasklet(unsigned long data) -{ - struct mvs_info *mvi = (struct mvs_info *) data; - unsigned long flags; - - spin_lock_irqsave(&mvi->lock, flags); - -#ifdef MVS_DISABLE_MSI - mvs_int_full(mvi); -#else - mvs_int_rx(mvi, true); -#endif - spin_unlock_irqrestore(&mvi->lock, flags); -} -#endif - -static irqreturn_t mvs_interrupt(int irq, void *opaque) -{ - struct mvs_info *mvi = opaque; - void __iomem *regs = mvi->regs; - u32 stat; - - stat = mr32(GBL_INT_STAT); - - if (stat == 0 || stat == 0xffffffff) - return IRQ_NONE; - - /* clear CMD_CMPLT ASAP */ - mw32_f(INT_STAT, CINT_DONE); - -#ifndef MVS_USE_TASKLET - spin_lock(&mvi->lock); - - mvs_int_full(mvi); - - spin_unlock(&mvi->lock); -#else - tasklet_schedule(&mvi->tasklet); -#endif - return IRQ_HANDLED; -} - -#ifndef MVS_DISABLE_MSI -static irqreturn_t mvs_msi_interrupt(int irq, void *opaque) -{ - struct mvs_info *mvi = opaque; - -#ifndef MVS_USE_TASKLET - spin_lock(&mvi->lock); - - mvs_int_rx(mvi, true); - - spin_unlock(&mvi->lock); -#else - tasklet_schedule(&mvi->tasklet); -#endif - return IRQ_HANDLED; -} -#endif - -struct mvs_task_exec_info { - struct sas_task *task; - struct mvs_cmd_hdr *hdr; - struct mvs_port *port; - u32 tag; - int n_elem; -}; - -static int mvs_task_prep_smp(struct mvs_info *mvi, - struct mvs_task_exec_info *tei) -{ - int elem, rc, i; - struct sas_task *task = tei->task; - struct mvs_cmd_hdr *hdr = tei->hdr; - struct scatterlist *sg_req, *sg_resp; - u32 req_len, resp_len, tag = tei->tag; - void *buf_tmp; - u8 *buf_oaf; - dma_addr_t buf_tmp_dma; - struct mvs_prd *buf_prd; - struct scatterlist *sg; - struct mvs_slot_info *slot = &mvi->slot_info[tag]; - struct asd_sas_port *sas_port = task->dev->port; - u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); -#if _MV_DUMP - u8 *buf_cmd; - void *from; -#endif - /* - * DMA-map SMP request, response buffers - */ - sg_req = &task->smp_task.smp_req; - elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE); - if (!elem) - return -ENOMEM; - req_len = sg_dma_len(sg_req); - - sg_resp = &task->smp_task.smp_resp; - elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE); - if (!elem) { - rc = -ENOMEM; - goto err_out; - } - resp_len = sg_dma_len(sg_resp); - - /* must be in dwords */ - if ((req_len & 0x3) || (resp_len & 0x3)) { - rc = -EINVAL; 
- goto err_out_2; - } - - /* - * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs - */ - - /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ - buf_tmp = slot->buf; - buf_tmp_dma = slot->buf_dma; - -#if _MV_DUMP - buf_cmd = buf_tmp; - hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); - buf_tmp += req_len; - buf_tmp_dma += req_len; - slot->cmd_size = req_len; -#else - hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); -#endif - - /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ - buf_oaf = buf_tmp; - hdr->open_frame = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_OAF_SZ; - buf_tmp_dma += MVS_OAF_SZ; - - /* region 3: PRD table ********************************************* */ - buf_prd = buf_tmp; - if (tei->n_elem) - hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); - else - hdr->prd_tbl = 0; - - i = sizeof(struct mvs_prd) * tei->n_elem; - buf_tmp += i; - buf_tmp_dma += i; - - /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ - slot->response = buf_tmp; - hdr->status_buf = cpu_to_le64(buf_tmp_dma); - - /* - * Fill in TX ring and command slot header - */ - slot->tx = mvi->tx_prod; - mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | - TXQ_MODE_I | tag | - (sas_port->phy_mask << TXQ_PHY_SHIFT)); - - hdr->flags |= flags; - hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); - hdr->tags = cpu_to_le32(tag); - hdr->data_len = 0; - - /* generate open address frame hdr (first 12 bytes) */ - buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */ - buf_oaf[1] = task->dev->linkrate & 0xf; - *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); - - /* fill in PRD (scatter/gather) table, if any */ - for_each_sg(task->scatter, sg, tei->n_elem, i) { - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); - buf_prd++; - } - -#if _MV_DUMP - /* copy cmd table */ - from = kmap_atomic(sg_page(sg_req), KM_IRQ0); - memcpy(buf_cmd, from + sg_req->offset, req_len); - kunmap_atomic(from, KM_IRQ0); -#endif - return 0; - -err_out_2: - pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1, - PCI_DMA_FROMDEVICE); -err_out: - pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1, - PCI_DMA_TODEVICE); - return rc; -} - -static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port) -{ - void __iomem *regs = mvi->regs; - u32 tmp, offs; - u8 *tfs = &port->taskfileset; - - if (*tfs == MVS_ID_NOT_MAPPED) - return; - - offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); - if (*tfs < 16) { - tmp = mr32(PCS); - mw32(PCS, tmp & ~offs); - } else { - tmp = mr32(CTL); - mw32(CTL, tmp & ~offs); - } - - tmp = mr32(INT_STAT_SRS) & (1U << *tfs); - if (tmp) - mw32(INT_STAT_SRS, tmp); - - *tfs = MVS_ID_NOT_MAPPED; -} - -static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port) -{ - int i; - u32 tmp, offs; - void __iomem *regs = mvi->regs; - - if (port->taskfileset != MVS_ID_NOT_MAPPED) - return 0; - - tmp = mr32(PCS); - - for (i = 0; i < mvi->chip->srs_sz; i++) { - if (i == 16) - tmp = mr32(CTL); - offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); - if (!(tmp & offs)) { - port->taskfileset = i; - - if (i < 16) - mw32(PCS, tmp | offs); - else - mw32(CTL, tmp | offs); - tmp = mr32(INT_STAT_SRS) & (1U << i); - if (tmp) - mw32(INT_STAT_SRS, tmp); - return 0; - } - } - return MVS_ID_NOT_MAPPED; -} - -static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) -{ - struct ata_queued_cmd *qc = 
task->uldd_task; - - if (qc) { - if (qc->tf.command == ATA_CMD_FPDMA_WRITE || - qc->tf.command == ATA_CMD_FPDMA_READ) { - *tag = qc->tag; - return 1; - } - } - - return 0; -} - -static int mvs_task_prep_ata(struct mvs_info *mvi, - struct mvs_task_exec_info *tei) -{ - struct sas_task *task = tei->task; - struct domain_device *dev = task->dev; - struct mvs_cmd_hdr *hdr = tei->hdr; - struct asd_sas_port *sas_port = dev->port; - struct mvs_slot_info *slot; - struct scatterlist *sg; - struct mvs_prd *buf_prd; - struct mvs_port *port = tei->port; - u32 tag = tei->tag; - u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); - void *buf_tmp; - u8 *buf_cmd, *buf_oaf; - dma_addr_t buf_tmp_dma; - u32 i, req_len, resp_len; - const u32 max_resp_len = SB_RFB_MAX; - - if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED) - return -EBUSY; - - slot = &mvi->slot_info[tag]; - slot->tx = mvi->tx_prod; - mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | - (TXQ_CMD_STP << TXQ_CMD_SHIFT) | - (sas_port->phy_mask << TXQ_PHY_SHIFT) | - (port->taskfileset << TXQ_SRS_SHIFT)); - - if (task->ata_task.use_ncq) - flags |= MCH_FPDMA; - if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { - if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI) - flags |= MCH_ATAPI; - } - - /* FIXME: fill in port multiplier number */ - - hdr->flags = cpu_to_le32(flags); - - /* FIXME: the low-order 5 bits hold the TAG if NCQ is enabled */ - if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags)) - task->ata_task.fis.sector_count |= hdr->tags << 3; - else - hdr->tags = cpu_to_le32(tag); - hdr->data_len = cpu_to_le32(task->total_xfer_len); - - /* - * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs - */ - - /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */ - buf_cmd = buf_tmp = slot->buf; - buf_tmp_dma = slot->buf_dma; - - hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_ATA_CMD_SZ; - buf_tmp_dma += MVS_ATA_CMD_SZ; -#if _MV_DUMP - slot->cmd_size = MVS_ATA_CMD_SZ; -#endif - - /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ - /* used for STP. unused for SATA? */ - buf_oaf = buf_tmp; - hdr->open_frame = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_OAF_SZ; - buf_tmp_dma += MVS_OAF_SZ; - - /* region 3: PRD table ********************************************* */ - buf_prd = buf_tmp; - if (tei->n_elem) - hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); - else - hdr->prd_tbl = 0; - - i = sizeof(struct mvs_prd) * tei->n_elem; - buf_tmp += i; - buf_tmp_dma += i; - - /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ - /* FIXME: probably unused, for SATA. 
kept here just in case - * we get a STP/SATA error information record - */ - slot->response = buf_tmp; - hdr->status_buf = cpu_to_le64(buf_tmp_dma); - - req_len = sizeof(struct host_to_dev_fis); - resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - - sizeof(struct mvs_err_info) - i; - - /* request, response lengths */ - resp_len = min(resp_len, max_resp_len); - hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); - - task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ - /* fill in command FIS and ATAPI CDB */ - memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); - if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) - memcpy(buf_cmd + STP_ATAPI_CMD, - task->ata_task.atapi_packet, 16); - - /* generate open address frame hdr (first 12 bytes) */ - buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */ - buf_oaf[1] = task->dev->linkrate & 0xf; - *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); - - /* fill in PRD (scatter/gather) table, if any */ - for_each_sg(task->scatter, sg, tei->n_elem, i) { - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); - buf_prd++; - } - - return 0; -} - -static int mvs_task_prep_ssp(struct mvs_info *mvi, - struct mvs_task_exec_info *tei) -{ - struct sas_task *task = tei->task; - struct mvs_cmd_hdr *hdr = tei->hdr; - struct mvs_port *port = tei->port; - struct mvs_slot_info *slot; - struct scatterlist *sg; - struct mvs_prd *buf_prd; - struct ssp_frame_hdr *ssp_hdr; - void *buf_tmp; - u8 *buf_cmd, *buf_oaf, fburst = 0; - dma_addr_t buf_tmp_dma; - u32 flags; - u32 resp_len, req_len, i, tag = tei->tag; - const u32 max_resp_len = SB_RFB_MAX; - u8 phy_mask; - - slot = &mvi->slot_info[tag]; - - phy_mask = (port->wide_port_phymap) ? 
port->wide_port_phymap : - task->dev->port->phy_mask; - slot->tx = mvi->tx_prod; - mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | - (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | - (phy_mask << TXQ_PHY_SHIFT)); - - flags = MCH_RETRY; - if (task->ssp_task.enable_first_burst) { - flags |= MCH_FBURST; - fburst = (1 << 7); - } - hdr->flags = cpu_to_le32(flags | - (tei->n_elem << MCH_PRD_LEN_SHIFT) | - (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT)); - - hdr->tags = cpu_to_le32(tag); - hdr->data_len = cpu_to_le32(task->total_xfer_len); - - /* - * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs - */ - - /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ - buf_cmd = buf_tmp = slot->buf; - buf_tmp_dma = slot->buf_dma; - - hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_SSP_CMD_SZ; - buf_tmp_dma += MVS_SSP_CMD_SZ; -#if _MV_DUMP - slot->cmd_size = MVS_SSP_CMD_SZ; -#endif - - /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ - buf_oaf = buf_tmp; - hdr->open_frame = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_OAF_SZ; - buf_tmp_dma += MVS_OAF_SZ; - - /* region 3: PRD table ********************************************* */ - buf_prd = buf_tmp; - if (tei->n_elem) - hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); - else - hdr->prd_tbl = 0; - - i = sizeof(struct mvs_prd) * tei->n_elem; - buf_tmp += i; - buf_tmp_dma += i; - - /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ - slot->response = buf_tmp; - hdr->status_buf = cpu_to_le64(buf_tmp_dma); - - resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - - sizeof(struct mvs_err_info) - i; - resp_len = min(resp_len, max_resp_len); - - req_len = sizeof(struct ssp_frame_hdr) + 28; - - /* request, response lengths */ - hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); - - /* generate open address frame hdr (first 12 bytes) */ - buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */ - buf_oaf[1] = task->dev->linkrate & 0xf; - *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); - - /* fill in SSP frame header (Command Table.SSP frame header) */ - ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; - ssp_hdr->frame_type = SSP_COMMAND; - memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr, - HASHED_SAS_ADDR_SIZE); - memcpy(ssp_hdr->hashed_src_addr, - task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); - ssp_hdr->tag = cpu_to_be16(tag); - - /* fill in command frame IU */ - buf_cmd += sizeof(*ssp_hdr); - memcpy(buf_cmd, &task->ssp_task.LUN, 8); - buf_cmd[9] = fburst | task->ssp_task.task_attr | - (task->ssp_task.task_prio << 3); - memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); - - /* fill in PRD (scatter/gather) table, if any */ - for_each_sg(task->scatter, sg, tei->n_elem, i) { - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); - buf_prd++; - } - - return 0; -} - -static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) -{ - struct domain_device *dev = task->dev; - struct mvs_info *mvi = dev->port->ha->lldd_ha; - struct pci_dev *pdev = mvi->pdev; - void __iomem *regs = mvi->regs; - struct mvs_task_exec_info tei; - struct sas_task *t = task; - struct mvs_slot_info *slot; - u32 tag = 0xdeadbeef, rc, n_elem = 0; - unsigned long flags; - u32 n = num, pass = 0; - - spin_lock_irqsave(&mvi->lock, flags); - do { - dev = t->dev; - tei.port = &mvi->port[dev->port->id]; - - if (!tei.port->port_attached) { - if (sas_protocol_ata(t->task_proto)) { 
- rc = SAS_PHY_DOWN; - goto out_done; - } else { - struct task_status_struct *ts = &t->task_status; - ts->resp = SAS_TASK_UNDELIVERED; - ts->stat = SAS_PHY_DOWN; - t->task_done(t); - if (n > 1) - t = list_entry(t->list.next, - struct sas_task, list); - continue; - } - } - - if (!sas_protocol_ata(t->task_proto)) { - if (t->num_scatter) { - n_elem = pci_map_sg(mvi->pdev, t->scatter, - t->num_scatter, - t->data_dir); - if (!n_elem) { - rc = -ENOMEM; - goto err_out; - } - } - } else { - n_elem = t->num_scatter; - } - - rc = mvs_tag_alloc(mvi, &tag); - if (rc) - goto err_out; - - slot = &mvi->slot_info[tag]; - t->lldd_task = NULL; - slot->n_elem = n_elem; - memset(slot->buf, 0, MVS_SLOT_BUF_SZ); - tei.task = t; - tei.hdr = &mvi->slot[tag]; - tei.tag = tag; - tei.n_elem = n_elem; - - switch (t->task_proto) { - case SAS_PROTOCOL_SMP: - rc = mvs_task_prep_smp(mvi, &tei); - break; - case SAS_PROTOCOL_SSP: - rc = mvs_task_prep_ssp(mvi, &tei); - break; - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: - rc = mvs_task_prep_ata(mvi, &tei); - break; - default: - dev_printk(KERN_ERR, &pdev->dev, - "unknown sas_task proto: 0x%x\n", - t->task_proto); - rc = -EINVAL; - break; - } - - if (rc) - goto err_out_tag; - - slot->task = t; - slot->port = tei.port; - t->lldd_task = (void *) slot; - list_add_tail(&slot->list, &slot->port->list); - /* TODO: select normal or high priority */ - - spin_lock(&t->task_state_lock); - t->task_state_flags |= SAS_TASK_AT_INITIATOR; - spin_unlock(&t->task_state_lock); - - mvs_hba_memory_dump(mvi, tag, t->task_proto); - - ++pass; - mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); - if (n > 1) - t = list_entry(t->list.next, struct sas_task, list); - } while (--n); - - rc = 0; - goto out_done; - -err_out_tag: - mvs_tag_free(mvi, tag); -err_out: - dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc); - if (!sas_protocol_ata(t->task_proto)) - if (n_elem) - pci_unmap_sg(mvi->pdev, t->scatter, n_elem, - t->data_dir); -out_done: - if (pass) - mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); - spin_unlock_irqrestore(&mvi->lock, flags); - return rc; -} - -static int mvs_task_abort(struct sas_task *task) -{ - int rc; - unsigned long flags; - struct mvs_info *mvi = task->dev->port->ha->lldd_ha; - struct pci_dev *pdev = mvi->pdev; - int tag; - - spin_lock_irqsave(&task->task_state_lock, flags); - if (task->task_state_flags & SAS_TASK_STATE_DONE) { - rc = TMF_RESP_FUNC_COMPLETE; - spin_unlock_irqrestore(&task->task_state_lock, flags); - goto out_done; - } - spin_unlock_irqrestore(&task->task_state_lock, flags); - - switch (task->task_proto) { - case SAS_PROTOCOL_SMP: - dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n"); - break; - case SAS_PROTOCOL_SSP: - dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n"); - break; - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{ - dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! 
\n"); -#if _MV_DUMP - dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n"); - mvs_hexdump(sizeof(struct host_to_dev_fis), - (void *)&task->ata_task.fis, 0); - dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n"); - mvs_hexdump(16, task->ata_task.atapi_packet, 0); -#endif - spin_lock_irqsave(&task->task_state_lock, flags); - if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { - /* TODO */ - ; - } - spin_unlock_irqrestore(&task->task_state_lock, flags); - break; - } - default: - break; - } - - if (mvs_find_tag(mvi, task, &tag)) { - spin_lock_irqsave(&mvi->lock, flags); - mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag); - spin_unlock_irqrestore(&mvi->lock, flags); - } - if (!mvs_task_exec(task, 1, GFP_ATOMIC)) - rc = TMF_RESP_FUNC_COMPLETE; - else - rc = TMF_RESP_FUNC_FAILED; -out_done: - return rc; -} - -static void mvs_free(struct mvs_info *mvi) -{ - int i; - - if (!mvi) - return; - - for (i = 0; i < MVS_SLOTS; i++) { - struct mvs_slot_info *slot = &mvi->slot_info[i]; - - if (slot->buf) - dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ, - slot->buf, slot->buf_dma); - } - - if (mvi->tx) - dma_free_coherent(&mvi->pdev->dev, - sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, - mvi->tx, mvi->tx_dma); - if (mvi->rx_fis) - dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ, - mvi->rx_fis, mvi->rx_fis_dma); - if (mvi->rx) - dma_free_coherent(&mvi->pdev->dev, - sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), - mvi->rx, mvi->rx_dma); - if (mvi->slot) - dma_free_coherent(&mvi->pdev->dev, - sizeof(*mvi->slot) * MVS_SLOTS, - mvi->slot, mvi->slot_dma); -#ifdef MVS_ENABLE_PERI - if (mvi->peri_regs) - iounmap(mvi->peri_regs); -#endif - if (mvi->regs) - iounmap(mvi->regs); - if (mvi->shost) - scsi_host_put(mvi->shost); - kfree(mvi->sas.sas_port); - kfree(mvi->sas.sas_phy); - kfree(mvi); -} - -/* FIXME: locking? */ -static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, - void *funcdata) -{ - struct mvs_info *mvi = sas_phy->ha->lldd_ha; - int rc = 0, phy_id = sas_phy->id; - u32 tmp; - - tmp = mvs_read_phy_ctl(mvi, phy_id); - - switch (func) { - case PHY_FUNC_SET_LINK_RATE:{ - struct sas_phy_linkrates *rates = funcdata; - u32 lrmin = 0, lrmax = 0; - - lrmin = (rates->minimum_linkrate << 8); - lrmax = (rates->maximum_linkrate << 12); - - if (lrmin) { - tmp &= ~(0xf << 8); - tmp |= lrmin; - } - if (lrmax) { - tmp &= ~(0xf << 12); - tmp |= lrmax; - } - mvs_write_phy_ctl(mvi, phy_id, tmp); - break; - } - - case PHY_FUNC_HARD_RESET: - if (tmp & PHY_RST_HARD) - break; - mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD); - break; - - case PHY_FUNC_LINK_RESET: - mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST); - break; - - case PHY_FUNC_DISABLE: - case PHY_FUNC_RELEASE_SPINUP_HOLD: - default: - rc = -EOPNOTSUPP; - } - - return rc; -} - -static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) -{ - struct mvs_phy *phy = &mvi->phy[phy_id]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - - sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 
1 : 0; - sas_phy->class = SAS; - sas_phy->iproto = SAS_PROTOCOL_ALL; - sas_phy->tproto = 0; - sas_phy->type = PHY_TYPE_PHYSICAL; - sas_phy->role = PHY_ROLE_INITIATOR; - sas_phy->oob_mode = OOB_NOT_CONNECTED; - sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; - - sas_phy->id = phy_id; - sas_phy->sas_addr = &mvi->sas_addr[0]; - sas_phy->frame_rcvd = &phy->frame_rcvd[0]; - sas_phy->ha = &mvi->sas; - sas_phy->lldd_phy = phy; -} - -static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct mvs_info *mvi; - unsigned long res_start, res_len, res_flag; - struct asd_sas_phy **arr_phy; - struct asd_sas_port **arr_port; - const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data]; - int i; - - /* - * alloc and init our per-HBA mvs_info struct - */ - - mvi = kzalloc(sizeof(*mvi), GFP_KERNEL); - if (!mvi) - return NULL; - - spin_lock_init(&mvi->lock); -#ifdef MVS_USE_TASKLET - tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi); -#endif - mvi->pdev = pdev; - mvi->chip = chip; - - if (pdev->device == 0x6440 && pdev->revision == 0) - mvi->flags |= MVF_PHY_PWR_FIX; - - /* - * alloc and init SCSI, SAS glue - */ - - mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); - if (!mvi->shost) - goto err_out; - - arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); - arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); - if (!arr_phy || !arr_port) - goto err_out; - - for (i = 0; i < MVS_MAX_PHYS; i++) { - mvs_phy_init(mvi, i); - arr_phy[i] = &mvi->phy[i].sas_phy; - arr_port[i] = &mvi->port[i].sas_port; - mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED; - mvi->port[i].wide_port_phymap = 0; - mvi->port[i].port_attached = 0; - INIT_LIST_HEAD(&mvi->port[i].list); - } - - SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas; - mvi->shost->transportt = mvs_stt; - mvi->shost->max_id = 21; - mvi->shost->max_lun = ~0; - mvi->shost->max_channel = 0; - mvi->shost->max_cmd_len = 16; - - mvi->sas.sas_ha_name = DRV_NAME; - mvi->sas.dev = &pdev->dev; - mvi->sas.lldd_module = THIS_MODULE; - mvi->sas.sas_addr = &mvi->sas_addr[0]; - mvi->sas.sas_phy = arr_phy; - mvi->sas.sas_port = arr_port; - mvi->sas.num_phys = chip->n_phy; - mvi->sas.lldd_max_execute_num = 1; - mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE; - mvi->shost->can_queue = MVS_CAN_QUEUE; - mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys; - mvi->sas.lldd_ha = mvi; - mvi->sas.core.shost = mvi->shost; - - mvs_tag_init(mvi); - - /* - * ioremap main and peripheral registers - */ - -#ifdef MVS_ENABLE_PERI - res_start = pci_resource_start(pdev, 2); - res_len = pci_resource_len(pdev, 2); - if (!res_start || !res_len) - goto err_out; - - mvi->peri_regs = ioremap_nocache(res_start, res_len); - if (!mvi->peri_regs) - goto err_out; -#endif - - res_start = pci_resource_start(pdev, 4); - res_len = pci_resource_len(pdev, 4); - if (!res_start || !res_len) - goto err_out; - - res_flag = pci_resource_flags(pdev, 4); - if (res_flag & IORESOURCE_CACHEABLE) - mvi->regs = ioremap(res_start, res_len); - else - mvi->regs = ioremap_nocache(res_start, res_len); - - if (!mvi->regs) - goto err_out; - - /* - * alloc and init our DMA areas - */ - - mvi->tx = dma_alloc_coherent(&pdev->dev, - sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, - &mvi->tx_dma, GFP_KERNEL); - if (!mvi->tx) - goto err_out; - memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ); - - mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ, - &mvi->rx_fis_dma, GFP_KERNEL); - if (!mvi->rx_fis) - goto err_out; - memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); - - mvi->rx 
= dma_alloc_coherent(&pdev->dev, - sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), - &mvi->rx_dma, GFP_KERNEL); - if (!mvi->rx) - goto err_out; - memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1)); - - mvi->rx[0] = cpu_to_le32(0xfff); - mvi->rx_cons = 0xfff; - - mvi->slot = dma_alloc_coherent(&pdev->dev, - sizeof(*mvi->slot) * MVS_SLOTS, - &mvi->slot_dma, GFP_KERNEL); - if (!mvi->slot) - goto err_out; - memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS); - - for (i = 0; i < MVS_SLOTS; i++) { - struct mvs_slot_info *slot = &mvi->slot_info[i]; - - slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ, - &slot->buf_dma, GFP_KERNEL); - if (!slot->buf) - goto err_out; - memset(slot->buf, 0, MVS_SLOT_BUF_SZ); - } - - /* finally, read NVRAM to get our SAS address */ - if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8)) - goto err_out; - return mvi; - -err_out: - mvs_free(mvi); - return NULL; -} - -static u32 mvs_cr32(void __iomem *regs, u32 addr) -{ - mw32(CMD_ADDR, addr); - return mr32(CMD_DATA); -} - -static void mvs_cw32(void __iomem *regs, u32 addr, u32 val) -{ - mw32(CMD_ADDR, addr); - mw32(CMD_DATA, val); -} - -static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port) -{ - void __iomem *regs = mvi->regs; - return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4): - mr32(P4_SER_CTLSTAT + (port - 4) * 4); -} - -static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val) -{ - void __iomem *regs = mvi->regs; - if (port < 4) - mw32(P0_SER_CTLSTAT + port * 4, val); - else - mw32(P4_SER_CTLSTAT + (port - 4) * 4, val); -} - -static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port) -{ - void __iomem *regs = mvi->regs + off; - void __iomem *regs2 = mvi->regs + off2; - return (port < 4)?readl(regs + port * 8): - readl(regs2 + (port - 4) * 8); -} - -static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2, - u32 port, u32 val) -{ - void __iomem *regs = mvi->regs + off; - void __iomem *regs2 = mvi->regs + off2; - if (port < 4) - writel(val, regs + port * 8); - else - writel(val, regs2 + (port - 4) * 8); -} - -static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port) -{ - return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port); -} - -static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val); -} - -static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr) -{ - mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr); -} - -static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) -{ - return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port); -} - -static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val); -} - -static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr) -{ - mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr); -} - -static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) -{ - return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port); -} - -static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val); -} - -static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port) -{ - return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port); -} - -static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_INT_MASK, 
MVS_P4_INT_MASK, port, val); -} - -static void __devinit mvs_phy_hacks(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - - /* workaround for SATA R-ERR, to ignore phy glitch */ - tmp = mvs_cr32(regs, CMD_PHY_TIMER); - tmp &= ~(1 << 9); - tmp |= (1 << 10); - mvs_cw32(regs, CMD_PHY_TIMER, tmp); - - /* enable retry 127 times */ - mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f); - - /* extend open frame timeout to max */ - tmp = mvs_cr32(regs, CMD_SAS_CTL0); - tmp &= ~0xffff; - tmp |= 0x3fff; - mvs_cw32(regs, CMD_SAS_CTL0, tmp); - - /* workaround for WDTIMEOUT, set to 550 ms */ - mvs_cw32(regs, CMD_WD_TIMER, 0x86470); - - /* not to halt for different port op during wideport link change */ - mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d); - - /* workaround for Seagate disk not-found OOB sequence, recv - * COMINIT before sending out COMWAKE */ - tmp = mvs_cr32(regs, CMD_PHY_MODE_21); - tmp &= 0x0000ffff; - tmp |= 0x00fa0000; - mvs_cw32(regs, CMD_PHY_MODE_21, tmp); - - tmp = mvs_cr32(regs, CMD_PHY_TIMER); - tmp &= 0x1fffffff; - tmp |= (2U << 29); /* 8 ms retry */ - mvs_cw32(regs, CMD_PHY_TIMER, tmp); - - /* TEST - for phy decoding error, adjust voltage levels */ - mw32(P0_VSR_ADDR + 0, 0x8); - mw32(P0_VSR_DATA + 0, 0x2F0); - - mw32(P0_VSR_ADDR + 8, 0x8); - mw32(P0_VSR_DATA + 8, 0x2F0); - - mw32(P0_VSR_ADDR + 16, 0x8); - mw32(P0_VSR_DATA + 16, 0x2F0); - - mw32(P0_VSR_ADDR + 24, 0x8); - mw32(P0_VSR_DATA + 24, 0x2F0); - -} - -static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - - tmp = mr32(PCS); - if (mvi->chip->n_phy <= 4) - tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT); - else - tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2); - mw32(PCS, tmp); -} - -static void mvs_detect_porttype(struct mvs_info *mvi, int i) -{ - void __iomem *regs = mvi->regs; - u32 reg; - struct mvs_phy *phy = &mvi->phy[i]; - - /* TODO check & save device type */ - reg = mr32(GBL_PORT_TYPE); - - if (reg & MODE_SAS_SATA & (1 << i)) - phy->phy_type |= PORT_TYPE_SAS; - else - phy->phy_type |= PORT_TYPE_SATA; -} - -static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) -{ - u32 *s = (u32 *) buf; - - if (!s) - return NULL; - - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); - s[3] = mvs_read_port_cfg_data(mvi, i); - - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); - s[2] = mvs_read_port_cfg_data(mvi, i); - - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); - s[1] = mvs_read_port_cfg_data(mvi, i); - - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); - s[0] = mvs_read_port_cfg_data(mvi, i); - - return (void *)s; -} - -static u32 mvs_is_sig_fis_received(u32 irq_status) -{ - return irq_status & PHYEV_SIG_FIS; -} - -static void mvs_update_wideport(struct mvs_info *mvi, int i) -{ - struct mvs_phy *phy = &mvi->phy[i]; - struct mvs_port *port = phy->port; - int j, no; - - for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy) - if (no & 1) { - mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); - mvs_write_port_cfg_data(mvi, no, - port->wide_port_phymap); - } else { - mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); - mvs_write_port_cfg_data(mvi, no, 0); - } -} - -static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) -{ - u32 tmp; - struct mvs_phy *phy = &mvi->phy[i]; - struct mvs_port *port = phy->port; - - tmp = mvs_read_phy_ctl(mvi, i); - - if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { - if (!port) - phy->phy_attached = 1; - return tmp; - } - - if (port) { - if (phy->phy_type & PORT_TYPE_SAS) { - port->wide_port_phymap &= ~(1U 
<< i); - if (!port->wide_port_phymap) - port->port_attached = 0; - mvs_update_wideport(mvi, i); - } else if (phy->phy_type & PORT_TYPE_SATA) - port->port_attached = 0; - mvs_free_reg_set(mvi, phy->port); - phy->port = NULL; - phy->phy_attached = 0; - phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); - } - return 0; -} - -static void mvs_update_phyinfo(struct mvs_info *mvi, int i, - int get_st) -{ - struct mvs_phy *phy = &mvi->phy[i]; - struct pci_dev *pdev = mvi->pdev; - u32 tmp; - u64 tmp64; - - mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); - phy->dev_info = mvs_read_port_cfg_data(mvi, i); - - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); - phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32; - - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); - phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); - - if (get_st) { - phy->irq_status = mvs_read_port_irq_stat(mvi, i); - phy->phy_status = mvs_is_phy_ready(mvi, i); - } - - if (phy->phy_status) { - u32 phy_st; - struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; - - mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); - phy_st = mvs_read_port_cfg_data(mvi, i); - - sas_phy->linkrate = - (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; - phy->minimum_linkrate = - (phy->phy_status & - PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; - phy->maximum_linkrate = - (phy->phy_status & - PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; - - if (phy->phy_type & PORT_TYPE_SAS) { - /* Updated attached_sas_addr */ - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); - phy->att_dev_sas_addr = - (u64) mvs_read_port_cfg_data(mvi, i) << 32; - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); - phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); - phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); - phy->identify.device_type = - phy->att_dev_info & PORT_DEV_TYPE_MASK; - - if (phy->identify.device_type == SAS_END_DEV) - phy->identify.target_port_protocols = - SAS_PROTOCOL_SSP; - else if (phy->identify.device_type != NO_DEVICE) - phy->identify.target_port_protocols = - SAS_PROTOCOL_SMP; - if (phy_st & PHY_OOB_DTCTD) - sas_phy->oob_mode = SAS_OOB_MODE; - phy->frame_rcvd_size = - sizeof(struct sas_identify_frame); - } else if (phy->phy_type & PORT_TYPE_SATA) { - phy->identify.target_port_protocols = SAS_PROTOCOL_STP; - if (mvs_is_sig_fis_received(phy->irq_status)) { - phy->att_dev_sas_addr = i; /* temp */ - if (phy_st & PHY_OOB_DTCTD) - sas_phy->oob_mode = SATA_OOB_MODE; - phy->frame_rcvd_size = - sizeof(struct dev_to_host_fis); - mvs_get_d2h_reg(mvi, i, - (void *)sas_phy->frame_rcvd); - } else { - dev_printk(KERN_DEBUG, &pdev->dev, - "No sig fis\n"); - phy->phy_type &= ~(PORT_TYPE_SATA); - goto out_done; - } - } - tmp64 = cpu_to_be64(phy->att_dev_sas_addr); - memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE); - - dev_printk(KERN_DEBUG, &pdev->dev, - "phy[%d] Get Attached Address 0x%llX ," - " SAS Address 0x%llX\n", - i, - (unsigned long long)phy->att_dev_sas_addr, - (unsigned long long)phy->dev_sas_addr); - dev_printk(KERN_DEBUG, &pdev->dev, - "Rate = %x , type = %d\n", - sas_phy->linkrate, phy->phy_type); - - /* workaround for HW phy decoding error on 1.5g disk drive */ - mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); - tmp = mvs_read_port_vsr_data(mvi, i); - if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == - SAS_LINK_RATE_1_5_GBPS) - tmp &= ~PHY_MODE6_LATECLK; - else - tmp |= PHY_MODE6_LATECLK; - 
mvs_write_port_vsr_data(mvi, i, tmp); - - } -out_done: - if (get_st) - mvs_write_port_irq_stat(mvi, i, phy->irq_status); -} - -static void mvs_port_formed(struct asd_sas_phy *sas_phy) -{ - struct sas_ha_struct *sas_ha = sas_phy->ha; - struct mvs_info *mvi = sas_ha->lldd_ha; - struct asd_sas_port *sas_port = sas_phy->port; - struct mvs_phy *phy = sas_phy->lldd_phy; - struct mvs_port *port = &mvi->port[sas_port->id]; - unsigned long flags; - - spin_lock_irqsave(&mvi->lock, flags); - port->port_attached = 1; - phy->port = port; - port->taskfileset = MVS_ID_NOT_MAPPED; - if (phy->phy_type & PORT_TYPE_SAS) { - port->wide_port_phymap = sas_port->phy_mask; - mvs_update_wideport(mvi, sas_phy->id); - } - spin_unlock_irqrestore(&mvi->lock, flags); -} - -static int mvs_I_T_nexus_reset(struct domain_device *dev) -{ - return TMF_RESP_FUNC_FAILED; -} - -static int __devinit mvs_hw_init(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - int i; - u32 tmp, cctl; - - /* make sure interrupts are masked immediately (paranoia) */ - mw32(GBL_CTL, 0); - tmp = mr32(GBL_CTL); - - /* Reset Controller */ - if (!(tmp & HBA_RST)) { - if (mvi->flags & MVF_PHY_PWR_FIX) { - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); - tmp &= ~PCTL_PWR_ON; - tmp |= PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); - - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); - tmp &= ~PCTL_PWR_ON; - tmp |= PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); - } - - /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */ - mw32_f(GBL_CTL, HBA_RST); - } - - /* wait for reset to finish; timeout is just a guess */ - i = 1000; - while (i-- > 0) { - msleep(10); - - if (!(mr32(GBL_CTL) & HBA_RST)) - break; - } - if (mr32(GBL_CTL) & HBA_RST) { - dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n"); - return -EBUSY; - } - - /* Init Chip */ - /* make sure RST is set; HBA_RST /should/ have done that for us */ - cctl = mr32(CTL); - if (cctl & CCTL_RST) - cctl &= ~CCTL_RST; - else - mw32_f(CTL, cctl | CCTL_RST); - - /* write to device control _AND_ device status register? - A.C. 
*/ - pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp); - tmp &= ~PRD_REQ_MASK; - tmp |= PRD_REQ_SIZE; - pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp); - - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); - tmp |= PCTL_PWR_ON; - tmp &= ~PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); - - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); - tmp |= PCTL_PWR_ON; - tmp &= ~PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); - - mw32_f(CTL, cctl); - - /* reset control */ - mw32(PCS, 0); /*MVS_PCS */ - - mvs_phy_hacks(mvi); - - mw32(CMD_LIST_LO, mvi->slot_dma); - mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); - - mw32(RX_FIS_LO, mvi->rx_fis_dma); - mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); - - mw32(TX_CFG, MVS_CHIP_SLOT_SZ); - mw32(TX_LO, mvi->tx_dma); - mw32(TX_HI, (mvi->tx_dma >> 16) >> 16); - - mw32(RX_CFG, MVS_RX_RING_SZ); - mw32(RX_LO, mvi->rx_dma); - mw32(RX_HI, (mvi->rx_dma >> 16) >> 16); - - /* enable auto port detection */ - mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN); - msleep(1100); - /* init and reset phys */ - for (i = 0; i < mvi->chip->n_phy; i++) { - u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]); - u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]); - - mvs_detect_porttype(mvi, i); - - /* set phy local SAS address */ - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); - mvs_write_port_cfg_data(mvi, i, lo); - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); - mvs_write_port_cfg_data(mvi, i, hi); - - /* reset phy */ - tmp = mvs_read_phy_ctl(mvi, i); - tmp |= PHY_RST; - mvs_write_phy_ctl(mvi, i, tmp); - } - - msleep(100); - - for (i = 0; i < mvi->chip->n_phy; i++) { - /* clear phy int status */ - tmp = mvs_read_port_irq_stat(mvi, i); - tmp &= ~PHYEV_SIG_FIS; - mvs_write_port_irq_stat(mvi, i, tmp); - - /* set phy int mask */ - tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS | - PHYEV_ID_DONE | PHYEV_DEC_ERR; - mvs_write_port_irq_mask(mvi, i, tmp); - - msleep(100); - mvs_update_phyinfo(mvi, i, 1); - mvs_enable_xmt(mvi, i); - } - - /* FIXME: update wide port bitmaps */ - - /* little endian for open address and command table, etc. */ - /* A.C. - * it seems that (from the spec) turning on big-endian won't - * do us any good on big-endian machines, need further confirmation - */ - cctl = mr32(CTL); - cctl |= CCTL_ENDIAN_CMD; - cctl |= CCTL_ENDIAN_DATA; - cctl &= ~CCTL_ENDIAN_OPEN; - cctl |= CCTL_ENDIAN_RSP; - mw32_f(CTL, cctl); - - /* reset CMD queue */ - tmp = mr32(PCS); - tmp |= PCS_CMD_RST; - mw32(PCS, tmp); - /* interrupt coalescing may cause a missing HW interrupt in some cases, - * and the max count is 0x1ff, while our max slot is 0x200, - * so it would make the count 0 (0x200 & 0x1ff == 0).
- */ - tmp = 0; - mw32(INT_COAL, tmp); - - tmp = 0x100; - mw32(INT_COAL_TMOUT, tmp); - - /* ladies and gentlemen, start your engines */ - mw32(TX_CFG, 0); - mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); - mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN); - /* enable CMD/CMPL_Q/RESP mode */ - mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN); - - /* enable completion queue interrupt */ - tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS); - mw32(INT_MASK, tmp); - - /* Enable SRS interrupt */ - mw32(INT_MASK_SRS, 0xFF); - return 0; -} - -static void __devinit mvs_print_info(struct mvs_info *mvi) -{ - struct pci_dev *pdev = mvi->pdev; - static int printed_version; - - if (!printed_version++) - dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); - - dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n", - mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr)); -} - -static int __devinit mvs_pci_init(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - int rc; - struct mvs_info *mvi; - irq_handler_t irq_handler = mvs_interrupt; - - rc = pci_enable_device(pdev); - if (rc) - return rc; - - pci_set_master(pdev); - - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) - goto err_out_disable; - - rc = pci_go_64(pdev); - if (rc) - goto err_out_regions; - - mvi = mvs_alloc(pdev, ent); - if (!mvi) { - rc = -ENOMEM; - goto err_out_regions; - } - - rc = mvs_hw_init(mvi); - if (rc) - goto err_out_mvi; - -#ifndef MVS_DISABLE_MSI - if (!pci_enable_msi(pdev)) { - u32 tmp; - void __iomem *regs = mvi->regs; - mvi->flags |= MVF_MSI; - irq_handler = mvs_msi_interrupt; - tmp = mr32(PCS); - mw32(PCS, tmp | PCS_SELF_CLEAR); - } -#endif - - rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi); - if (rc) - goto err_out_msi; - - rc = scsi_add_host(mvi->shost, &pdev->dev); - if (rc) - goto err_out_irq; - - rc = sas_register_ha(&mvi->sas); - if (rc) - goto err_out_shost; - - pci_set_drvdata(pdev, mvi); - - mvs_print_info(mvi); - - mvs_hba_interrupt_enable(mvi); - - scsi_scan_host(mvi->shost); - - return 0; - -err_out_shost: - scsi_remove_host(mvi->shost); -err_out_irq: - free_irq(pdev->irq, mvi); -err_out_msi: - if (mvi->flags & MVF_MSI) - pci_disable_msi(pdev); -err_out_mvi: - mvs_free(mvi); -err_out_regions: - pci_release_regions(pdev); -err_out_disable: - pci_disable_device(pdev); - return rc; -} - -static void __devexit mvs_pci_remove(struct pci_dev *pdev) -{ - struct mvs_info *mvi = pci_get_drvdata(pdev); - - pci_set_drvdata(pdev, NULL); - - if (mvi) { - sas_unregister_ha(&mvi->sas); - mvs_hba_interrupt_disable(mvi); - sas_remove_host(mvi->shost); - scsi_remove_host(mvi->shost); - - free_irq(pdev->irq, mvi); - if (mvi->flags & MVF_MSI) - pci_disable_msi(pdev); - mvs_free(mvi); - pci_release_regions(pdev); - } - pci_disable_device(pdev); -} - -static struct sas_domain_function_template mvs_transport_ops = { - .lldd_execute_task = mvs_task_exec, - .lldd_control_phy = mvs_phy_control, - .lldd_abort_task = mvs_task_abort, - .lldd_port_formed = mvs_port_formed, - .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, -}; - -static struct pci_device_id __devinitdata mvs_pci_table[] = { - { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 }, - { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 }, - { - .vendor = PCI_VENDOR_ID_MARVELL, - .device = 0x6440, - .subvendor = PCI_ANY_ID, - .subdevice = 0x6480, - .class = 0, - .class_mask = 0, - .driver_data = chip_6480, - }, - { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 }, - { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 }, - - { } /* terminate list */ -}; - -static struct pci_driver
mvs_pci_driver = { - .name = DRV_NAME, - .id_table = mvs_pci_table, - .probe = mvs_pci_init, - .remove = __devexit_p(mvs_pci_remove), -}; - -static int __init mvs_init(void) -{ - int rc; - - mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); - if (!mvs_stt) - return -ENOMEM; - - rc = pci_register_driver(&mvs_pci_driver); - if (rc) - goto err_out; - - return 0; - -err_out: - sas_release_transport(mvs_stt); - return rc; -} - -static void __exit mvs_exit(void) -{ - pci_unregister_driver(&mvs_pci_driver); - sas_release_transport(mvs_stt); -} - -module_init(mvs_init); -module_exit(mvs_exit); - -MODULE_AUTHOR("Jeff Garzik "); -MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); -MODULE_VERSION(DRV_VERSION); -MODULE_LICENSE("GPL"); -MODULE_DEVICE_TABLE(pci, mvs_pci_table); diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig new file mode 100644 index 000000000000..f83f368e6902 --- /dev/null +++ b/drivers/scsi/mvsas/Kconfig @@ -0,0 +1,35 @@ +# +# Kernel configuration file for 88SE64XX SAS/SATA driver. +# +# Copyright 2007 Red Hat, Inc. +# Copyright 2008 Marvell. +# +# This file is licensed under GPLv2. +# +# This file is part of the 88SE64XX driver. +# +# The 88SE64XX driver is free software; you can redistribute +# it and/or modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; version 2 of the +# License. +# +# The 88SE64XX driver is distributed in the hope that it will be +# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with 88SE64XX Driver; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# +# + +config SCSI_MVSAS + tristate "Marvell 88SE64XX SAS/SATA support" + depends on PCI + select SCSI_SAS_LIBSAS + select FW_LOADER + help + This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX + chip based host adapters. + diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile new file mode 100644 index 000000000000..1ac6ed955a04 --- /dev/null +++ b/drivers/scsi/mvsas/Makefile @@ -0,0 +1,26 @@ +# +# Makefile for Marvell 88SE64xx SAS/SATA driver. +# +# Copyright 2007 Red Hat, Inc. +# Copyright 2008 Marvell. +# +# This file is licensed under GPLv2. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; version 2 of the +# License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +# USA + +obj-$(CONFIG_SCSI_MVSAS) += mvsas.o +mvsas-y += mv_sas.o + diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c new file mode 100644 index 000000000000..e4acebd10d1b --- /dev/null +++ b/drivers/scsi/mvsas/mv_sas.c @@ -0,0 +1,3222 @@ +/* + mvsas.c - Marvell 88SE6440 SAS/SATA support + + Copyright 2007 Red Hat, Inc. + Copyright 2008 Marvell. 
+ + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2, + or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty + of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the GNU General Public License for more details. + + You should have received a copy of the GNU General Public + License along with this program; see the file COPYING. If not, + write to the Free Software Foundation, 675 Mass Ave, Cambridge, + MA 02139, USA. + + --------------------------------------------------------------- + + Random notes: + * hardware supports controlling the endian-ness of data + structures. this permits elimination of all the le32_to_cpu() + and cpu_to_le32() conversions. + + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "mvsas" +#define DRV_VERSION "0.5.2" +#define _MV_DUMP 0 +#define MVS_DISABLE_NVRAM +#define MVS_DISABLE_MSI + +#define mr32(reg) readl(regs + MVS_##reg) +#define mw32(reg,val) writel((val), regs + MVS_##reg) +#define mw32_f(reg,val) do { \ + writel((val), regs + MVS_##reg); \ + readl(regs + MVS_##reg); \ + } while (0) + +#define MVS_ID_NOT_MAPPED 0x7f +#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) + +/* offset for D2H FIS in the Received FIS List Structure */ +#define SATA_RECEIVED_D2H_FIS(reg_set) \ + ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40) +#define SATA_RECEIVED_PIO_FIS(reg_set) \ + ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20) +#define UNASSOC_D2H_FIS(id) \ + ((void *) mvi->rx_fis + 0x100 * id) + +#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \ + for ((__mc) = (__lseq_mask), (__lseq) = 0; \ + (__mc) != 0 && __rest; \ + (++__lseq), (__mc) >>= 1) + +/* driver compile-time configuration */ +enum driver_configuration { + MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ + MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ + /* software requires power-of-2 + ring size */ + + MVS_SLOTS = 512, /* command slots */ + MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */ + MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ + MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ + MVS_OAF_SZ = 64, /* Open address frame buffer size */ + + MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */ + + MVS_QUEUE_SIZE = 30, /* Support Queue depth */ + MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */ +}; + +/* unchangeable hardware details */ +enum hardware_details { + MVS_MAX_PHYS = 8, /* max. possible phys */ + MVS_MAX_PORTS = 8, /* max. 
possible ports */ + MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100), +}; + +/* peripheral registers (BAR2) */ +enum peripheral_registers { + SPI_CTL = 0x10, /* EEPROM control */ + SPI_CMD = 0x14, /* EEPROM command */ + SPI_DATA = 0x18, /* EEPROM data */ +}; + +enum peripheral_register_bits { + TWSI_RDY = (1U << 7), /* EEPROM interface ready */ + TWSI_RD = (1U << 4), /* EEPROM read access */ + + SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */ +}; + +/* enhanced mode registers (BAR4) */ +enum hw_registers { + MVS_GBL_CTL = 0x04, /* global control */ + MVS_GBL_INT_STAT = 0x08, /* global irq status */ + MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ + MVS_GBL_PORT_TYPE = 0xa0, /* port type */ + + MVS_CTL = 0x100, /* SAS/SATA port configuration */ + MVS_PCS = 0x104, /* SAS/SATA port control/status */ + MVS_CMD_LIST_LO = 0x108, /* cmd list addr */ + MVS_CMD_LIST_HI = 0x10C, + MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */ + MVS_RX_FIS_HI = 0x114, + + MVS_TX_CFG = 0x120, /* TX configuration */ + MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ + MVS_TX_HI = 0x128, + + MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */ + MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */ + MVS_RX_CFG = 0x134, /* RX configuration */ + MVS_RX_LO = 0x138, /* RX (completion) ring addr */ + MVS_RX_HI = 0x13C, + MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */ + + MVS_INT_COAL = 0x148, /* Int coalescing config */ + MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ + MVS_INT_STAT = 0x150, /* Central int status */ + MVS_INT_MASK = 0x154, /* Central int enable */ + MVS_INT_STAT_SRS = 0x158, /* SATA register set status */ + MVS_INT_MASK_SRS = 0x15C, + + /* ports 1-3 follow after this */ + MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */ + MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */ + MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */ + MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */ + + /* ports 1-3 follow after this */ + MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */ + MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */ + + MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */ + MVS_CMD_DATA = 0x1BC, /* Command register port (data) */ + + /* ports 1-3 follow after this */ + MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */ + MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */ + MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */ + MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */ + + /* ports 1-3 follow after this */ + MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */ + MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */ + MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */ + MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */ +}; + +enum hw_register_bits { + /* MVS_GBL_CTL */ + INT_EN = (1U << 1), /* Global int enable */ + HBA_RST = (1U << 0), /* HBA reset */ + + /* MVS_GBL_INT_STAT */ + INT_XOR = (1U << 4), /* XOR engine event */ + INT_SAS_SATA = (1U << 0), /* SAS/SATA event */ + + /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */ + SATA_TARGET = (1U << 16), /* port0 SATA target enable */ + MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */ + MODE_AUTO_DET_PORT6 = (1U << 14), + MODE_AUTO_DET_PORT5 = (1U << 13), + MODE_AUTO_DET_PORT4 = (1U << 12), + MODE_AUTO_DET_PORT3 = (1U << 11), + MODE_AUTO_DET_PORT2 = (1U << 10), + MODE_AUTO_DET_PORT1 = (1U << 9), + MODE_AUTO_DET_PORT0 = (1U << 8), + MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 | + MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 | + MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 
| + MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7, + MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */ + MODE_SAS_PORT6_MASK = (1U << 6), + MODE_SAS_PORT5_MASK = (1U << 5), + MODE_SAS_PORT4_MASK = (1U << 4), + MODE_SAS_PORT3_MASK = (1U << 3), + MODE_SAS_PORT2_MASK = (1U << 2), + MODE_SAS_PORT1_MASK = (1U << 1), + MODE_SAS_PORT0_MASK = (1U << 0), + MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK | + MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK | + MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK | + MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK, + + /* SAS_MODE value may be + * dictated (in hw) by values + * of SATA_TARGET & AUTO_DET + */ + + /* MVS_TX_CFG */ + TX_EN = (1U << 16), /* Enable TX */ + TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */ + + /* MVS_RX_CFG */ + RX_EN = (1U << 16), /* Enable RX */ + RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */ + + /* MVS_INT_COAL */ + COAL_EN = (1U << 16), /* Enable int coalescing */ + + /* MVS_INT_STAT, MVS_INT_MASK */ + CINT_I2C = (1U << 31), /* I2C event */ + CINT_SW0 = (1U << 30), /* software event 0 */ + CINT_SW1 = (1U << 29), /* software event 1 */ + CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */ + CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */ + CINT_MEM = (1U << 26), /* int mem parity err */ + CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */ + CINT_SRS = (1U << 3), /* SRS event */ + CINT_CI_STOP = (1U << 1), /* cmd issue stopped */ + CINT_DONE = (1U << 0), /* cmd completion */ + + /* shl for ports 1-3 */ + CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */ + CINT_PORT = (1U << 8), /* port0 event */ + CINT_PORT_MASK_OFFSET = 8, + CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET), + + /* TX (delivery) ring bits */ + TXQ_CMD_SHIFT = 29, + TXQ_CMD_SSP = 1, /* SSP protocol */ + TXQ_CMD_SMP = 2, /* SMP protocol */ + TXQ_CMD_STP = 3, /* STP/SATA protocol */ + TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ + TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ + TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ + TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */ + TXQ_SRS_SHIFT = 20, /* SATA register set */ + TXQ_SRS_MASK = 0x7f, + TXQ_PHY_SHIFT = 12, /* PHY bitmap */ + TXQ_PHY_MASK = 0xff, + TXQ_SLOT_MASK = 0xfff, /* slot number */ + + /* RX (completion) ring bits */ + RXQ_GOOD = (1U << 23), /* Response good */ + RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */ + RXQ_CMD_RX = (1U << 20), /* target cmd received */ + RXQ_ATTN = (1U << 19), /* attention */ + RXQ_RSP = (1U << 18), /* response frame xfer'd */ + RXQ_ERR = (1U << 17), /* err info rec xfer'd */ + RXQ_DONE = (1U << 16), /* cmd complete */ + RXQ_SLOT_MASK = 0xfff, /* slot number */ + + /* mvs_cmd_hdr bits */ + MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */ + MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */ + + /* SSP initiator only */ + MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */ + + /* SSP initiator or target */ + MCH_SSP_FR_TASK = 0x1, /* TASK frame */ + + /* SSP target only */ + MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */ + MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */ + MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */ + MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */ + + MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */ + MCH_FBURST = (1U << 11), /* first burst (SSP) */ + MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */ + MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */ + MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */ + MCH_RESET = (1U << 7), /* Reset (STP/SATA) */ + MCH_FPDMA = (1U << 
6), /* First party DMA (STP/SATA) */ + MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */ + MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */ + MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/ + + CCTL_RST = (1U << 5), /* port logic reset */ + + /* 0(LSB first), 1(MSB first) */ + CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */ + CCTL_ENDIAN_RSP = (1U << 2), /* response frame */ + CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */ + CCTL_ENDIAN_CMD = (1U << 0), /* command table */ + + /* MVS_Px_SER_CTLSTAT (per-phy control) */ + PHY_SSP_RST = (1U << 3), /* reset SSP link layer */ + PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */ + PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */ + PHY_RST = (1U << 0), /* phy reset */ + PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8), + PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12), + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), + PHY_NEG_SPP_PHYS_LINK_RATE_MASK = + (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), + PHY_READY_MASK = (1U << 20), + + /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */ + PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */ + PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */ + PHYEV_AN = (1U << 18), /* SATA async notification */ + PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */ + PHYEV_SIG_FIS = (1U << 16), /* signature FIS */ + PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */ + PHYEV_IU_BIG = (1U << 11), /* IU too long err */ + PHYEV_IU_SMALL = (1U << 10), /* IU too short err */ + PHYEV_UNK_TAG = (1U << 9), /* unknown tag */ + PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */ + PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */ + PHYEV_PORT_SEL = (1U << 6), /* port selector present */ + PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */ + PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */ + PHYEV_ID_FAIL = (1U << 3), /* identify failed */ + PHYEV_ID_DONE = (1U << 2), /* identify done */ + PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */ + PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */ + + /* MVS_PCS */ + PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */ + PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */ + PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */ + PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ + PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ + PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ + PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ + PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ + PCS_CMD_RST = (1U << 1), /* reset cmd issue */ + PCS_CMD_EN = (1U << 0), /* enable cmd issue */ + + /* Port n Attached Device Info */ + PORT_DEV_SSP_TRGT = (1U << 19), + PORT_DEV_SMP_TRGT = (1U << 18), + PORT_DEV_STP_TRGT = (1U << 17), + PORT_DEV_SSP_INIT = (1U << 11), + PORT_DEV_SMP_INIT = (1U << 10), + PORT_DEV_STP_INIT = (1U << 9), + PORT_PHY_ID_MASK = (0xFFU << 24), + PORT_DEV_TRGT_MASK = (0x7U << 17), + PORT_DEV_INIT_MASK = (0x7U << 9), + PORT_DEV_TYPE_MASK = (0x7U << 0), + + /* Port n PHY Status */ + PHY_RDY = (1U << 2), + PHY_DW_SYNC = (1U << 1), + PHY_OOB_DTCTD = (1U << 0), + + /* VSR */ + /* PHYMODE 6 (CDB) */ + PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */ + PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */ + PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/ + PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */ + PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */ + PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */ + PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select 
(final) */ + PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */ + PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */ + PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */ + PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */ + PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */ + PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */ + PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */ +}; + +enum mvs_info_flags { + MVF_MSI = (1U << 0), /* MSI is enabled */ + MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ +}; + +enum sas_cmd_port_registers { + CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */ + CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */ + CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */ + CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */ + CMD_OOB_SPACE = 0x110, /* OOB space control register */ + CMD_OOB_BURST = 0x114, /* OOB burst control register */ + CMD_PHY_TIMER = 0x118, /* PHY timer control register */ + CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */ + CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */ + CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */ + CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */ + CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */ + CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */ + CMD_ID_TEST = 0x134, /* ID test register */ + CMD_PL_TIMER = 0x138, /* PL timer register */ + CMD_WD_TIMER = 0x13c, /* WD timer register */ + CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */ + CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */ + CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */ + CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */ + CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */ + CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */ + CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */ + CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */ + CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */ + CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memory BIST Status */ + CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */ + CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */ + CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */ + CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */ + CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */ + CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */ + CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */ + CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */ + CMD_RESET_COUNT = 0x188, /* Reset Count */ + CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */ + CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */ + CMD_PHY_CTL = 0x194, /* PHY Control and Status */ + CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */ + CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */ + CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */ + CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */ + CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */ + CMD_HOST_CTL = 0x1AC, /* Host Control Status */ + CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */ + CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */ + CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */ + CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */ + CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */ + CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */ +}; + +/* SAS/SATA configuration port registers, aka phy registers */ enum
sas_sata_config_port_regs { + PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */ + PHYR_ADDR_LO = 0x04, /* my SAS address (low) */ + PHYR_ADDR_HI = 0x08, /* my SAS address (high) */ + PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */ + PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */ + PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */ + PHYR_SATA_CTL = 0x18, /* SATA control */ + PHYR_PHY_STAT = 0x1C, /* PHY status */ + PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */ + PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */ + PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */ + PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */ + PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */ + PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */ + PHYR_WIDE_PORT = 0x38, /* wide port participating */ + PHYR_CURRENT0 = 0x80, /* current connection info 0 */ + PHYR_CURRENT1 = 0x84, /* current connection info 1 */ + PHYR_CURRENT2 = 0x88, /* current connection info 2 */ +}; + +/* SAS/SATA Vendor Specific Port Registers */ +enum sas_sata_vsp_regs { + VSR_PHY_STAT = 0x00, /* Phy Status */ + VSR_PHY_MODE1 = 0x01, /* phy tx */ + VSR_PHY_MODE2 = 0x02, /* tx scc */ + VSR_PHY_MODE3 = 0x03, /* pll */ + VSR_PHY_MODE4 = 0x04, /* VCO */ + VSR_PHY_MODE5 = 0x05, /* Rx */ + VSR_PHY_MODE6 = 0x06, /* CDR */ + VSR_PHY_MODE7 = 0x07, /* Impedance */ + VSR_PHY_MODE8 = 0x08, /* Voltage */ + VSR_PHY_MODE9 = 0x09, /* Test */ + VSR_PHY_MODE10 = 0x0A, /* Power */ + VSR_PHY_MODE11 = 0x0B, /* Phy Mode */ + VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */ + VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */ +}; + +enum pci_cfg_registers { + PCR_PHY_CTL = 0x40, + PCR_PHY_CTL2 = 0x90, + PCR_DEV_CTRL = 0xE8, +}; + +enum pci_cfg_register_bits { + PCTL_PWR_ON = (0xFU << 24), + PCTL_OFF = (0xFU << 12), + PRD_REQ_SIZE = (0x4000), + PRD_REQ_MASK = (0x00007000), +}; + +enum nvram_layout_offsets { + NVR_SIG = 0x00, /* 0xAA, 0x55 */ + NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */ +}; + +enum chip_flavors { + chip_6320, + chip_6440, + chip_6480, +}; + +enum port_type { + PORT_TYPE_SAS = (1L << 1), + PORT_TYPE_SATA = (1L << 0), +}; + +/* Command Table Format */ +enum ct_format { + /* SSP */ + SSP_F_H = 0x00, + SSP_F_IU = 0x18, + SSP_F_MAX = 0x4D, + /* STP */ + STP_CMD_FIS = 0x00, + STP_ATAPI_CMD = 0x40, + STP_F_MAX = 0x10, + /* SMP */ + SMP_F_T = 0x00, + SMP_F_DEP = 0x01, + SMP_F_MAX = 0x101, +}; + +enum status_buffer { + SB_EIR_OFF = 0x00, /* Error Information Record */ + SB_RFB_OFF = 0x08, /* Response Frame Buffer */ + SB_RFB_MAX = 0x400, /* RFB size*/ +}; + +enum error_info_rec { + CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */ + CMD_PI_ERR = (1U << 30), /* Protection info error.
see flags2 */ + RSP_OVER = (1U << 29), /* rsp buffer overflow */ + RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */ + UNK_FIS = (1U << 27), /* unknown FIS */ + DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */ + SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */ + TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */ + R_ERR = (1U << 23), /* SATA returned R_ERR prim */ + RD_OFS = (1U << 20), /* Read DATA frame invalid offset */ + XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */ + UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */ + DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */ + INTERLOCK = (1U << 15), /* interlock error */ + NAK = (1U << 14), /* NAK rx'd */ + ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */ + CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */ + OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */ + PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */ + NO_DEST = (1U << 9), /* I_T nexus lost, no destination */ + STP_RES_BSY = (1U << 8), /* STP resources busy */ + BREAK = (1U << 7), /* break received */ + BAD_DEST = (1U << 6), /* bad destination */ + BAD_PROTO = (1U << 5), /* protocol not supported */ + BAD_RATE = (1U << 4), /* cxn rate not supported */ + WRONG_DEST = (1U << 3), /* wrong destination error */ + CREDIT_TO = (1U << 2), /* credit timeout */ + WDOG_TO = (1U << 1), /* watchdog timeout */ + BUF_PAR = (1U << 0), /* buffer parity error */ +}; + +enum error_info_rec_2 { + SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */ + GRD_CHK_ERR = (1U << 14), /* Guard Check Error */ + APP_CHK_ERR = (1U << 13), /* Application Check error */ + REF_CHK_ERR = (1U << 12), /* Reference Check Error */ + USR_BLK_NM = (1U << 0), /* User Block Number */ +}; + +struct mvs_chip_info { + u32 n_phy; + u32 srs_sz; + u32 slot_width; +}; + +struct mvs_err_info { + __le32 flags; + __le32 flags2; +}; + +struct mvs_prd { + __le64 addr; /* 64-bit buffer address */ + __le32 reserved; + __le32 len; /* 16-bit length */ +}; + +struct mvs_cmd_hdr { + __le32 flags; /* PRD tbl len; SAS, SATA ctl */ + __le32 lens; /* cmd, max resp frame len */ + __le32 tags; /* targ port xfer tag; tag */ + __le32 data_len; /* data xfer len */ + __le64 cmd_tbl; /* command table address */ + __le64 open_frame; /* open addr frame address */ + __le64 status_buf; /* status buffer address */ + __le64 prd_tbl; /* PRD tbl address */ + __le32 reserved[4]; +}; + +struct mvs_port { + struct asd_sas_port sas_port; + u8 port_attached; + u8 taskfileset; + u8 wide_port_phymap; + struct list_head list; +}; + +struct mvs_phy { + struct mvs_port *port; + struct asd_sas_phy sas_phy; + struct sas_identify identify; + struct scsi_device *sdev; + u64 dev_sas_addr; + u64 att_dev_sas_addr; + u32 att_dev_info; + u32 dev_info; + u32 phy_type; + u32 phy_status; + u32 irq_status; + u32 frame_rcvd_size; + u8 frame_rcvd[32]; + u8 phy_attached; + enum sas_linkrate minimum_linkrate; + enum sas_linkrate maximum_linkrate; +}; + +struct mvs_slot_info { + struct list_head list; + struct sas_task *task; + u32 n_elem; + u32 tx; + + /* DMA buffer for storing cmd tbl, open addr frame, status buffer, + * and PRD table + */ + void *buf; + dma_addr_t buf_dma; +#if _MV_DUMP + u32 cmd_size; +#endif + + void *response; + struct mvs_port *port; +}; + +struct mvs_info { + unsigned long flags; + + spinlock_t lock; /* host-wide lock */ + struct pci_dev *pdev; /* our device */ + void __iomem *regs; /* enhanced mode registers */ + void __iomem *peri_regs; /* peripheral registers */ + + u8 
sas_addr[SAS_ADDR_SIZE]; + struct sas_ha_struct sas; /* SCSI/SAS glue */ + struct Scsi_Host *shost; + + __le32 *tx; /* TX (delivery) DMA ring */ + dma_addr_t tx_dma; + u32 tx_prod; /* cached next-producer idx */ + + __le32 *rx; /* RX (completion) DMA ring */ + dma_addr_t rx_dma; + u32 rx_cons; /* RX consumer idx */ + + __le32 *rx_fis; /* RX'd FIS area */ + dma_addr_t rx_fis_dma; + + struct mvs_cmd_hdr *slot; /* DMA command header slots */ + dma_addr_t slot_dma; + + const struct mvs_chip_info *chip; + + u8 tags[MVS_SLOTS]; + struct mvs_slot_info slot_info[MVS_SLOTS]; + /* further per-slot information */ + struct mvs_phy phy[MVS_MAX_PHYS]; + struct mvs_port port[MVS_MAX_PHYS]; +#ifdef MVS_USE_TASKLET + struct tasklet_struct tasklet; +#endif +}; + +static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata); +static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port); +static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val); +static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port); +static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val); +static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val); +static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port); + +static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i); +static void mvs_detect_porttype(struct mvs_info *mvi, int i); +static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); +static void mvs_release_task(struct mvs_info *mvi, int phy_no); + +static int mvs_scan_finished(struct Scsi_Host *, unsigned long); +static void mvs_scan_start(struct Scsi_Host *); +static int mvs_slave_configure(struct scsi_device *sdev); + +static struct scsi_transport_template *mvs_stt; + +static const struct mvs_chip_info mvs_chips[] = { + [chip_6320] = { 2, 16, 9 }, + [chip_6440] = { 4, 16, 9 }, + [chip_6480] = { 8, 32, 10 }, +}; + +static struct scsi_host_template mvs_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .queuecommand = sas_queuecommand, + .target_alloc = sas_target_alloc, + .slave_configure = mvs_slave_configure, + .slave_destroy = sas_slave_destroy, + .scan_finished = mvs_scan_finished, + .scan_start = mvs_scan_start, + .change_queue_depth = sas_change_queue_depth, + .change_queue_type = sas_change_queue_type, + .bios_param = sas_bios_param, + .can_queue = 1, + .cmd_per_lun = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .use_clustering = ENABLE_CLUSTERING, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_bus_reset_handler = sas_eh_bus_reset_handler, + .slave_alloc = sas_slave_alloc, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, +}; + +static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) +{ + u32 i; + u32 run; + u32 offset; + + offset = 0; + while (size) { + printk("%08X : ", baseaddr + offset); + if (size >= 16) + run = 16; + else + run = size; + size -= run; + for (i = 0; i < 16; i++) { + if (i < run) + printk("%02X ", (u32)data[i]); + else + printk(" "); + } + printk(": "); + for (i = 0; i < run; i++) + printk("%c", isalnum(data[i]) ? 
data[i] : '.'); + printk("\n"); + data = &data[16]; + offset += run; + } + printk("\n"); +} + +#if _MV_DUMP +static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, + enum sas_protocol proto) +{ + u32 offset; + struct pci_dev *pdev = mvi->pdev; + struct mvs_slot_info *slot = &mvi->slot_info[tag]; + + offset = slot->cmd_size + MVS_OAF_SZ + + sizeof(struct mvs_prd) * slot->n_elem; + dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n", + tag); + mvs_hexdump(32, (u8 *) slot->response, + (u32) slot->buf_dma + offset); +} +#endif + +static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag, + enum sas_protocol proto) +{ +#if _MV_DUMP + u32 sz, w_ptr; + u64 addr; + void __iomem *regs = mvi->regs; + struct pci_dev *pdev = mvi->pdev; + struct mvs_slot_info *slot = &mvi->slot_info[tag]; + + /*Delivery Queue */ + sz = mr32(TX_CFG) & TX_RING_SZ_MASK; + w_ptr = slot->tx; + addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO); + dev_printk(KERN_DEBUG, &pdev->dev, + "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr); + dev_printk(KERN_DEBUG, &pdev->dev, + "Delivery Queue Base Address=0x%llX (PA)" + "(tx_dma=0x%llX), Entry=%04d\n", + addr, mvi->tx_dma, w_ptr); + mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]), + (u32) mvi->tx_dma + sizeof(u32) * w_ptr); + /*Command List */ + addr = mvi->slot_dma; + dev_printk(KERN_DEBUG, &pdev->dev, + "Command List Base Address=0x%llX (PA)" + "(slot_dma=0x%llX), Header=%03d\n", + addr, slot->buf_dma, tag); + dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag); + /*mvs_cmd_hdr */ + mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]), + (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr)); + /*1.command table area */ + dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n"); + mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma); + /*2.open address frame area */ + dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n"); + mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size, + (u32) slot->buf_dma + slot->cmd_size); + /*3.status buffer */ + mvs_hba_sb_dump(mvi, tag, proto); + /*4.PRD table */ + dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n"); + mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem, + (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ, + (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ); +#endif +} + +static void mvs_hba_cq_dump(struct mvs_info *mvi) +{ +#if (_MV_DUMP > 2) + u64 addr; + void __iomem *regs = mvi->regs; + struct pci_dev *pdev = mvi->pdev; + u32 entry = mvi->rx_cons + 1; + u32 rx_desc = le32_to_cpu(mvi->rx[entry]); + + /*Completion Queue */ + addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO); + dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n", + mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task); + dev_printk(KERN_DEBUG, &pdev->dev, + "Completion List Base Address=0x%llX (PA), " + "CQ_Entry=%04d, CQ_WP=0x%08X\n", + addr, entry - 1, mvi->rx[0]); + mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc), + mvi->rx_dma + sizeof(u32) * entry); +#endif +} + +static void mvs_hba_interrupt_enable(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(GBL_CTL); + + mw32(GBL_CTL, tmp | INT_EN); +} + +static void mvs_hba_interrupt_disable(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(GBL_CTL); + + mw32(GBL_CTL, tmp & ~INT_EN); +} + +static int mvs_int_rx(struct mvs_info *mvi, bool self_clear); + +/* move to PCI layer or libata core? 
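+ *
+ * A sketch of the fallback ladder pci_go_64() below implements, assuming
+ * the usual pci_set_dma_mask()/pci_set_consistent_dma_mask() convention
+ * of returning 0 on success:
+ *	64-bit streaming mask ok  -> try the 64-bit consistent mask,
+ *	                             else fall back to 32-bit consistent
+ *	64-bit streaming mask bad -> use 32-bit streaming + consistent masks
+ * If even the 32-bit fallbacks fail, the error is returned and the probe
+ * aborts.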
*/ +static int pci_go_64(struct pci_dev *pdev) +{ + int rc; + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc) { + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "64-bit DMA enable failed\n"); + return rc; + } + } + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "32-bit DMA enable failed\n"); + return rc; + } + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "32-bit consistent DMA enable failed\n"); + return rc; + } + } + + return rc; +} + +static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) +{ + if (task->lldd_task) { + struct mvs_slot_info *slot; + slot = (struct mvs_slot_info *) task->lldd_task; + *tag = slot - mvi->slot_info; + return 1; + } + return 0; +} + +static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) +{ + void *bitmap = (void *) &mvi->tags; + clear_bit(tag, bitmap); +} + +static void mvs_tag_free(struct mvs_info *mvi, u32 tag) +{ + mvs_tag_clear(mvi, tag); +} + +static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) +{ + void *bitmap = (void *) &mvi->tags; + set_bit(tag, bitmap); +} + +static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) +{ + unsigned int index, tag; + void *bitmap = (void *) &mvi->tags; + + index = find_first_zero_bit(bitmap, MVS_SLOTS); + tag = index; + if (tag >= MVS_SLOTS) + return -SAS_QUEUE_FULL; + mvs_tag_set(mvi, tag); + *tag_out = tag; + return 0; +} + +static void mvs_tag_init(struct mvs_info *mvi) +{ + int i; + for (i = 0; i < MVS_SLOTS; ++i) + mvs_tag_clear(mvi, i); +} + +#ifndef MVS_DISABLE_NVRAM +static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data) +{ + int timeout = 1000; + + if (addr & ~SPI_ADDR_MASK) + return -EINVAL; + + writel(addr, regs + SPI_CMD); + writel(TWSI_RD, regs + SPI_CTL); + + while (timeout-- > 0) { + if (readl(regs + SPI_CTL) & TWSI_RDY) { + *data = readl(regs + SPI_DATA); + return 0; + } + + udelay(10); + } + + return -EBUSY; +} + +static int mvs_eep_read_buf(void __iomem *regs, u32 addr, + void *buf, u32 buflen) +{ + u32 addr_end, tmp_addr, i, j; + u32 tmp = 0; + int rc; + u8 *tmp8, *buf8 = buf; + + addr_end = addr + buflen; + tmp_addr = ALIGN(addr, 4); + if (addr > 0xff) + return -EINVAL; + + j = addr & 0x3; + if (j) { + rc = mvs_eep_read(regs, tmp_addr, &tmp); + if (rc) + return rc; + + tmp8 = (u8 *)&tmp; + for (i = j; i < 4; i++) + *buf8++ = tmp8[i]; + + tmp_addr += 4; + } + + for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) { + rc = mvs_eep_read(regs, tmp_addr, &tmp); + if (rc) + return rc; + + memcpy(buf8, &tmp, 4); + buf8 += 4; + } + + if (tmp_addr < addr_end) { + rc = mvs_eep_read(regs, tmp_addr, &tmp); + if (rc) + return rc; + + tmp8 = (u8 *)&tmp; + j = addr_end - tmp_addr; + for (i = 0; i < j; i++) + *buf8++ = tmp8[i]; + + tmp_addr += 4; + } + + return 0; +} +#endif + +static int mvs_nvram_read(struct mvs_info *mvi, u32 addr, + void *buf, u32 buflen) +{ +#ifndef MVS_DISABLE_NVRAM + void __iomem *regs = mvi->regs; + int rc, i; + u32 sum; + u8 hdr[2], *tmp; + const char *msg; + + rc = mvs_eep_read_buf(regs, addr, &hdr, 2); + if (rc) { + msg = "nvram hdr read failed"; + goto err_out; + } + rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen); + if (rc) { + msg = "nvram read failed"; + goto err_out; + } + + if (hdr[0] != 0x5A) { + /* entry id */ + msg = "invalid nvram entry id"; + rc = -ENOENT; + goto 
err_out; + } + + tmp = buf; + sum = ((u32)hdr[0]) + ((u32)hdr[1]); + for (i = 0; i < buflen; i++) + sum += ((u32)tmp[i]); + + if (sum) { + msg = "nvram checksum failure"; + rc = -EILSEQ; + goto err_out; + } + + return 0; + +err_out: + dev_printk(KERN_ERR, &mvi->pdev->dev, "%s\n", msg); + return rc; +#else + /* FIXME, for SAS target mode */ + memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8); + return 0; +#endif +} + +static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; + + if (!phy->phy_attached) + return; + + if (sas_phy->phy) { + struct sas_phy *sphy = sas_phy->phy; + + sphy->negotiated_linkrate = sas_phy->linkrate; + sphy->minimum_linkrate = phy->minimum_linkrate; + sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; + sphy->maximum_linkrate = phy->maximum_linkrate; + sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; + } + + if (phy->phy_type & PORT_TYPE_SAS) { + struct sas_identify_frame *id; + + id = (struct sas_identify_frame *)phy->frame_rcvd; + id->dev_type = phy->identify.device_type; + id->initiator_bits = SAS_PROTOCOL_ALL; + id->target_bits = phy->identify.target_port_protocols; + } else if (phy->phy_type & PORT_TYPE_SATA) { + /* TODO */ + } + mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size; + mvi->sas.notify_port_event(mvi->sas.sas_phy[i], + PORTE_BYTES_DMAED); +} + +static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + /* give the phy enabling interrupt event time to come in (1s + * is empirically about all it takes) */ + if (time < HZ) + return 0; + /* Wait for discovery to finish */ + scsi_flush_work(shost); + return 1; +} + +static void mvs_scan_start(struct Scsi_Host *shost) +{ + int i; + struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha; + + for (i = 0; i < mvi->chip->n_phy; ++i) { + mvs_bytes_dmaed(mvi, i); + } +} + +static int mvs_slave_configure(struct scsi_device *sdev) +{ + struct domain_device *dev = sdev_to_domain_dev(sdev); + int ret = sas_slave_configure(sdev); + + if (ret) + return ret; + + if (dev_is_sata(dev)) { + /* struct ata_port *ap = dev->sata_dev.ap; */ + /* struct ata_device *adev = ap->link.device; */ + + /* clamp at no NCQ for the time being */ + /* adev->flags |= ATA_DFLAG_NCQ_OFF; */ + scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); + } + return 0; +} + +static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) +{ + struct pci_dev *pdev = mvi->pdev; + struct sas_ha_struct *sas_ha = &mvi->sas; + struct mvs_phy *phy = &mvi->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no); + /* + * events is the port-wide event; we still need to check the + * per-port interrupt status here. + */
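+	/* A sketch of how these status bits drive the handling below
+	 * (PHYEV_* values are from the hw_register_bits definitions above):
+	 *	PHYEV_POOF | PHYEV_DEC_ERR    -> release tasks, re-check the phy
+	 *	PHYEV_COMWAKE                 -> unmask PHYEV_SIG_FIS
+	 *	PHYEV_SIG_FIS | PHYEV_ID_DONE -> update phy info, notify OOB done
+	 *	PHYEV_BROAD_CH                -> notify broadcast(CHANGE) received
+	 */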
+ */ + dev_printk(KERN_DEBUG, &pdev->dev, + "Port %d Event = %X\n", + phy_no, phy->irq_status); + + if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) { + mvs_release_task(mvi, phy_no); + if (!mvs_is_phy_ready(mvi, phy_no)) { + sas_phy_disconnected(sas_phy); + sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); + dev_printk(KERN_INFO, &pdev->dev, + "Port %d Unplug Notice\n", phy_no); + + } else + mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL); + } + if (!(phy->irq_status & PHYEV_DEC_ERR)) { + if (phy->irq_status & PHYEV_COMWAKE) { + u32 tmp = mvs_read_port_irq_mask(mvi, phy_no); + mvs_write_port_irq_mask(mvi, phy_no, + tmp | PHYEV_SIG_FIS); + } + if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { + phy->phy_status = mvs_is_phy_ready(mvi, phy_no); + if (phy->phy_status) { + mvs_detect_porttype(mvi, phy_no); + + if (phy->phy_type & PORT_TYPE_SATA) { + u32 tmp = mvs_read_port_irq_mask(mvi, + phy_no); + tmp &= ~PHYEV_SIG_FIS; + mvs_write_port_irq_mask(mvi, + phy_no, tmp); + } + + mvs_update_phyinfo(mvi, phy_no, 0); + sas_ha->notify_phy_event(sas_phy, + PHYE_OOB_DONE); + mvs_bytes_dmaed(mvi, phy_no); + } else { + dev_printk(KERN_DEBUG, &pdev->dev, + "plugin interrupt but phy is gone\n"); + mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, + NULL); + } + } else if (phy->irq_status & PHYEV_BROAD_CH) { + mvs_release_task(mvi, phy_no); + sas_ha->notify_port_event(sas_phy, + PORTE_BROADCAST_RCVD); + } + } + mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status); +} + +static void mvs_int_sata(struct mvs_info *mvi) +{ + u32 tmp; + void __iomem *regs = mvi->regs; + tmp = mr32(INT_STAT_SRS); + mw32(INT_STAT_SRS, tmp & 0xFFFF); +} + +static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, + u32 slot_idx) +{ + void __iomem *regs = mvi->regs; + struct domain_device *dev = task->dev; + struct asd_sas_port *sas_port = dev->port; + struct mvs_port *port = mvi->slot_info[slot_idx].port; + u32 reg_set, phy_mask; + + if (!sas_protocol_ata(task->task_proto)) { + reg_set = 0; + phy_mask = (port->wide_port_phymap) ? 
port->wide_port_phymap : + sas_port->phy_mask; + } else { + reg_set = port->taskfileset; + phy_mask = sas_port->phy_mask; + } + mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx | + (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) | + (phy_mask << TXQ_PHY_SHIFT) | + (reg_set << TXQ_SRS_SHIFT)); + + mw32(TX_PROD_IDX, mvi->tx_prod); + mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); +} + +static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, + u32 slot_idx, int err) +{ + struct mvs_port *port = mvi->slot_info[slot_idx].port; + struct task_status_struct *tstat = &task->task_status; + struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; + int stat = SAM_GOOD; + + resp->frame_len = sizeof(struct dev_to_host_fis); + memcpy(&resp->ending_fis[0], + SATA_RECEIVED_D2H_FIS(port->taskfileset), + sizeof(struct dev_to_host_fis)); + tstat->buf_valid_size = sizeof(*resp); + if (unlikely(err)) + stat = SAS_PROTO_RESPONSE; + return stat; +} + +static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) +{ + u32 slot_idx = rx_desc & RXQ_SLOT_MASK; + mvs_tag_clear(mvi, slot_idx); +} + +static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, + struct mvs_slot_info *slot, u32 slot_idx) +{ + if (!sas_protocol_ata(task->task_proto)) + if (slot->n_elem) + pci_unmap_sg(mvi->pdev, task->scatter, + slot->n_elem, task->data_dir); + + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); + pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1, + PCI_DMA_TODEVICE); + break; + + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SSP: + default: + /* do nothing */ + break; + } + list_del(&slot->list); + task->lldd_task = NULL; + slot->task = NULL; + slot->port = NULL; +} + +static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, + u32 slot_idx) +{ + struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; + u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); + u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4)); + int stat = SAM_CHECK_COND; + + if (err_dw1 & SLOT_BSY_ERR) { + stat = SAS_QUEUE_FULL; + mvs_slot_reset(mvi, task, slot_idx); + } + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + break; + case SAS_PROTOCOL_SMP: + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + if (err_dw0 & TFILE_ERR) + stat = mvs_sata_done(mvi, task, slot_idx, 1); + break; + default: + break; + } + + mvs_hexdump(16, (u8 *) slot->response, 0); + return stat; +} + +static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) +{ + u32 slot_idx = rx_desc & RXQ_SLOT_MASK; + struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; + struct sas_task *task = slot->task; + struct task_status_struct *tstat; + struct mvs_port *port; + bool aborted; + void *to; + + if (unlikely(!task || !task->lldd_task)) + return -1; + + mvs_hba_cq_dump(mvi); + + spin_lock(&task->task_state_lock); + aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; + if (!aborted) { + task->task_state_flags &= + ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); + task->task_state_flags |= SAS_TASK_STATE_DONE; + } + spin_unlock(&task->task_state_lock); + + if (aborted) { + mvs_slot_task_free(mvi, task, slot, slot_idx); + mvs_slot_free(mvi, rx_desc); + return -1; + } + + port = slot->port; + tstat = &task->task_status; + memset(tstat, 0, sizeof(*tstat)); + tstat->resp = SAS_TASK_COMPLETE; + + if (unlikely(!port->port_attached || flags)) { + 
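+		/* Port gone, or completion forced (mvs_release_task() calls
+		 * in with flags set): still parse the error record, and
+		 * finish non-ATA tasks with SAS_PHY_DOWN below. */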
mvs_slot_err(mvi, task, slot_idx); + if (!sas_protocol_ata(task->task_proto)) + tstat->stat = SAS_PHY_DOWN; + goto out; + } + + /* error info record present */ + if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { + tstat->stat = mvs_slot_err(mvi, task, slot_idx); + goto out; + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + /* hw says status == 0, datapres == 0 */ + if (rx_desc & RXQ_GOOD) { + tstat->stat = SAM_GOOD; + tstat->resp = SAS_TASK_COMPLETE; + } + /* response frame present */ + else if (rx_desc & RXQ_RSP) { + struct ssp_response_iu *iu = + slot->response + sizeof(struct mvs_err_info); + sas_ssp_task_response(&mvi->pdev->dev, task, iu); + } + + /* should never happen? */ + else + tstat->stat = SAM_CHECK_COND; + break; + + case SAS_PROTOCOL_SMP: { + struct scatterlist *sg_resp = &task->smp_task.smp_resp; + tstat->stat = SAM_GOOD; + to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); + memcpy(to + sg_resp->offset, + slot->response + sizeof(struct mvs_err_info), + sg_dma_len(sg_resp)); + kunmap_atomic(to, KM_IRQ0); + break; + } + + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { + tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); + break; + } + + default: + tstat->stat = SAM_CHECK_COND; + break; + } + +out: + mvs_slot_task_free(mvi, task, slot, slot_idx); + if (unlikely(tstat->stat != SAS_QUEUE_FULL)) + mvs_slot_free(mvi, rx_desc); + + spin_unlock(&mvi->lock); + task->task_done(task); + spin_lock(&mvi->lock); + return tstat->stat; +} + +static void mvs_release_task(struct mvs_info *mvi, int phy_no) +{ + struct list_head *pos, *n; + struct mvs_slot_info *slot; + struct mvs_phy *phy = &mvi->phy[phy_no]; + struct mvs_port *port = phy->port; + u32 rx_desc; + + if (!port) + return; + + list_for_each_safe(pos, n, &port->list) { + slot = container_of(pos, struct mvs_slot_info, list); + rx_desc = (u32) (slot - mvi->slot_info); + mvs_slot_complete(mvi, rx_desc, 1); + } +} + +static void mvs_int_full(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp, stat; + int i; + + stat = mr32(INT_STAT); + + mvs_int_rx(mvi, false); + + for (i = 0; i < MVS_MAX_PORTS; i++) { + tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); + if (tmp) + mvs_int_port(mvi, i, tmp); + } + + if (stat & CINT_SRS) + mvs_int_sata(mvi); + + mw32(INT_STAT, stat); +} + +static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) +{ + void __iomem *regs = mvi->regs; + u32 rx_prod_idx, rx_desc; + bool attn = false; + struct pci_dev *pdev = mvi->pdev; + + /* the first dword in the RX ring is special: it contains + * a mirror of the hardware's RX producer index, so that + * we don't have to stall the CPU reading that register. + * The actual RX ring is offset by one dword, due to this. 
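+	 * A sketch of the resulting layout, matching the reads below
+	 * (wrap-around uses (idx + 1) & (MVS_RX_RING_SZ - 1)):
+	 *	rx[0]      mirrored RX producer index (le32)
+	 *	rx[1 + i]  completion descriptor for ring entry i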
+ */ + rx_prod_idx = mvi->rx_cons; + mvi->rx_cons = le32_to_cpu(mvi->rx[0]); + if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ + return 0; + + /* The CMPL_Q may come late, read from register and try again + * note: if coalescing is enabled, + * it will need to read from register every time for sure + */ + if (mvi->rx_cons == rx_prod_idx) + mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK; + + if (mvi->rx_cons == rx_prod_idx) + return 0; + + while (mvi->rx_cons != rx_prod_idx) { + + /* increment our internal RX consumer pointer */ + rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); + + rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); + + if (likely(rx_desc & RXQ_DONE)) + mvs_slot_complete(mvi, rx_desc, 0); + if (rx_desc & RXQ_ATTN) { + attn = true; + dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n", + rx_desc); + } else if (rx_desc & RXQ_ERR) { + if (!(rx_desc & RXQ_DONE)) + mvs_slot_complete(mvi, rx_desc, 0); + dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n", + rx_desc); + } else if (rx_desc & RXQ_SLOT_RESET) { + dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n", + rx_desc); + mvs_slot_free(mvi, rx_desc); + } + } + + if (attn && self_clear) + mvs_int_full(mvi); + + return 0; +} + +#ifdef MVS_USE_TASKLET +static void mvs_tasklet(unsigned long data) +{ + struct mvs_info *mvi = (struct mvs_info *) data; + unsigned long flags; + + spin_lock_irqsave(&mvi->lock, flags); + +#ifdef MVS_DISABLE_MSI + mvs_int_full(mvi); +#else + mvs_int_rx(mvi, true); +#endif + spin_unlock_irqrestore(&mvi->lock, flags); +} +#endif + +static irqreturn_t mvs_interrupt(int irq, void *opaque) +{ + struct mvs_info *mvi = opaque; + void __iomem *regs = mvi->regs; + u32 stat; + + stat = mr32(GBL_INT_STAT); + + if (stat == 0 || stat == 0xffffffff) + return IRQ_NONE; + + /* clear CMD_CMPLT ASAP */ + mw32_f(INT_STAT, CINT_DONE); + +#ifndef MVS_USE_TASKLET + spin_lock(&mvi->lock); + + mvs_int_full(mvi); + + spin_unlock(&mvi->lock); +#else + tasklet_schedule(&mvi->tasklet); +#endif + return IRQ_HANDLED; +} + +#ifndef MVS_DISABLE_MSI +static irqreturn_t mvs_msi_interrupt(int irq, void *opaque) +{ + struct mvs_info *mvi = opaque; + +#ifndef MVS_USE_TASKLET + spin_lock(&mvi->lock); + + mvs_int_rx(mvi, true); + + spin_unlock(&mvi->lock); +#else + tasklet_schedule(&mvi->tasklet); +#endif + return IRQ_HANDLED; +} +#endif + +struct mvs_task_exec_info { + struct sas_task *task; + struct mvs_cmd_hdr *hdr; + struct mvs_port *port; + u32 tag; + int n_elem; +}; + +static int mvs_task_prep_smp(struct mvs_info *mvi, + struct mvs_task_exec_info *tei) +{ + int elem, rc, i; + struct sas_task *task = tei->task; + struct mvs_cmd_hdr *hdr = tei->hdr; + struct scatterlist *sg_req, *sg_resp; + u32 req_len, resp_len, tag = tei->tag; + void *buf_tmp; + u8 *buf_oaf; + dma_addr_t buf_tmp_dma; + struct mvs_prd *buf_prd; + struct scatterlist *sg; + struct mvs_slot_info *slot = &mvi->slot_info[tag]; + struct asd_sas_port *sas_port = task->dev->port; + u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); +#if _MV_DUMP + u8 *buf_cmd; + void *from; +#endif + /* + * DMA-map SMP request, response buffers + */ + sg_req = &task->smp_task.smp_req; + elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + + sg_resp = &task->smp_task.smp_resp; + elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out; + } + resp_len = sg_dma_len(sg_resp); + + /* must be in dwords */ + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; 
+ goto err_out_2; + } + + /* + * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs + */ + + /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ + buf_tmp = slot->buf; + buf_tmp_dma = slot->buf_dma; + +#if _MV_DUMP + buf_cmd = buf_tmp; + hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); + buf_tmp += req_len; + buf_tmp_dma += req_len; + slot->cmd_size = req_len; +#else + hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); +#endif + + /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ + buf_oaf = buf_tmp; + hdr->open_frame = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_OAF_SZ; + buf_tmp_dma += MVS_OAF_SZ; + + /* region 3: PRD table ********************************************* */ + buf_prd = buf_tmp; + if (tei->n_elem) + hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); + else + hdr->prd_tbl = 0; + + i = sizeof(struct mvs_prd) * tei->n_elem; + buf_tmp += i; + buf_tmp_dma += i; + + /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ + slot->response = buf_tmp; + hdr->status_buf = cpu_to_le64(buf_tmp_dma); + + /* + * Fill in TX ring and command slot header + */ + slot->tx = mvi->tx_prod; + mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | + TXQ_MODE_I | tag | + (sas_port->phy_mask << TXQ_PHY_SHIFT)); + + hdr->flags |= flags; + hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); + hdr->tags = cpu_to_le32(tag); + hdr->data_len = 0; + + /* generate open address frame hdr (first 12 bytes) */ + buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */ + buf_oaf[1] = task->dev->linkrate & 0xf; + *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ + memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); + + /* fill in PRD (scatter/gather) table, if any */ + for_each_sg(task->scatter, sg, tei->n_elem, i) { + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); + buf_prd->len = cpu_to_le32(sg_dma_len(sg)); + buf_prd++; + } + +#if _MV_DUMP + /* copy cmd table */ + from = kmap_atomic(sg_page(sg_req), KM_IRQ0); + memcpy(buf_cmd, from + sg_req->offset, req_len); + kunmap_atomic(from, KM_IRQ0); +#endif + return 0; + +err_out_2: + pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); +err_out: + pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1, + PCI_DMA_TODEVICE); + return rc; +} + +static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port) +{ + void __iomem *regs = mvi->regs; + u32 tmp, offs; + u8 *tfs = &port->taskfileset; + + if (*tfs == MVS_ID_NOT_MAPPED) + return; + + offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); + if (*tfs < 16) { + tmp = mr32(PCS); + mw32(PCS, tmp & ~offs); + } else { + tmp = mr32(CTL); + mw32(CTL, tmp & ~offs); + } + + tmp = mr32(INT_STAT_SRS) & (1U << *tfs); + if (tmp) + mw32(INT_STAT_SRS, tmp); + + *tfs = MVS_ID_NOT_MAPPED; +} + +static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port) +{ + int i; + u32 tmp, offs; + void __iomem *regs = mvi->regs; + + if (port->taskfileset != MVS_ID_NOT_MAPPED) + return 0; + + tmp = mr32(PCS); + + for (i = 0; i < mvi->chip->srs_sz; i++) { + if (i == 16) + tmp = mr32(CTL); + offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); + if (!(tmp & offs)) { + port->taskfileset = i; + + if (i < 16) + mw32(PCS, tmp | offs); + else + mw32(CTL, tmp | offs); + tmp = mr32(INT_STAT_SRS) & (1U << i); + if (tmp) + mw32(INT_STAT_SRS, tmp); + return 0; + } + } + return MVS_ID_NOT_MAPPED; +} + +static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) +{ + struct ata_queued_cmd *qc = 
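/*
 * mvs_get_ncq_tag() below pulls the NCQ tag out of the libata queued
 * command attached to the sas_task. For FPDMA (NCQ) commands the tag
 * travels in bits 7:3 of the FIS sector-count field, which is why
 * mvs_task_prep_ata() later does "sector_count |= tag << 3". A sketch
 * of that encoding, assuming a 5-bit tag (0-31):
 *
 *	fis->sector_count |= (tag & 0x1f) << 3;
 */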
task->uldd_task;
+
+	if (qc) {
+		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+		    qc->tf.command == ATA_CMD_FPDMA_READ) {
+			*tag = qc->tag;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static int mvs_task_prep_ata(struct mvs_info *mvi,
+			     struct mvs_task_exec_info *tei)
+{
+	struct sas_task *task = tei->task;
+	struct domain_device *dev = task->dev;
+	struct mvs_cmd_hdr *hdr = tei->hdr;
+	struct asd_sas_port *sas_port = dev->port;
+	struct mvs_slot_info *slot;
+	struct scatterlist *sg;
+	struct mvs_prd *buf_prd;
+	struct mvs_port *port = tei->port;
+	u32 tag = tei->tag;
+	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+	void *buf_tmp;
+	u8 *buf_cmd, *buf_oaf;
+	dma_addr_t buf_tmp_dma;
+	u32 i, req_len, resp_len;
+	const u32 max_resp_len = SB_RFB_MAX;
+
+	if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
+		return -EBUSY;
+
+	slot = &mvi->slot_info[tag];
+	slot->tx = mvi->tx_prod;
+	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
+					(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
+					(sas_port->phy_mask << TXQ_PHY_SHIFT) |
+					(port->taskfileset << TXQ_SRS_SHIFT));
+
+	if (task->ata_task.use_ncq)
+		flags |= MCH_FPDMA;
+	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
+		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
+			flags |= MCH_ATAPI;
+	}
+
+	/* FIXME: fill in port multiplier number */
+
+	hdr->flags = cpu_to_le32(flags);
+
+	/* FIXME: the low-order 5 bits carry the TAG when NCQ is enabled */
+	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags))
+		task->ata_task.fis.sector_count |= hdr->tags << 3;
+	else
+		hdr->tags = cpu_to_le32(tag);
+	hdr->data_len = cpu_to_le32(task->total_xfer_len);
+
+	/*
+	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+	 */
+
+	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
+	buf_cmd = buf_tmp = slot->buf;
+	buf_tmp_dma = slot->buf_dma;
+
+	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+	buf_tmp += MVS_ATA_CMD_SZ;
+	buf_tmp_dma += MVS_ATA_CMD_SZ;
+#if _MV_DUMP
+	slot->cmd_size = MVS_ATA_CMD_SZ;
+#endif
+
+	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+	/* used for STP. unused for SATA? */
+	buf_oaf = buf_tmp;
+	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+	buf_tmp += MVS_OAF_SZ;
+	buf_tmp_dma += MVS_OAF_SZ;
+
+	/* region 3: PRD table ********************************************* */
+	buf_prd = buf_tmp;
+	if (tei->n_elem)
+		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+	else
+		hdr->prd_tbl = 0;
+
+	i = sizeof(struct mvs_prd) * tei->n_elem;
+	buf_tmp += i;
+	buf_tmp_dma += i;
+
+	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
+	/* FIXME: probably unused, for SATA.
kept here just in case + * we get a STP/SATA error information record + */ + slot->response = buf_tmp; + hdr->status_buf = cpu_to_le64(buf_tmp_dma); + + req_len = sizeof(struct host_to_dev_fis); + resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - + sizeof(struct mvs_err_info) - i; + + /* request, response lengths */ + resp_len = min(resp_len, max_resp_len); + hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); + + task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ + /* fill in command FIS and ATAPI CDB */ + memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); + if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) + memcpy(buf_cmd + STP_ATAPI_CMD, + task->ata_task.atapi_packet, 16); + + /* generate open address frame hdr (first 12 bytes) */ + buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */ + buf_oaf[1] = task->dev->linkrate & 0xf; + *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); + memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); + + /* fill in PRD (scatter/gather) table, if any */ + for_each_sg(task->scatter, sg, tei->n_elem, i) { + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); + buf_prd->len = cpu_to_le32(sg_dma_len(sg)); + buf_prd++; + } + + return 0; +} + +static int mvs_task_prep_ssp(struct mvs_info *mvi, + struct mvs_task_exec_info *tei) +{ + struct sas_task *task = tei->task; + struct mvs_cmd_hdr *hdr = tei->hdr; + struct mvs_port *port = tei->port; + struct mvs_slot_info *slot; + struct scatterlist *sg; + struct mvs_prd *buf_prd; + struct ssp_frame_hdr *ssp_hdr; + void *buf_tmp; + u8 *buf_cmd, *buf_oaf, fburst = 0; + dma_addr_t buf_tmp_dma; + u32 flags; + u32 resp_len, req_len, i, tag = tei->tag; + const u32 max_resp_len = SB_RFB_MAX; + u8 phy_mask; + + slot = &mvi->slot_info[tag]; + + phy_mask = (port->wide_port_phymap) ? 
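/*
 * The PHY bitmap in the TX descriptor tells the hardware which phys
 * may deliver the frame: a wide port uses the whole recorded phymap,
 * a narrow port the single-phy mask libsas reported. A sketch of the
 * selection being made here, with hypothetical names:
 *
 *	static u8 tx_phy_mask(u8 wide_port_phymap, u8 port_phy_mask)
 *	{
 *		return wide_port_phymap ? wide_port_phymap : port_phy_mask;
 *	}
 */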
port->wide_port_phymap : + task->dev->port->phy_mask; + slot->tx = mvi->tx_prod; + mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | + (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | + (phy_mask << TXQ_PHY_SHIFT)); + + flags = MCH_RETRY; + if (task->ssp_task.enable_first_burst) { + flags |= MCH_FBURST; + fburst = (1 << 7); + } + hdr->flags = cpu_to_le32(flags | + (tei->n_elem << MCH_PRD_LEN_SHIFT) | + (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT)); + + hdr->tags = cpu_to_le32(tag); + hdr->data_len = cpu_to_le32(task->total_xfer_len); + + /* + * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs + */ + + /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ + buf_cmd = buf_tmp = slot->buf; + buf_tmp_dma = slot->buf_dma; + + hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_SSP_CMD_SZ; + buf_tmp_dma += MVS_SSP_CMD_SZ; +#if _MV_DUMP + slot->cmd_size = MVS_SSP_CMD_SZ; +#endif + + /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ + buf_oaf = buf_tmp; + hdr->open_frame = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_OAF_SZ; + buf_tmp_dma += MVS_OAF_SZ; + + /* region 3: PRD table ********************************************* */ + buf_prd = buf_tmp; + if (tei->n_elem) + hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); + else + hdr->prd_tbl = 0; + + i = sizeof(struct mvs_prd) * tei->n_elem; + buf_tmp += i; + buf_tmp_dma += i; + + /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ + slot->response = buf_tmp; + hdr->status_buf = cpu_to_le64(buf_tmp_dma); + + resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - + sizeof(struct mvs_err_info) - i; + resp_len = min(resp_len, max_resp_len); + + req_len = sizeof(struct ssp_frame_hdr) + 28; + + /* request, response lengths */ + hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); + + /* generate open address frame hdr (first 12 bytes) */ + buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */ + buf_oaf[1] = task->dev->linkrate & 0xf; + *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); + memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); + + /* fill in SSP frame header (Command Table.SSP frame header) */ + ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; + ssp_hdr->frame_type = SSP_COMMAND; + memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr, + HASHED_SAS_ADDR_SIZE); + memcpy(ssp_hdr->hashed_src_addr, + task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + ssp_hdr->tag = cpu_to_be16(tag); + + /* fill in command frame IU */ + buf_cmd += sizeof(*ssp_hdr); + memcpy(buf_cmd, &task->ssp_task.LUN, 8); + buf_cmd[9] = fburst | task->ssp_task.task_attr | + (task->ssp_task.task_prio << 3); + memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); + + /* fill in PRD (scatter/gather) table, if any */ + for_each_sg(task->scatter, sg, tei->n_elem, i) { + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); + buf_prd->len = cpu_to_le32(sg_dma_len(sg)); + buf_prd++; + } + + return 0; +} + +static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) +{ + struct domain_device *dev = task->dev; + struct mvs_info *mvi = dev->port->ha->lldd_ha; + struct pci_dev *pdev = mvi->pdev; + void __iomem *regs = mvi->regs; + struct mvs_task_exec_info tei; + struct sas_task *t = task; + struct mvs_slot_info *slot; + u32 tag = 0xdeadbeef, rc, n_elem = 0; + unsigned long flags; + u32 n = num, pass = 0; + + spin_lock_irqsave(&mvi->lock, flags); + do { + dev = t->dev; + tei.port = &mvi->port[dev->port->id]; + + if (!tei.port->port_attached) { + if (sas_protocol_ata(t->task_proto)) { 
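/*
 * mvs_task_exec() batches up to "num" tasks under a single lock hold:
 * tx_prod advances once per prepared slot, but the TX_PROD_IDX
 * doorbell is written only once, at out_done, to kick the whole batch.
 * Condensed from the code below:
 *
 *	++pass;
 *	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
 *	...
 *	if (pass)	// one doorbell write for the whole batch
 *		mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
 */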
+ rc = SAS_PHY_DOWN; + goto out_done; + } else { + struct task_status_struct *ts = &t->task_status; + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + t->task_done(t); + if (n > 1) + t = list_entry(t->list.next, + struct sas_task, list); + continue; + } + } + + if (!sas_protocol_ata(t->task_proto)) { + if (t->num_scatter) { + n_elem = pci_map_sg(mvi->pdev, t->scatter, + t->num_scatter, + t->data_dir); + if (!n_elem) { + rc = -ENOMEM; + goto err_out; + } + } + } else { + n_elem = t->num_scatter; + } + + rc = mvs_tag_alloc(mvi, &tag); + if (rc) + goto err_out; + + slot = &mvi->slot_info[tag]; + t->lldd_task = NULL; + slot->n_elem = n_elem; + memset(slot->buf, 0, MVS_SLOT_BUF_SZ); + tei.task = t; + tei.hdr = &mvi->slot[tag]; + tei.tag = tag; + tei.n_elem = n_elem; + + switch (t->task_proto) { + case SAS_PROTOCOL_SMP: + rc = mvs_task_prep_smp(mvi, &tei); + break; + case SAS_PROTOCOL_SSP: + rc = mvs_task_prep_ssp(mvi, &tei); + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + rc = mvs_task_prep_ata(mvi, &tei); + break; + default: + dev_printk(KERN_ERR, &pdev->dev, + "unknown sas_task proto: 0x%x\n", + t->task_proto); + rc = -EINVAL; + break; + } + + if (rc) + goto err_out_tag; + + slot->task = t; + slot->port = tei.port; + t->lldd_task = (void *) slot; + list_add_tail(&slot->list, &slot->port->list); + /* TODO: select normal or high priority */ + + spin_lock(&t->task_state_lock); + t->task_state_flags |= SAS_TASK_AT_INITIATOR; + spin_unlock(&t->task_state_lock); + + mvs_hba_memory_dump(mvi, tag, t->task_proto); + + ++pass; + mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); + if (n > 1) + t = list_entry(t->list.next, struct sas_task, list); + } while (--n); + + rc = 0; + goto out_done; + +err_out_tag: + mvs_tag_free(mvi, tag); +err_out: + dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc); + if (!sas_protocol_ata(t->task_proto)) + if (n_elem) + pci_unmap_sg(mvi->pdev, t->scatter, n_elem, + t->data_dir); +out_done: + if (pass) + mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); + spin_unlock_irqrestore(&mvi->lock, flags); + return rc; +} + +static int mvs_task_abort(struct sas_task *task) +{ + int rc; + unsigned long flags; + struct mvs_info *mvi = task->dev->port->ha->lldd_ha; + struct pci_dev *pdev = mvi->pdev; + int tag; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + rc = TMF_RESP_FUNC_COMPLETE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + goto out_done; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n"); + break; + case SAS_PROTOCOL_SSP: + dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n"); + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{ + dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! 
\n"); +#if _MV_DUMP + dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n"); + mvs_hexdump(sizeof(struct host_to_dev_fis), + (void *)&task->ata_task.fis, 0); + dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n"); + mvs_hexdump(16, task->ata_task.atapi_packet, 0); +#endif + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { + /* TODO */ + ; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + break; + } + default: + break; + } + + if (mvs_find_tag(mvi, task, &tag)) { + spin_lock_irqsave(&mvi->lock, flags); + mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag); + spin_unlock_irqrestore(&mvi->lock, flags); + } + if (!mvs_task_exec(task, 1, GFP_ATOMIC)) + rc = TMF_RESP_FUNC_COMPLETE; + else + rc = TMF_RESP_FUNC_FAILED; +out_done: + return rc; +} + +static void mvs_free(struct mvs_info *mvi) +{ + int i; + + if (!mvi) + return; + + for (i = 0; i < MVS_SLOTS; i++) { + struct mvs_slot_info *slot = &mvi->slot_info[i]; + + if (slot->buf) + dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ, + slot->buf, slot->buf_dma); + } + + if (mvi->tx) + dma_free_coherent(&mvi->pdev->dev, + sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, + mvi->tx, mvi->tx_dma); + if (mvi->rx_fis) + dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ, + mvi->rx_fis, mvi->rx_fis_dma); + if (mvi->rx) + dma_free_coherent(&mvi->pdev->dev, + sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), + mvi->rx, mvi->rx_dma); + if (mvi->slot) + dma_free_coherent(&mvi->pdev->dev, + sizeof(*mvi->slot) * MVS_SLOTS, + mvi->slot, mvi->slot_dma); +#ifdef MVS_ENABLE_PERI + if (mvi->peri_regs) + iounmap(mvi->peri_regs); +#endif + if (mvi->regs) + iounmap(mvi->regs); + if (mvi->shost) + scsi_host_put(mvi->shost); + kfree(mvi->sas.sas_port); + kfree(mvi->sas.sas_phy); + kfree(mvi); +} + +/* FIXME: locking? */ +static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata) +{ + struct mvs_info *mvi = sas_phy->ha->lldd_ha; + int rc = 0, phy_id = sas_phy->id; + u32 tmp; + + tmp = mvs_read_phy_ctl(mvi, phy_id); + + switch (func) { + case PHY_FUNC_SET_LINK_RATE:{ + struct sas_phy_linkrates *rates = funcdata; + u32 lrmin = 0, lrmax = 0; + + lrmin = (rates->minimum_linkrate << 8); + lrmax = (rates->maximum_linkrate << 12); + + if (lrmin) { + tmp &= ~(0xf << 8); + tmp |= lrmin; + } + if (lrmax) { + tmp &= ~(0xf << 12); + tmp |= lrmax; + } + mvs_write_phy_ctl(mvi, phy_id, tmp); + break; + } + + case PHY_FUNC_HARD_RESET: + if (tmp & PHY_RST_HARD) + break; + mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD); + break; + + case PHY_FUNC_LINK_RESET: + mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST); + break; + + case PHY_FUNC_DISABLE: + case PHY_FUNC_RELEASE_SPINUP_HOLD: + default: + rc = -EOPNOTSUPP; + } + + return rc; +} + +static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) +{ + struct mvs_phy *phy = &mvi->phy[phy_id]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 
1 : 0; + sas_phy->class = SAS; + sas_phy->iproto = SAS_PROTOCOL_ALL; + sas_phy->tproto = 0; + sas_phy->type = PHY_TYPE_PHYSICAL; + sas_phy->role = PHY_ROLE_INITIATOR; + sas_phy->oob_mode = OOB_NOT_CONNECTED; + sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; + + sas_phy->id = phy_id; + sas_phy->sas_addr = &mvi->sas_addr[0]; + sas_phy->frame_rcvd = &phy->frame_rcvd[0]; + sas_phy->ha = &mvi->sas; + sas_phy->lldd_phy = phy; +} + +static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct mvs_info *mvi; + unsigned long res_start, res_len, res_flag; + struct asd_sas_phy **arr_phy; + struct asd_sas_port **arr_port; + const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data]; + int i; + + /* + * alloc and init our per-HBA mvs_info struct + */ + + mvi = kzalloc(sizeof(*mvi), GFP_KERNEL); + if (!mvi) + return NULL; + + spin_lock_init(&mvi->lock); +#ifdef MVS_USE_TASKLET + tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi); +#endif + mvi->pdev = pdev; + mvi->chip = chip; + + if (pdev->device == 0x6440 && pdev->revision == 0) + mvi->flags |= MVF_PHY_PWR_FIX; + + /* + * alloc and init SCSI, SAS glue + */ + + mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); + if (!mvi->shost) + goto err_out; + + arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); + arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); + if (!arr_phy || !arr_port) + goto err_out; + + for (i = 0; i < MVS_MAX_PHYS; i++) { + mvs_phy_init(mvi, i); + arr_phy[i] = &mvi->phy[i].sas_phy; + arr_port[i] = &mvi->port[i].sas_port; + mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED; + mvi->port[i].wide_port_phymap = 0; + mvi->port[i].port_attached = 0; + INIT_LIST_HEAD(&mvi->port[i].list); + } + + SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas; + mvi->shost->transportt = mvs_stt; + mvi->shost->max_id = 21; + mvi->shost->max_lun = ~0; + mvi->shost->max_channel = 0; + mvi->shost->max_cmd_len = 16; + + mvi->sas.sas_ha_name = DRV_NAME; + mvi->sas.dev = &pdev->dev; + mvi->sas.lldd_module = THIS_MODULE; + mvi->sas.sas_addr = &mvi->sas_addr[0]; + mvi->sas.sas_phy = arr_phy; + mvi->sas.sas_port = arr_port; + mvi->sas.num_phys = chip->n_phy; + mvi->sas.lldd_max_execute_num = 1; + mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE; + mvi->shost->can_queue = MVS_CAN_QUEUE; + mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys; + mvi->sas.lldd_ha = mvi; + mvi->sas.core.shost = mvi->shost; + + mvs_tag_init(mvi); + + /* + * ioremap main and peripheral registers + */ + +#ifdef MVS_ENABLE_PERI + res_start = pci_resource_start(pdev, 2); + res_len = pci_resource_len(pdev, 2); + if (!res_start || !res_len) + goto err_out; + + mvi->peri_regs = ioremap_nocache(res_start, res_len); + if (!mvi->peri_regs) + goto err_out; +#endif + + res_start = pci_resource_start(pdev, 4); + res_len = pci_resource_len(pdev, 4); + if (!res_start || !res_len) + goto err_out; + + res_flag = pci_resource_flags(pdev, 4); + if (res_flag & IORESOURCE_CACHEABLE) + mvi->regs = ioremap(res_start, res_len); + else + mvi->regs = ioremap_nocache(res_start, res_len); + + if (!mvi->regs) + goto err_out; + + /* + * alloc and init our DMA areas + */ + + mvi->tx = dma_alloc_coherent(&pdev->dev, + sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, + &mvi->tx_dma, GFP_KERNEL); + if (!mvi->tx) + goto err_out; + memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ); + + mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ, + &mvi->rx_fis_dma, GFP_KERNEL); + if (!mvi->rx_fis) + goto err_out; + memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); + + mvi->rx 
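/*
 * The RX ring below is allocated with one extra entry because the
 * controller writes its producer index back into rx[0]; both rx[0]
 * and rx_cons are seeded with 0xfff (the full 12-bit ring-pointer
 * value) so mvs_int_rx() can tell that the hardware has not produced
 * anything yet:
 *
 *	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
 *	if (mvi->rx_cons == 0xfff)	// ring untouched so far
 *		return 0;
 */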
= dma_alloc_coherent(&pdev->dev, + sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), + &mvi->rx_dma, GFP_KERNEL); + if (!mvi->rx) + goto err_out; + memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1)); + + mvi->rx[0] = cpu_to_le32(0xfff); + mvi->rx_cons = 0xfff; + + mvi->slot = dma_alloc_coherent(&pdev->dev, + sizeof(*mvi->slot) * MVS_SLOTS, + &mvi->slot_dma, GFP_KERNEL); + if (!mvi->slot) + goto err_out; + memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS); + + for (i = 0; i < MVS_SLOTS; i++) { + struct mvs_slot_info *slot = &mvi->slot_info[i]; + + slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ, + &slot->buf_dma, GFP_KERNEL); + if (!slot->buf) + goto err_out; + memset(slot->buf, 0, MVS_SLOT_BUF_SZ); + } + + /* finally, read NVRAM to get our SAS address */ + if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8)) + goto err_out; + return mvi; + +err_out: + mvs_free(mvi); + return NULL; +} + +static u32 mvs_cr32(void __iomem *regs, u32 addr) +{ + mw32(CMD_ADDR, addr); + return mr32(CMD_DATA); +} + +static void mvs_cw32(void __iomem *regs, u32 addr, u32 val) +{ + mw32(CMD_ADDR, addr); + mw32(CMD_DATA, val); +} + +static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port) +{ + void __iomem *regs = mvi->regs; + return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4): + mr32(P4_SER_CTLSTAT + (port - 4) * 4); +} + +static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val) +{ + void __iomem *regs = mvi->regs; + if (port < 4) + mw32(P0_SER_CTLSTAT + port * 4, val); + else + mw32(P4_SER_CTLSTAT + (port - 4) * 4, val); +} + +static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port) +{ + void __iomem *regs = mvi->regs + off; + void __iomem *regs2 = mvi->regs + off2; + return (port < 4)?readl(regs + port * 8): + readl(regs2 + (port - 4) * 8); +} + +static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2, + u32 port, u32 val) +{ + void __iomem *regs = mvi->regs + off; + void __iomem *regs2 = mvi->regs + off2; + if (port < 4) + writel(val, regs + port * 8); + else + writel(val, regs2 + (port - 4) * 8); +} + +static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port); +} + +static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val); +} + +static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr) +{ + mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr); +} + +static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port); +} + +static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val); +} + +static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr) +{ + mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr); +} + +static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port); +} + +static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val); +} + +static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port); +} + +static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_INT_MASK, 
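/*
 * All the per-port accessors share one addressing scheme: ports 0-3
 * sit at the port-0 register plus port * 8, ports 4-7 at the port-4
 * register plus (port - 4) * 8. A sketch of the offset computation
 * they all expand to, with hypothetical names:
 *
 *	static u32 port_reg_offs(u32 p0_off, u32 p4_off, u32 port)
 *	{
 *		return port < 4 ? p0_off + port * 8
 *				: p4_off + (port - 4) * 8;
 *	}
 */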
MVS_P4_INT_MASK, port, val);
+}
+
+static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+
+	/* workaround for SATA R-ERR, to ignore phy glitch */
+	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
+	tmp &= ~(1 << 9);
+	tmp |= (1 << 10);
+	mvs_cw32(regs, CMD_PHY_TIMER, tmp);
+
+	/* enable retry 127 times */
+	mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);
+
+	/* extend open frame timeout to max */
+	tmp = mvs_cr32(regs, CMD_SAS_CTL0);
+	tmp &= ~0xffff;
+	tmp |= 0x3fff;
+	mvs_cw32(regs, CMD_SAS_CTL0, tmp);
+
+	/* workaround for WDTIMEOUT, set to 550 ms */
+	mvs_cw32(regs, CMD_WD_TIMER, 0x86470);
+
+	/* don't halt on other-port operations during wide-port link changes */
+	mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);
+
+	/* workaround for Seagate disk not-found OOB sequence, recv
+	 * COMINIT before sending out COMWAKE */
+	tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
+	tmp &= 0x0000ffff;
+	tmp |= 0x00fa0000;
+	mvs_cw32(regs, CMD_PHY_MODE_21, tmp);
+
+	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
+	tmp &= 0x1fffffff;
+	tmp |= (2U << 29);	/* 8 ms retry */
+	mvs_cw32(regs, CMD_PHY_TIMER, tmp);
+
+	/* TEST - for phy decoding error, adjust voltage levels */
+	mw32(P0_VSR_ADDR + 0, 0x8);
+	mw32(P0_VSR_DATA + 0, 0x2F0);
+
+	mw32(P0_VSR_ADDR + 8, 0x8);
+	mw32(P0_VSR_DATA + 8, 0x2F0);
+
+	mw32(P0_VSR_ADDR + 16, 0x8);
+	mw32(P0_VSR_DATA + 16, 0x2F0);
+
+	mw32(P0_VSR_ADDR + 24, 0x8);
+	mw32(P0_VSR_DATA + 24, 0x2F0);
+
+}
+
+static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+
+	tmp = mr32(PCS);
+	if (mvi->chip->n_phy <= 4)
+		tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
+	else
+		tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
+	mw32(PCS, tmp);
+}
+
+static void mvs_detect_porttype(struct mvs_info *mvi, int i)
+{
+	void __iomem *regs = mvi->regs;
+	u32 reg;
+	struct mvs_phy *phy = &mvi->phy[i];
+
+	/* TODO check & save device type */
+	reg = mr32(GBL_PORT_TYPE);
+
+	if (reg & MODE_SAS_SATA & (1 << i))
+		phy->phy_type |= PORT_TYPE_SAS;
+	else
+		phy->phy_type |= PORT_TYPE_SATA;
+}
+
+static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
+{
+	u32 *s = (u32 *) buf;
+
+	if (!s)
+		return NULL;
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
+	s[3] = mvs_read_port_cfg_data(mvi, i);
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
+	s[2] = mvs_read_port_cfg_data(mvi, i);
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
+	s[1] = mvs_read_port_cfg_data(mvi, i);
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
+	s[0] = mvs_read_port_cfg_data(mvi, i);
+
+	return (void *)s;
+}
+
+static u32 mvs_is_sig_fis_received(u32 irq_status)
+{
+	return irq_status & PHYEV_SIG_FIS;
+}
+
+static void mvs_update_wideport(struct mvs_info *mvi, int i)
+{
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct mvs_port *port = phy->port;
+	int j, no;
+
+	for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
+		if (no & 1) {
+			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
+			mvs_write_port_cfg_data(mvi, no,
+						port->wide_port_phymap);
+		} else {
+			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
+			mvs_write_port_cfg_data(mvi, no, 0);
+		}
+}
+
+static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
+{
+	u32 tmp;
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct mvs_port *port = phy->port;
+
+	tmp = mvs_read_phy_ctl(mvi, i);
+
+	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
+		if (!port)
+			phy->phy_attached = 1;
+		return tmp;
+	}
+
+	if (port) {
+		if (phy->phy_type & PORT_TYPE_SAS) {
+			port->wide_port_phymap &= ~(1U
<< i); + if (!port->wide_port_phymap) + port->port_attached = 0; + mvs_update_wideport(mvi, i); + } else if (phy->phy_type & PORT_TYPE_SATA) + port->port_attached = 0; + mvs_free_reg_set(mvi, phy->port); + phy->port = NULL; + phy->phy_attached = 0; + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + } + return 0; +} + +static void mvs_update_phyinfo(struct mvs_info *mvi, int i, + int get_st) +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct pci_dev *pdev = mvi->pdev; + u32 tmp; + u64 tmp64; + + mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); + phy->dev_info = mvs_read_port_cfg_data(mvi, i); + + mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); + phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32; + + mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); + phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); + + if (get_st) { + phy->irq_status = mvs_read_port_irq_stat(mvi, i); + phy->phy_status = mvs_is_phy_ready(mvi, i); + } + + if (phy->phy_status) { + u32 phy_st; + struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; + + mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); + phy_st = mvs_read_port_cfg_data(mvi, i); + + sas_phy->linkrate = + (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; + phy->minimum_linkrate = + (phy->phy_status & + PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; + phy->maximum_linkrate = + (phy->phy_status & + PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; + + if (phy->phy_type & PORT_TYPE_SAS) { + /* Updated attached_sas_addr */ + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); + phy->att_dev_sas_addr = + (u64) mvs_read_port_cfg_data(mvi, i) << 32; + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); + phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); + phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); + phy->identify.device_type = + phy->att_dev_info & PORT_DEV_TYPE_MASK; + + if (phy->identify.device_type == SAS_END_DEV) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != NO_DEVICE) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SMP; + if (phy_st & PHY_OOB_DTCTD) + sas_phy->oob_mode = SAS_OOB_MODE; + phy->frame_rcvd_size = + sizeof(struct sas_identify_frame); + } else if (phy->phy_type & PORT_TYPE_SATA) { + phy->identify.target_port_protocols = SAS_PROTOCOL_STP; + if (mvs_is_sig_fis_received(phy->irq_status)) { + phy->att_dev_sas_addr = i; /* temp */ + if (phy_st & PHY_OOB_DTCTD) + sas_phy->oob_mode = SATA_OOB_MODE; + phy->frame_rcvd_size = + sizeof(struct dev_to_host_fis); + mvs_get_d2h_reg(mvi, i, + (void *)sas_phy->frame_rcvd); + } else { + dev_printk(KERN_DEBUG, &pdev->dev, + "No sig fis\n"); + phy->phy_type &= ~(PORT_TYPE_SATA); + goto out_done; + } + } + tmp64 = cpu_to_be64(phy->att_dev_sas_addr); + memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE); + + dev_printk(KERN_DEBUG, &pdev->dev, + "phy[%d] Get Attached Address 0x%llX ," + " SAS Address 0x%llX\n", + i, + (unsigned long long)phy->att_dev_sas_addr, + (unsigned long long)phy->dev_sas_addr); + dev_printk(KERN_DEBUG, &pdev->dev, + "Rate = %x , type = %d\n", + sas_phy->linkrate, phy->phy_type); + + /* workaround for HW phy decoding error on 1.5g disk drive */ + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); + tmp = mvs_read_port_vsr_data(mvi, i); + if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == + SAS_LINK_RATE_1_5_GBPS) + tmp &= ~PHY_MODE6_LATECLK; + else + tmp |= PHY_MODE6_LATECLK; + 
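/*
 * Vendor-specific (VSR) registers are reached indirectly: first write
 * the register number to the VSR address port, then access the VSR
 * data port. The read-modify-write around this point therefore takes
 * three steps:
 *
 *	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);	// select register
 *	tmp = mvs_read_port_vsr_data(mvi, i);		// read
 *	tmp |= PHY_MODE6_LATECLK;			// modify
 *	mvs_write_port_vsr_data(mvi, i, tmp);		// write back
 */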
mvs_write_port_vsr_data(mvi, i, tmp); + + } +out_done: + if (get_st) + mvs_write_port_irq_stat(mvi, i, phy->irq_status); +} + +static void mvs_port_formed(struct asd_sas_phy *sas_phy) +{ + struct sas_ha_struct *sas_ha = sas_phy->ha; + struct mvs_info *mvi = sas_ha->lldd_ha; + struct asd_sas_port *sas_port = sas_phy->port; + struct mvs_phy *phy = sas_phy->lldd_phy; + struct mvs_port *port = &mvi->port[sas_port->id]; + unsigned long flags; + + spin_lock_irqsave(&mvi->lock, flags); + port->port_attached = 1; + phy->port = port; + port->taskfileset = MVS_ID_NOT_MAPPED; + if (phy->phy_type & PORT_TYPE_SAS) { + port->wide_port_phymap = sas_port->phy_mask; + mvs_update_wideport(mvi, sas_phy->id); + } + spin_unlock_irqrestore(&mvi->lock, flags); +} + +static int mvs_I_T_nexus_reset(struct domain_device *dev) +{ + return TMF_RESP_FUNC_FAILED; +} + +static int __devinit mvs_hw_init(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + int i; + u32 tmp, cctl; + + /* make sure interrupts are masked immediately (paranoia) */ + mw32(GBL_CTL, 0); + tmp = mr32(GBL_CTL); + + /* Reset Controller */ + if (!(tmp & HBA_RST)) { + if (mvi->flags & MVF_PHY_PWR_FIX) { + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); + tmp &= ~PCTL_PWR_ON; + tmp |= PCTL_OFF; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); + + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); + tmp &= ~PCTL_PWR_ON; + tmp |= PCTL_OFF; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); + } + + /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */ + mw32_f(GBL_CTL, HBA_RST); + } + + /* wait for reset to finish; timeout is just a guess */ + i = 1000; + while (i-- > 0) { + msleep(10); + + if (!(mr32(GBL_CTL) & HBA_RST)) + break; + } + if (mr32(GBL_CTL) & HBA_RST) { + dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n"); + return -EBUSY; + } + + /* Init Chip */ + /* make sure RST is set; HBA_RST /should/ have done that for us */ + cctl = mr32(CTL); + if (cctl & CCTL_RST) + cctl &= ~CCTL_RST; + else + mw32_f(CTL, cctl | CCTL_RST); + + /* write to device control _AND_ device status register? - A.C. 
*/
+	pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
+	tmp &= ~PRD_REQ_MASK;
+	tmp |= PRD_REQ_SIZE;
+	pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
+
+	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
+	tmp |= PCTL_PWR_ON;
+	tmp &= ~PCTL_OFF;
+	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+
+	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
+	tmp |= PCTL_PWR_ON;
+	tmp &= ~PCTL_OFF;
+	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+
+	mw32_f(CTL, cctl);
+
+	/* reset control */
+	mw32(PCS, 0);		/* MVS_PCS */
+
+	mvs_phy_hacks(mvi);
+
+	mw32(CMD_LIST_LO, mvi->slot_dma);
+	mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
+
+	mw32(RX_FIS_LO, mvi->rx_fis_dma);
+	mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
+
+	mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
+	mw32(TX_LO, mvi->tx_dma);
+	mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
+
+	mw32(RX_CFG, MVS_RX_RING_SZ);
+	mw32(RX_LO, mvi->rx_dma);
+	mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
+
+	/* enable auto port detection */
+	mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
+	msleep(1100);
+	/* init and reset phys */
+	for (i = 0; i < mvi->chip->n_phy; i++) {
+		u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
+		u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);
+
+		mvs_detect_porttype(mvi, i);
+
+		/* set phy local SAS address */
+		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
+		mvs_write_port_cfg_data(mvi, i, lo);
+		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
+		mvs_write_port_cfg_data(mvi, i, hi);
+
+		/* reset phy */
+		tmp = mvs_read_phy_ctl(mvi, i);
+		tmp |= PHY_RST;
+		mvs_write_phy_ctl(mvi, i, tmp);
+	}
+
+	msleep(100);
+
+	for (i = 0; i < mvi->chip->n_phy; i++) {
+		/* clear phy int status */
+		tmp = mvs_read_port_irq_stat(mvi, i);
+		tmp &= ~PHYEV_SIG_FIS;
+		mvs_write_port_irq_stat(mvi, i, tmp);
+
+		/* set phy int mask */
+		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
+			PHYEV_ID_DONE | PHYEV_DEC_ERR;
+		mvs_write_port_irq_mask(mvi, i, tmp);
+
+		msleep(100);
+		mvs_update_phyinfo(mvi, i, 1);
+		mvs_enable_xmt(mvi, i);
+	}
+
+	/* FIXME: update wide port bitmaps */
+
+	/* little endian for open address and command table, etc. */
+	/* A.C.
+	 * it seems that (from the spec) turning on big-endian won't
+	 * do us any good on big-endian machines, need further confirmation
+	 */
+	cctl = mr32(CTL);
+	cctl |= CCTL_ENDIAN_CMD;
+	cctl |= CCTL_ENDIAN_DATA;
+	cctl &= ~CCTL_ENDIAN_OPEN;
+	cctl |= CCTL_ENDIAN_RSP;
+	mw32_f(CTL, cctl);
+
+	/* reset CMD queue */
+	tmp = mr32(PCS);
+	tmp |= PCS_CMD_RST;
+	mw32(PCS, tmp);
+	/* Interrupt coalescing can cause missed HW interrupts in some cases;
+	 * the maximum coalescing count is 0x1ff while our slot count is
+	 * 0x200, which would truncate the count to 0, so leave it disabled.
+	 */
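/*
 * If coalescing were usable here, the enable pattern would presumably
 * look like the sketch below (assuming the count occupies the low bits
 * of INT_COAL, as the 0x1ff limit suggests); instead both registers
 * are programmed with coalescing left off:
 *
 *	mw32(INT_COAL, COAL_EN | min(count, 0x1ffU));
 *	mw32(INT_COAL_TMOUT, timeout_ticks);
 */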
+	tmp = 0;
+	mw32(INT_COAL, tmp);
+
+	tmp = 0x100;
+	mw32(INT_COAL_TMOUT, tmp);
+
+	/* ladies and gentlemen, start your engines */
+	mw32(TX_CFG, 0);
+	mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
+	mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
+	/* enable CMD/CMPL_Q/RESP mode */
+	mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
+
+	/* enable completion queue interrupt */
+	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
+	mw32(INT_MASK, tmp);
+
+	/* Enable SRS interrupt */
+	mw32(INT_MASK_SRS, 0xFF);
+	return 0;
+}
+
+static void __devinit mvs_print_info(struct mvs_info *mvi)
+{
+	struct pci_dev *pdev = mvi->pdev;
+	static int printed_version;
+
+	if (!printed_version++)
+		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+
+	dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
+		   mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
+}
+
+static int __devinit mvs_pci_init(struct pci_dev *pdev,
+				  const struct pci_device_id *ent)
+{
+	int rc;
+	struct mvs_info *mvi;
+	irq_handler_t irq_handler = mvs_interrupt;
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	pci_set_master(pdev);
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc)
+		goto err_out_disable;
+
+	rc = pci_go_64(pdev);
+	if (rc)
+		goto err_out_regions;
+
+	mvi = mvs_alloc(pdev, ent);
+	if (!mvi) {
+		rc = -ENOMEM;
+		goto err_out_regions;
+	}
+
+	rc = mvs_hw_init(mvi);
+	if (rc)
+		goto err_out_mvi;
+
+#ifndef MVS_DISABLE_MSI
+	if (!pci_enable_msi(pdev)) {
+		u32 tmp;
+		void __iomem *regs = mvi->regs;
+		mvi->flags |= MVF_MSI;
+		irq_handler = mvs_msi_interrupt;
+		tmp = mr32(PCS);
+		mw32(PCS, tmp | PCS_SELF_CLEAR);
+	}
+#endif
+
+	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
+	if (rc)
+		goto err_out_msi;
+
+	rc = scsi_add_host(mvi->shost, &pdev->dev);
+	if (rc)
+		goto err_out_irq;
+
+	rc = sas_register_ha(&mvi->sas);
+	if (rc)
+		goto err_out_shost;
+
+	pci_set_drvdata(pdev, mvi);
+
+	mvs_print_info(mvi);
+
+	mvs_hba_interrupt_enable(mvi);
+
+	scsi_scan_host(mvi->shost);
+
+	return 0;
+
+err_out_shost:
+	scsi_remove_host(mvi->shost);
+err_out_irq:
+	free_irq(pdev->irq, mvi);
+err_out_msi:
+	if (mvi->flags & MVF_MSI)
+		pci_disable_msi(pdev);
+err_out_mvi:
+	mvs_free(mvi);
+err_out_regions:
+	pci_release_regions(pdev);
+err_out_disable:
+	pci_disable_device(pdev);
+	return rc;
+}
+
+static void __devexit mvs_pci_remove(struct pci_dev *pdev)
+{
+	struct mvs_info *mvi = pci_get_drvdata(pdev);
+
+	pci_set_drvdata(pdev, NULL);
+
+	if (mvi) {
+		sas_unregister_ha(&mvi->sas);
+		mvs_hba_interrupt_disable(mvi);
+		sas_remove_host(mvi->shost);
+		scsi_remove_host(mvi->shost);
+
+		free_irq(pdev->irq, mvi);
+		if (mvi->flags & MVF_MSI)
+			pci_disable_msi(pdev);
+		mvs_free(mvi);
+		pci_release_regions(pdev);
+	}
+	pci_disable_device(pdev);
+}
+
+static struct sas_domain_function_template mvs_transport_ops = {
+	.lldd_execute_task	= mvs_task_exec,
+	.lldd_control_phy	= mvs_phy_control,
+	.lldd_abort_task	= mvs_task_abort,
+	.lldd_port_formed	= mvs_port_formed,
+	.lldd_I_T_nexus_reset	= mvs_I_T_nexus_reset,
+};
+
+static struct pci_device_id __devinitdata mvs_pci_table[] = {
+	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
+	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
+	{
+		.vendor		= PCI_VENDOR_ID_MARVELL,
+		.device		= 0x6440,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= 0x6480,
+		.class		= 0,
+		.class_mask	= 0,
+		.driver_data	= chip_6480,
+	},
+	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
+	{ PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver 
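/*
 * Order matters in the ID table above: the PCI core returns the first
 * matching entry, so the row that matches device 0x6440 with subdevice
 * 0x6480 must come before the catch-all PCI_VDEVICE(MARVELL, 0x6440)
 * entry, or 6480-subsystem boards would be driven as chip_6440:
 *
 *	{ .device = 0x6440, .subdevice = 0x6480, .driver_data = chip_6480 },
 *	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },	// must come after
 */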
mvs_pci_driver = { + .name = DRV_NAME, + .id_table = mvs_pci_table, + .probe = mvs_pci_init, + .remove = __devexit_p(mvs_pci_remove), +}; + +static int __init mvs_init(void) +{ + int rc; + + mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); + if (!mvs_stt) + return -ENOMEM; + + rc = pci_register_driver(&mvs_pci_driver); + if (rc) + goto err_out; + + return 0; + +err_out: + sas_release_transport(mvs_stt); + return rc; +} + +static void __exit mvs_exit(void) +{ + pci_unregister_driver(&mvs_pci_driver); + sas_release_transport(mvs_stt); +} + +module_init(mvs_init); +module_exit(mvs_exit); + +MODULE_AUTHOR("Jeff Garzik "); +MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, mvs_pci_table); -- cgit v1.2.3 From dd4969a892ea522ecf9d7d826ba1531ce044d46f Mon Sep 17 00:00:00 2001 From: Jeff Garzik Date: Fri, 8 May 2009 17:44:01 -0400 Subject: [SCSI] mvsas: split driver into multiple files Split mvsas driver into multiple source codes, based on the split and function distribution found in Marvell's mvsas update. Signed-off-by: Jeff Garzik Signed-off-by: James Bottomley --- drivers/scsi/mvsas/Makefile | 5 +- drivers/scsi/mvsas/mv_64xx.c | 184 +++ drivers/scsi/mvsas/mv_64xx.h | 92 ++ drivers/scsi/mvsas/mv_chips.h | 118 ++ drivers/scsi/mvsas/mv_defs.h | 441 ++++++ drivers/scsi/mvsas/mv_init.c | 524 +++++++ drivers/scsi/mvsas/mv_sas.c | 3444 ++++++++++++----------------------------- drivers/scsi/mvsas/mv_sas.h | 205 +++ 8 files changed, 2587 insertions(+), 2426 deletions(-) create mode 100644 drivers/scsi/mvsas/mv_64xx.c create mode 100644 drivers/scsi/mvsas/mv_64xx.h create mode 100644 drivers/scsi/mvsas/mv_chips.h create mode 100644 drivers/scsi/mvsas/mv_defs.h create mode 100644 drivers/scsi/mvsas/mv_init.c create mode 100644 drivers/scsi/mvsas/mv_sas.h (limited to 'drivers') diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile index 1ac6ed955a04..a1ca681e1a57 100644 --- a/drivers/scsi/mvsas/Makefile +++ b/drivers/scsi/mvsas/Makefile @@ -22,5 +22,6 @@ # USA obj-$(CONFIG_SCSI_MVSAS) += mvsas.o -mvsas-y += mv_sas.o - +mvsas-y += mv_init.o \ + mv_sas.o \ + mv_64xx.o diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c new file mode 100644 index 000000000000..697806c856af --- /dev/null +++ b/drivers/scsi/mvsas/mv_64xx.c @@ -0,0 +1,184 @@ +/* + mv_64xx.c - Marvell 88SE6440 SAS/SATA support + + Copyright 2007 Red Hat, Inc. + Copyright 2008 Marvell. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2, + or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty + of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the GNU General Public License for more details. + + You should have received a copy of the GNU General Public + License along with this program; see the file COPYING. If not, + write to the Free Software Foundation, 675 Mass Ave, Cambridge, + MA 02139, USA. 
+ + */ + +#include "mv_sas.h" +#include "mv_64xx.h" +#include "mv_chips.h" + +void mvs_detect_porttype(struct mvs_info *mvi, int i) +{ + void __iomem *regs = mvi->regs; + u32 reg; + struct mvs_phy *phy = &mvi->phy[i]; + + /* TODO check & save device type */ + reg = mr32(GBL_PORT_TYPE); + + if (reg & MODE_SAS_SATA & (1 << i)) + phy->phy_type |= PORT_TYPE_SAS; + else + phy->phy_type |= PORT_TYPE_SATA; +} + +void mvs_enable_xmt(struct mvs_info *mvi, int PhyId) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(PCS); + if (mvi->chip->n_phy <= 4) + tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT); + else + tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2); + mw32(PCS, tmp); +} + +void __devinit mvs_phy_hacks(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + /* workaround for SATA R-ERR, to ignore phy glitch */ + tmp = mvs_cr32(regs, CMD_PHY_TIMER); + tmp &= ~(1 << 9); + tmp |= (1 << 10); + mvs_cw32(regs, CMD_PHY_TIMER, tmp); + + /* enable retry 127 times */ + mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f); + + /* extend open frame timeout to max */ + tmp = mvs_cr32(regs, CMD_SAS_CTL0); + tmp &= ~0xffff; + tmp |= 0x3fff; + mvs_cw32(regs, CMD_SAS_CTL0, tmp); + + /* workaround for WDTIMEOUT , set to 550 ms */ + mvs_cw32(regs, CMD_WD_TIMER, 0x86470); + + /* not to halt for different port op during wideport link change */ + mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d); + + /* workaround for Seagate disk not-found OOB sequence, recv + * COMINIT before sending out COMWAKE */ + tmp = mvs_cr32(regs, CMD_PHY_MODE_21); + tmp &= 0x0000ffff; + tmp |= 0x00fa0000; + mvs_cw32(regs, CMD_PHY_MODE_21, tmp); + + tmp = mvs_cr32(regs, CMD_PHY_TIMER); + tmp &= 0x1fffffff; + tmp |= (2U << 29); /* 8 ms retry */ + mvs_cw32(regs, CMD_PHY_TIMER, tmp); + + /* TEST - for phy decoding error, adjust voltage levels */ + mw32(P0_VSR_ADDR + 0, 0x8); + mw32(P0_VSR_DATA + 0, 0x2F0); + + mw32(P0_VSR_ADDR + 8, 0x8); + mw32(P0_VSR_DATA + 8, 0x2F0); + + mw32(P0_VSR_ADDR + 16, 0x8); + mw32(P0_VSR_DATA + 16, 0x2F0); + + mw32(P0_VSR_ADDR + 24, 0x8); + mw32(P0_VSR_DATA + 24, 0x2F0); + +} + +void mvs_hba_interrupt_enable(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(GBL_CTL); + + mw32(GBL_CTL, tmp | INT_EN); +} + +void mvs_hba_interrupt_disable(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(GBL_CTL); + + mw32(GBL_CTL, tmp & ~INT_EN); +} + +void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port) +{ + void __iomem *regs = mvi->regs; + u32 tmp, offs; + u8 *tfs = &port->taskfileset; + + if (*tfs == MVS_ID_NOT_MAPPED) + return; + + offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); + if (*tfs < 16) { + tmp = mr32(PCS); + mw32(PCS, tmp & ~offs); + } else { + tmp = mr32(CTL); + mw32(CTL, tmp & ~offs); + } + + tmp = mr32(INT_STAT_SRS) & (1U << *tfs); + if (tmp) + mw32(INT_STAT_SRS, tmp); + + *tfs = MVS_ID_NOT_MAPPED; +} + +u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port) +{ + int i; + u32 tmp, offs; + void __iomem *regs = mvi->regs; + + if (port->taskfileset != MVS_ID_NOT_MAPPED) + return 0; + + tmp = mr32(PCS); + + for (i = 0; i < mvi->chip->srs_sz; i++) { + if (i == 16) + tmp = mr32(CTL); + offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); + if (!(tmp & offs)) { + port->taskfileset = i; + + if (i < 16) + mw32(PCS, tmp | offs); + else + mw32(CTL, tmp | offs); + tmp = mr32(INT_STAT_SRS) & (1U << i); + if (tmp) + mw32(INT_STAT_SRS, tmp); + return 0; + } + } + return MVS_ID_NOT_MAPPED; +} + diff --git 
a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
new file mode 100644
index 000000000000..c9f399ebc926
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.h
@@ -0,0 +1,92 @@
+#ifndef _MVS64XX_REG_H_
+#define _MVS64XX_REG_H_
+
+/* enhanced mode registers (BAR4) */
+enum hw_registers {
+	MVS_GBL_CTL		= 0x04,  /* global control */
+	MVS_GBL_INT_STAT	= 0x08,  /* global irq status */
+	MVS_GBL_PI		= 0x0C,  /* ports implemented bitmask */
+	MVS_GBL_PORT_TYPE	= 0xa0,  /* port type */
+
+	MVS_CTL			= 0x100, /* SAS/SATA port configuration */
+	MVS_PCS			= 0x104, /* SAS/SATA port control/status */
+	MVS_CMD_LIST_LO		= 0x108, /* cmd list addr */
+	MVS_CMD_LIST_HI		= 0x10C,
+	MVS_RX_FIS_LO		= 0x110, /* RX FIS list addr */
+	MVS_RX_FIS_HI		= 0x114,
+
+	MVS_TX_CFG		= 0x120, /* TX configuration */
+	MVS_TX_LO		= 0x124, /* TX (delivery) ring addr */
+	MVS_TX_HI		= 0x128,
+
+	MVS_TX_PROD_IDX		= 0x12C, /* TX producer pointer */
+	MVS_TX_CONS_IDX		= 0x130, /* TX consumer pointer (RO) */
+	MVS_RX_CFG		= 0x134, /* RX configuration */
+	MVS_RX_LO		= 0x138, /* RX (completion) ring addr */
+	MVS_RX_HI		= 0x13C,
+	MVS_RX_CONS_IDX		= 0x140, /* RX consumer pointer (RO) */
+
+	MVS_INT_COAL		= 0x148, /* Int coalescing config */
+	MVS_INT_COAL_TMOUT	= 0x14C, /* Int coalescing timeout */
+	MVS_INT_STAT		= 0x150, /* Central int status */
+	MVS_INT_MASK		= 0x154, /* Central int enable */
+	MVS_INT_STAT_SRS	= 0x158, /* SATA register set status */
+	MVS_INT_MASK_SRS	= 0x15C,
+
+	/* ports 1-3 follow after this */
+	MVS_P0_INT_STAT		= 0x160, /* port0 interrupt status */
+	MVS_P0_INT_MASK		= 0x164, /* port0 interrupt mask */
+	MVS_P4_INT_STAT		= 0x200, /* Port 4 interrupt status */
+	MVS_P4_INT_MASK		= 0x204, /* Port 4 interrupt enable mask */
+
+	/* ports 1-3 follow after this */
+	MVS_P0_SER_CTLSTAT	= 0x180, /* port0 serial control/status */
+	MVS_P4_SER_CTLSTAT	= 0x220, /* port4 serial control/status */
+
+	MVS_CMD_ADDR		= 0x1B8, /* Command register port (addr) */
+	MVS_CMD_DATA		= 0x1BC, /* Command register port (data) */
+
+	/* ports 1-3 follow after this */
+	MVS_P0_CFG_ADDR		= 0x1C0, /* port0 phy register address */
+	MVS_P0_CFG_DATA		= 0x1C4, /* port0 phy register data */
+	MVS_P4_CFG_ADDR		= 0x230, /* Port 4 config address */
+	MVS_P4_CFG_DATA		= 0x234, /* Port 4 config data */
+
+	/* ports 1-3 follow after this */
+	MVS_P0_VSR_ADDR		= 0x1E0, /* port0 VSR address */
+	MVS_P0_VSR_DATA		= 0x1E4, /* port0 VSR data */
+	MVS_P4_VSR_ADDR		= 0x250, /* port 4 VSR addr */
+	MVS_P4_VSR_DATA		= 0x254, /* port 4 VSR data */
+};
+
+enum pci_cfg_registers {
+	PCR_PHY_CTL	= 0x40,
+	PCR_PHY_CTL2	= 0x90,
+	PCR_DEV_CTRL	= 0xE8,
+};
+
+/* SAS/SATA Vendor Specific Port Registers */
+enum sas_sata_vsp_regs {
+	VSR_PHY_STAT	= 0x00, /* Phy Status */
+	VSR_PHY_MODE1	= 0x01, /* phy tx */
+	VSR_PHY_MODE2	= 0x02, /* tx scc */
+	VSR_PHY_MODE3	= 0x03, /* pll */
+	VSR_PHY_MODE4	= 0x04, /* VCO */
+	VSR_PHY_MODE5	= 0x05, /* Rx */
+	VSR_PHY_MODE6	= 0x06, /* CDR */
+	VSR_PHY_MODE7	= 0x07, /* Impedance */
+	VSR_PHY_MODE8	= 0x08, /* Voltage */
+	VSR_PHY_MODE9	= 0x09, /* Test */
+	VSR_PHY_MODE10	= 0x0A, /* Power */
+	VSR_PHY_MODE11	= 0x0B, /* Phy Mode */
+	VSR_PHY_VS0	= 0x0C, /* Vendor Specific 0 */
+	VSR_PHY_VS1	= 0x0D, /* Vendor Specific 1 */
+};
+
+struct mvs_prd {
+	__le64 addr;	/* 64-bit buffer address */
+	__le32 reserved;
+	__le32 len;	/* 16-bit length */
+};
+
+#endif
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
new file mode 100644
index 000000000000..cf74b7a3f643
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -0,0 +1,118 @@
+#ifndef 
_MV_CHIPS_H_ +#define _MV_CHIPS_H_ + +#define mr32(reg) readl(regs + MVS_##reg) +#define mw32(reg,val) writel((val), regs + MVS_##reg) +#define mw32_f(reg,val) do { \ + writel((val), regs + MVS_##reg); \ + readl(regs + MVS_##reg); \ + } while (0) + +static inline u32 mvs_cr32(void __iomem *regs, u32 addr) +{ + mw32(CMD_ADDR, addr); + return mr32(CMD_DATA); +} + +static inline void mvs_cw32(void __iomem *regs, u32 addr, u32 val) +{ + mw32(CMD_ADDR, addr); + mw32(CMD_DATA, val); +} + +static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port) +{ + void __iomem *regs = mvi->regs; + return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4): + mr32(P4_SER_CTLSTAT + (port - 4) * 4); +} + +static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val) +{ + void __iomem *regs = mvi->regs; + if (port < 4) + mw32(P0_SER_CTLSTAT + port * 4, val); + else + mw32(P4_SER_CTLSTAT + (port - 4) * 4, val); +} + +static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port) +{ + void __iomem *regs = mvi->regs + off; + void __iomem *regs2 = mvi->regs + off2; + return (port < 4)?readl(regs + port * 8): + readl(regs2 + (port - 4) * 8); +} + +static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2, + u32 port, u32 val) +{ + void __iomem *regs = mvi->regs + off; + void __iomem *regs2 = mvi->regs + off2; + if (port < 4) + writel(val, regs + port * 8); + else + writel(val, regs2 + (port - 4) * 8); +} + +static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_CFG_DATA, + MVS_P4_CFG_DATA, port); +} + +static inline void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_CFG_DATA, + MVS_P4_CFG_DATA, port, val); +} + +static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr) +{ + mvs_write_port(mvi, MVS_P0_CFG_ADDR, + MVS_P4_CFG_ADDR, port, addr); +} + +static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_VSR_DATA, + MVS_P4_VSR_DATA, port); +} + +static inline void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_VSR_DATA, + MVS_P4_VSR_DATA, port, val); +} + +static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr) +{ + mvs_write_port(mvi, MVS_P0_VSR_ADDR, + MVS_P4_VSR_ADDR, port, addr); +} + +static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_INT_STAT, + MVS_P4_INT_STAT, port); +} + +static inline void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_INT_STAT, + MVS_P4_INT_STAT, port, val); +} + +static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_INT_MASK, + MVS_P4_INT_MASK, port); +} + +static inline void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_INT_MASK, + MVS_P4_INT_MASK, port, val); +} + +#endif diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h new file mode 100644 index 000000000000..d8e96a3e5a21 --- /dev/null +++ b/drivers/scsi/mvsas/mv_defs.h @@ -0,0 +1,441 @@ +/* + mv_defs.h - Marvell 88SE6440 SAS/SATA support + + Copyright 2007 Red Hat, Inc. + Copyright 2008 Marvell. 
+ + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2, + or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty + of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the GNU General Public License for more details. + + You should have received a copy of the GNU General Public + License along with this program; see the file COPYING. If not, + write to the Free Software Foundation, 675 Mass Ave, Cambridge, + MA 02139, USA. + + */ + +#ifndef _MV_DEFS_H_ +#define _MV_DEFS_H_ + +/* driver compile-time configuration */ +enum driver_configuration { + MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ + MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ + /* software requires power-of-2 + ring size */ + + MVS_SLOTS = 512, /* command slots */ + MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */ + MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ + MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ + MVS_OAF_SZ = 64, /* Open address frame buffer size */ + + MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */ + + MVS_QUEUE_SIZE = 30, /* Support Queue depth */ + MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */ +}; + +/* unchangeable hardware details */ +enum hardware_details { + MVS_MAX_PHYS = 8, /* max. possible phys */ + MVS_MAX_PORTS = 8, /* max. possible ports */ + MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100), +}; + +/* peripheral registers (BAR2) */ +enum peripheral_registers { + SPI_CTL = 0x10, /* EEPROM control */ + SPI_CMD = 0x14, /* EEPROM command */ + SPI_DATA = 0x18, /* EEPROM data */ +}; + +enum peripheral_register_bits { + TWSI_RDY = (1U << 7), /* EEPROM interface ready */ + TWSI_RD = (1U << 4), /* EEPROM read access */ + + SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */ +}; + +enum hw_register_bits { + /* MVS_GBL_CTL */ + INT_EN = (1U << 1), /* Global int enable */ + HBA_RST = (1U << 0), /* HBA reset */ + + /* MVS_GBL_INT_STAT */ + INT_XOR = (1U << 4), /* XOR engine event */ + INT_SAS_SATA = (1U << 0), /* SAS/SATA event */ + + /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */ + SATA_TARGET = (1U << 16), /* port0 SATA target enable */ + MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */ + MODE_AUTO_DET_PORT6 = (1U << 14), + MODE_AUTO_DET_PORT5 = (1U << 13), + MODE_AUTO_DET_PORT4 = (1U << 12), + MODE_AUTO_DET_PORT3 = (1U << 11), + MODE_AUTO_DET_PORT2 = (1U << 10), + MODE_AUTO_DET_PORT1 = (1U << 9), + MODE_AUTO_DET_PORT0 = (1U << 8), + MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 | + MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 | + MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 | + MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7, + MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */ + MODE_SAS_PORT6_MASK = (1U << 6), + MODE_SAS_PORT5_MASK = (1U << 5), + MODE_SAS_PORT4_MASK = (1U << 4), + MODE_SAS_PORT3_MASK = (1U << 3), + MODE_SAS_PORT2_MASK = (1U << 2), + MODE_SAS_PORT1_MASK = (1U << 1), + MODE_SAS_PORT0_MASK = (1U << 0), + MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK | + MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK | + MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK | + MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK, + + /* SAS_MODE value may be + * dictated (in hw) by values + * of SATA_TARGET & AUTO_DET + */ + + /* MVS_TX_CFG */ + TX_EN = (1U << 16), /* Enable TX */ + 
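/*
 * TX_CFG and RX_CFG pack an enable bit on top of a 12-bit ring size,
 * so a ring holds at most 4096 entries. mvs_hw_init() programs them
 * exactly that way:
 *
 *	mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
 *	mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
 */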
TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */ + + /* MVS_RX_CFG */ + RX_EN = (1U << 16), /* Enable RX */ + RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */ + + /* MVS_INT_COAL */ + COAL_EN = (1U << 16), /* Enable int coalescing */ + + /* MVS_INT_STAT, MVS_INT_MASK */ + CINT_I2C = (1U << 31), /* I2C event */ + CINT_SW0 = (1U << 30), /* software event 0 */ + CINT_SW1 = (1U << 29), /* software event 1 */ + CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */ + CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */ + CINT_MEM = (1U << 26), /* int mem parity err */ + CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */ + CINT_SRS = (1U << 3), /* SRS event */ + CINT_CI_STOP = (1U << 1), /* cmd issue stopped */ + CINT_DONE = (1U << 0), /* cmd completion */ + + /* shl for ports 1-3 */ + CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */ + CINT_PORT = (1U << 8), /* port0 event */ + CINT_PORT_MASK_OFFSET = 8, + CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET), + + /* TX (delivery) ring bits */ + TXQ_CMD_SHIFT = 29, + TXQ_CMD_SSP = 1, /* SSP protocol */ + TXQ_CMD_SMP = 2, /* SMP protocol */ + TXQ_CMD_STP = 3, /* STP/SATA protocol */ + TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ + TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ + TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ + TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */ + TXQ_SRS_SHIFT = 20, /* SATA register set */ + TXQ_SRS_MASK = 0x7f, + TXQ_PHY_SHIFT = 12, /* PHY bitmap */ + TXQ_PHY_MASK = 0xff, + TXQ_SLOT_MASK = 0xfff, /* slot number */ + + /* RX (completion) ring bits */ + RXQ_GOOD = (1U << 23), /* Response good */ + RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */ + RXQ_CMD_RX = (1U << 20), /* target cmd received */ + RXQ_ATTN = (1U << 19), /* attention */ + RXQ_RSP = (1U << 18), /* response frame xfer'd */ + RXQ_ERR = (1U << 17), /* err info rec xfer'd */ + RXQ_DONE = (1U << 16), /* cmd complete */ + RXQ_SLOT_MASK = 0xfff, /* slot number */ + + /* mvs_cmd_hdr bits */ + MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */ + MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */ + + /* SSP initiator only */ + MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */ + + /* SSP initiator or target */ + MCH_SSP_FR_TASK = 0x1, /* TASK frame */ + + /* SSP target only */ + MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */ + MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */ + MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */ + MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */ + + MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */ + MCH_FBURST = (1U << 11), /* first burst (SSP) */ + MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */ + MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */ + MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */ + MCH_RESET = (1U << 7), /* Reset (STP/SATA) */ + MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */ + MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */ + MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */ + MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/ + + CCTL_RST = (1U << 5), /* port logic reset */ + + /* 0(LSB first), 1(MSB first) */ + CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */ + CCTL_ENDIAN_RSP = (1U << 2), /* response frame */ + CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */ + CCTL_ENDIAN_CMD = (1U << 0), /* command table */ + + /* MVS_Px_SER_CTLSTAT (per-phy control) */ + PHY_SSP_RST = (1U << 3), /* reset SSP link layer */ + PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */ + PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */ + PHY_RST 
= (1U << 0), /* phy reset */ + PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8), + PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12), + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), + PHY_NEG_SPP_PHYS_LINK_RATE_MASK = + (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), + PHY_READY_MASK = (1U << 20), + + /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */ + PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */ + PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */ + PHYEV_AN = (1U << 18), /* SATA async notification */ + PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */ + PHYEV_SIG_FIS = (1U << 16), /* signature FIS */ + PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */ + PHYEV_IU_BIG = (1U << 11), /* IU too long err */ + PHYEV_IU_SMALL = (1U << 10), /* IU too short err */ + PHYEV_UNK_TAG = (1U << 9), /* unknown tag */ + PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */ + PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */ + PHYEV_PORT_SEL = (1U << 6), /* port selector present */ + PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */ + PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */ + PHYEV_ID_FAIL = (1U << 3), /* identify failed */ + PHYEV_ID_DONE = (1U << 2), /* identify done */ + PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */ + PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */ + + /* MVS_PCS */ + PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */ + PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */ + PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */ + PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ + PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ + PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ + PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ + PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ + PCS_CMD_RST = (1U << 1), /* reset cmd issue */ + PCS_CMD_EN = (1U << 0), /* enable cmd issue */ + + /* Port n Attached Device Info */ + PORT_DEV_SSP_TRGT = (1U << 19), + PORT_DEV_SMP_TRGT = (1U << 18), + PORT_DEV_STP_TRGT = (1U << 17), + PORT_DEV_SSP_INIT = (1U << 11), + PORT_DEV_SMP_INIT = (1U << 10), + PORT_DEV_STP_INIT = (1U << 9), + PORT_PHY_ID_MASK = (0xFFU << 24), + PORT_DEV_TRGT_MASK = (0x7U << 17), + PORT_DEV_INIT_MASK = (0x7U << 9), + PORT_DEV_TYPE_MASK = (0x7U << 0), + + /* Port n PHY Status */ + PHY_RDY = (1U << 2), + PHY_DW_SYNC = (1U << 1), + PHY_OOB_DTCTD = (1U << 0), + + /* VSR */ + /* PHYMODE 6 (CDB) */ + PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */ + PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */ + PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/ + PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */ + PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */ + PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */ + PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */ + PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */ + PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */ + PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */ + PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */ + PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */ + PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */ + PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */ +}; + +/* SAS/SATA configuration port registers, aka phy registers */ +enum sas_sata_config_port_regs { + PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */ + PHYR_ADDR_LO = 0x04, /* my SAS address (low) */ + PHYR_ADDR_HI = 0x08, /* my SAS address (high) */ 
+	PHYR_ATT_DEV_INFO	= 0x0C,	/* attached device info */
+	PHYR_ATT_ADDR_LO	= 0x10,	/* attached dev SAS addr (low) */
+	PHYR_ATT_ADDR_HI	= 0x14,	/* attached dev SAS addr (high) */
+	PHYR_SATA_CTL		= 0x18,	/* SATA control */
+	PHYR_PHY_STAT		= 0x1C,	/* PHY status */
+	PHYR_SATA_SIG0		= 0x20,	/* port SATA signature FIS (Byte 0-3) */
+	PHYR_SATA_SIG1		= 0x24,	/* port SATA signature FIS (Byte 4-7) */
+	PHYR_SATA_SIG2		= 0x28,	/* port SATA signature FIS (Byte 8-11) */
+	PHYR_SATA_SIG3		= 0x2c,	/* port SATA signature FIS (Byte 12-15) */
+	PHYR_R_ERR_COUNT	= 0x30,	/* port R_ERR count register */
+	PHYR_CRC_ERR_COUNT	= 0x34,	/* port CRC error count register */
+	PHYR_WIDE_PORT		= 0x38,	/* wide port participating */
+	PHYR_CURRENT0		= 0x80,	/* current connection info 0 */
+	PHYR_CURRENT1		= 0x84,	/* current connection info 1 */
+	PHYR_CURRENT2		= 0x88,	/* current connection info 2 */
+};
+
+enum mvs_info_flags {
+	MVF_MSI			= (1U << 0),	/* MSI is enabled */
+	MVF_PHY_PWR_FIX		= (1U << 1),	/* bug workaround */
+};
+
+enum sas_cmd_port_registers {
+	CMD_CMRST_OOB_DET	= 0x100, /* COMRESET OOB detect register */
+	CMD_CMWK_OOB_DET	= 0x104, /* COMWAKE OOB detect register */
+	CMD_CMSAS_OOB_DET	= 0x108, /* COMSAS OOB detect register */
+	CMD_BRST_OOB_DET	= 0x10c, /* burst OOB detect register */
+	CMD_OOB_SPACE		= 0x110, /* OOB space control register */
+	CMD_OOB_BURST		= 0x114, /* OOB burst control register */
+	CMD_PHY_TIMER		= 0x118, /* PHY timer control register */
+	CMD_PHY_CONFIG0		= 0x11c, /* PHY config register 0 */
+	CMD_PHY_CONFIG1		= 0x120, /* PHY config register 1 */
+	CMD_SAS_CTL0		= 0x124, /* SAS control register 0 */
+	CMD_SAS_CTL1		= 0x128, /* SAS control register 1 */
+	CMD_SAS_CTL2		= 0x12c, /* SAS control register 2 */
+	CMD_SAS_CTL3		= 0x130, /* SAS control register 3 */
+	CMD_ID_TEST		= 0x134, /* ID test register */
+	CMD_PL_TIMER		= 0x138, /* PL timer register */
+	CMD_WD_TIMER		= 0x13c, /* WD timer register */
+	CMD_PORT_SEL_COUNT	= 0x140, /* port selector count register */
+	CMD_APP_MEM_CTL		= 0x144, /* Application Memory Control */
+	CMD_XOR_MEM_CTL		= 0x148, /* XOR Block Memory Control */
+	CMD_DMA_MEM_CTL		= 0x14c, /* DMA Block Memory Control */
+	CMD_PORT_MEM_CTL0	= 0x150, /* Port Memory Control 0 */
+	CMD_PORT_MEM_CTL1	= 0x154, /* Port Memory Control 1 */
+	CMD_SATA_PORT_MEM_CTL0	= 0x158, /* SATA Port Memory Control 0 */
+	CMD_SATA_PORT_MEM_CTL1	= 0x15c, /* SATA Port Memory Control 1 */
+	CMD_XOR_MEM_BIST_CTL	= 0x160, /* XOR Memory BIST Control */
+	CMD_XOR_MEM_BIST_STAT	= 0x164, /* XOR Memory BIST Status */
+	CMD_DMA_MEM_BIST_CTL	= 0x168, /* DMA Memory BIST Control */
+	CMD_DMA_MEM_BIST_STAT	= 0x16c, /* DMA Memory BIST Status */
+	CMD_PORT_MEM_BIST_CTL	= 0x170, /* Port Memory BIST Control */
+	CMD_PORT_MEM_BIST_STAT0	= 0x174, /* Port Memory BIST Status 0 */
+	CMD_PORT_MEM_BIST_STAT1	= 0x178, /* Port Memory BIST Status 1 */
+	CMD_STP_MEM_BIST_CTL	= 0x17c, /* STP Memory BIST Control */
+	CMD_STP_MEM_BIST_STAT0	= 0x180, /* STP Memory BIST Status 0 */
+	CMD_STP_MEM_BIST_STAT1	= 0x184, /* STP Memory BIST Status 1 */
+	CMD_RESET_COUNT		= 0x188, /* Reset Count */
+	CMD_MONTR_DATA_SEL	= 0x18C, /* Monitor Data/Select */
+	CMD_PLL_PHY_CONFIG	= 0x190, /* PLL/PHY Configuration */
+	CMD_PHY_CTL		= 0x194, /* PHY Control and Status */
+	CMD_PHY_TEST_COUNT0	= 0x198, /* Phy Test Count 0 */
+	CMD_PHY_TEST_COUNT1	= 0x19C, /* Phy Test Count 1 */
+	CMD_PHY_TEST_COUNT2	= 0x1A0, /* Phy Test Count 2 */
+	CMD_APP_ERR_CONFIG	= 0x1A4, /* Application Error Configuration */
+	CMD_PND_FIFO_CTL0	= 0x1A8, /* Pending FIFO Control 0 */
+	CMD_HOST_CTL		= 0x1AC, /* Host Control Status */
+	CMD_HOST_WR_DATA	= 0x1B0, /* Host Write Data */
+	CMD_HOST_RD_DATA	= 0x1B4, /* Host Read Data */
+	CMD_PHY_MODE_21		= 0x1B8, /* Phy Mode 21 */
+	CMD_SL_MODE0		= 0x1BC, /* SL Mode 0 */
+	CMD_SL_MODE1		= 0x1C0, /* SL Mode 1 */
+	CMD_PND_FIFO_CTL1	= 0x1C4, /* Pending FIFO Control 1 */
+};
+
+enum pci_cfg_register_bits {
+	PCTL_PWR_ON	= (0xFU << 24),
+	PCTL_OFF	= (0xFU << 12),
+	PRD_REQ_SIZE	= (0x4000),
+	PRD_REQ_MASK	= (0x00007000),
+};
+
+enum nvram_layout_offsets {
+	NVR_SIG		= 0x00,		/* 0xAA, 0x55 */
+	NVR_SAS_ADDR	= 0x02,		/* 8-byte SAS address */
+};
+
+enum chip_flavors {
+	chip_6320,
+	chip_6440,
+	chip_6480,
+};
+
+enum port_type {
+	PORT_TYPE_SAS	= (1L << 1),
+	PORT_TYPE_SATA	= (1L << 0),
+};
+
+/* Command Table Format */
+enum ct_format {
+	/* SSP */
+	SSP_F_H		= 0x00,
+	SSP_F_IU	= 0x18,
+	SSP_F_MAX	= 0x4D,
+	/* STP */
+	STP_CMD_FIS	= 0x00,
+	STP_ATAPI_CMD	= 0x40,
+	STP_F_MAX	= 0x10,
+	/* SMP */
+	SMP_F_T		= 0x00,
+	SMP_F_DEP	= 0x01,
+	SMP_F_MAX	= 0x101,
+};
+
+enum status_buffer {
+	SB_EIR_OFF	= 0x00,		/* Error Information Record */
+	SB_RFB_OFF	= 0x08,		/* Response Frame Buffer */
+	SB_RFB_MAX	= 0x400,	/* RFB size */
+};
+
+enum error_info_rec {
+	CMD_ISS_STPD	= (1U << 31),	/* Cmd Issue Stopped */
+	CMD_PI_ERR	= (1U << 30),	/* Protection info error. see flags2 */
+	RSP_OVER	= (1U << 29),	/* rsp buffer overflow */
+	RETRY_LIM	= (1U << 28),	/* FIS/frame retry limit exceeded */
+	UNK_FIS		= (1U << 27),	/* unknown FIS */
+	DMA_TERM	= (1U << 26),	/* DMA terminate primitive rx'd */
+	SYNC_ERR	= (1U << 25),	/* SYNC rx'd during frame xmit */
+	TFILE_ERR	= (1U << 24),	/* SATA taskfile Error bit set */
+	R_ERR		= (1U << 23),	/* SATA returned R_ERR prim */
+	RD_OFS		= (1U << 20),	/* Read DATA frame invalid offset */
+	XFER_RDY_OFS	= (1U << 19),	/* XFER_RDY offset error */
+	UNEXP_XFER_RDY	= (1U << 18),	/* unexpected XFER_RDY error */
+	DATA_OVER_UNDER	= (1U << 16),	/* data overflow/underflow */
+	INTERLOCK	= (1U << 15),	/* interlock error */
+	NAK		= (1U << 14),	/* NAK rx'd */
+	ACK_NAK_TO	= (1U << 13),	/* ACK/NAK timeout */
+	CXN_CLOSED	= (1U << 12),	/* cxn closed w/out ack/nak */
+	OPEN_TO		= (1U << 11),	/* I_T nexus lost, open cxn timeout */
+	PATH_BLOCKED	= (1U << 10),	/* I_T nexus lost, pathway blocked */
+	NO_DEST		= (1U << 9),	/* I_T nexus lost, no destination */
+	STP_RES_BSY	= (1U << 8),	/* STP resources busy */
+	BREAK		= (1U << 7),	/* break received */
+	BAD_DEST	= (1U << 6),	/* bad destination */
+	BAD_PROTO	= (1U << 5),	/* protocol not supported */
+	BAD_RATE	= (1U << 4),	/* cxn rate not supported */
+	WRONG_DEST	= (1U << 3),	/* wrong destination error */
+	CREDIT_TO	= (1U << 2),	/* credit timeout */
+	WDOG_TO		= (1U << 1),	/* watchdog timeout */
+	BUF_PAR		= (1U << 0),	/* buffer parity error */
+};
+
+enum error_info_rec_2 {
+	SLOT_BSY_ERR	= (1U << 31),	/* Slot Busy Error */
+	GRD_CHK_ERR	= (1U << 14),	/* Guard Check Error */
+	APP_CHK_ERR	= (1U << 13),	/* Application Check error */
+	REF_CHK_ERR	= (1U << 12),	/* Reference Check Error */
+	USR_BLK_NM	= (1U << 0),	/* User Block Number */
+};
+
+#endif
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
new file mode 100644
index 000000000000..258a1a923290
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -0,0 +1,524 @@
+/*
+	mv_init.c - Marvell 88SE6440 SAS/SATA init support
+
+	Copyright 2007 Red Hat, Inc.
+	Copyright 2008 Marvell.
+ + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2, + or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty + of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the GNU General Public License for more details. + + You should have received a copy of the GNU General Public + License along with this program; see the file COPYING. If not, + write to the Free Software Foundation, 675 Mass Ave, Cambridge, + MA 02139, USA. + + */ + +#include "mv_sas.h" +#include "mv_64xx.h" +#include "mv_chips.h" + +static struct scsi_transport_template *mvs_stt; + +static const struct mvs_chip_info mvs_chips[] = { + [chip_6320] = { 2, 16, 9 }, + [chip_6440] = { 4, 16, 9 }, + [chip_6480] = { 8, 32, 10 }, +}; + +static struct scsi_host_template mvs_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .queuecommand = sas_queuecommand, + .target_alloc = sas_target_alloc, + .slave_configure = mvs_slave_configure, + .slave_destroy = sas_slave_destroy, + .scan_finished = mvs_scan_finished, + .scan_start = mvs_scan_start, + .change_queue_depth = sas_change_queue_depth, + .change_queue_type = sas_change_queue_type, + .bios_param = sas_bios_param, + .can_queue = 1, + .cmd_per_lun = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .use_clustering = ENABLE_CLUSTERING, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_bus_reset_handler = sas_eh_bus_reset_handler, + .slave_alloc = sas_slave_alloc, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, +}; + +static struct sas_domain_function_template mvs_transport_ops = { + .lldd_execute_task = mvs_task_exec, + .lldd_control_phy = mvs_phy_control, + .lldd_abort_task = mvs_task_abort, + .lldd_port_formed = mvs_port_formed, + .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, +}; + +static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) +{ + struct mvs_phy *phy = &mvi->phy[phy_id]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 
1 : 0; + sas_phy->class = SAS; + sas_phy->iproto = SAS_PROTOCOL_ALL; + sas_phy->tproto = 0; + sas_phy->type = PHY_TYPE_PHYSICAL; + sas_phy->role = PHY_ROLE_INITIATOR; + sas_phy->oob_mode = OOB_NOT_CONNECTED; + sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; + + sas_phy->id = phy_id; + sas_phy->sas_addr = &mvi->sas_addr[0]; + sas_phy->frame_rcvd = &phy->frame_rcvd[0]; + sas_phy->ha = &mvi->sas; + sas_phy->lldd_phy = phy; +} + +static void mvs_free(struct mvs_info *mvi) +{ + int i; + + if (!mvi) + return; + + for (i = 0; i < MVS_SLOTS; i++) { + struct mvs_slot_info *slot = &mvi->slot_info[i]; + + if (slot->buf) + dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ, + slot->buf, slot->buf_dma); + } + + if (mvi->tx) + dma_free_coherent(&mvi->pdev->dev, + sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, + mvi->tx, mvi->tx_dma); + if (mvi->rx_fis) + dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ, + mvi->rx_fis, mvi->rx_fis_dma); + if (mvi->rx) + dma_free_coherent(&mvi->pdev->dev, + sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), + mvi->rx, mvi->rx_dma); + if (mvi->slot) + dma_free_coherent(&mvi->pdev->dev, + sizeof(*mvi->slot) * MVS_SLOTS, + mvi->slot, mvi->slot_dma); +#ifdef MVS_ENABLE_PERI + if (mvi->peri_regs) + iounmap(mvi->peri_regs); +#endif + if (mvi->regs) + iounmap(mvi->regs); + if (mvi->shost) + scsi_host_put(mvi->shost); + kfree(mvi->sas.sas_port); + kfree(mvi->sas.sas_phy); + kfree(mvi); +} + +#ifdef MVS_USE_TASKLET +static void mvs_tasklet(unsigned long data) +{ + struct mvs_info *mvi = (struct mvs_info *) data; + unsigned long flags; + + spin_lock_irqsave(&mvi->lock, flags); + +#ifdef MVS_DISABLE_MSI + mvs_int_full(mvi); +#else + mvs_int_rx(mvi, true); +#endif + spin_unlock_irqrestore(&mvi->lock, flags); +} +#endif + +static irqreturn_t mvs_interrupt(int irq, void *opaque) +{ + struct mvs_info *mvi = opaque; + void __iomem *regs = mvi->regs; + u32 stat; + + stat = mr32(GBL_INT_STAT); + + if (stat == 0 || stat == 0xffffffff) + return IRQ_NONE; + + /* clear CMD_CMPLT ASAP */ + mw32_f(INT_STAT, CINT_DONE); + +#ifndef MVS_USE_TASKLET + spin_lock(&mvi->lock); + + mvs_int_full(mvi); + + spin_unlock(&mvi->lock); +#else + tasklet_schedule(&mvi->tasklet); +#endif + return IRQ_HANDLED; +} + +static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct mvs_info *mvi; + unsigned long res_start, res_len, res_flag; + struct asd_sas_phy **arr_phy; + struct asd_sas_port **arr_port; + const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data]; + int i; + + /* + * alloc and init our per-HBA mvs_info struct + */ + + mvi = kzalloc(sizeof(*mvi), GFP_KERNEL); + if (!mvi) + return NULL; + + spin_lock_init(&mvi->lock); +#ifdef MVS_USE_TASKLET + tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi); +#endif + mvi->pdev = pdev; + mvi->chip = chip; + + if (pdev->device == 0x6440 && pdev->revision == 0) + mvi->flags |= MVF_PHY_PWR_FIX; + + /* + * alloc and init SCSI, SAS glue + */ + + mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); + if (!mvi->shost) + goto err_out; + + arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); + arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); + if (!arr_phy || !arr_port) + goto err_out; + + for (i = 0; i < MVS_MAX_PHYS; i++) { + mvs_phy_init(mvi, i); + arr_phy[i] = &mvi->phy[i].sas_phy; + arr_port[i] = &mvi->port[i].sas_port; + mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED; + mvi->port[i].wide_port_phymap = 0; + mvi->port[i].port_attached = 0; + INIT_LIST_HEAD(&mvi->port[i].list); + } + + 
SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas; + mvi->shost->transportt = mvs_stt; + mvi->shost->max_id = 21; + mvi->shost->max_lun = ~0; + mvi->shost->max_channel = 0; + mvi->shost->max_cmd_len = 16; + + mvi->sas.sas_ha_name = DRV_NAME; + mvi->sas.dev = &pdev->dev; + mvi->sas.lldd_module = THIS_MODULE; + mvi->sas.sas_addr = &mvi->sas_addr[0]; + mvi->sas.sas_phy = arr_phy; + mvi->sas.sas_port = arr_port; + mvi->sas.num_phys = chip->n_phy; + mvi->sas.lldd_max_execute_num = 1; + mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE; + mvi->shost->can_queue = MVS_CAN_QUEUE; + mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys; + mvi->sas.lldd_ha = mvi; + mvi->sas.core.shost = mvi->shost; + + mvs_tag_init(mvi); + + /* + * ioremap main and peripheral registers + */ + +#ifdef MVS_ENABLE_PERI + res_start = pci_resource_start(pdev, 2); + res_len = pci_resource_len(pdev, 2); + if (!res_start || !res_len) + goto err_out; + + mvi->peri_regs = ioremap_nocache(res_start, res_len); + if (!mvi->peri_regs) + goto err_out; +#endif + + res_start = pci_resource_start(pdev, 4); + res_len = pci_resource_len(pdev, 4); + if (!res_start || !res_len) + goto err_out; + + res_flag = pci_resource_flags(pdev, 4); + if (res_flag & IORESOURCE_CACHEABLE) + mvi->regs = ioremap(res_start, res_len); + else + mvi->regs = ioremap_nocache(res_start, res_len); + + if (!mvi->regs) + goto err_out; + + /* + * alloc and init our DMA areas + */ + + mvi->tx = dma_alloc_coherent(&pdev->dev, + sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, + &mvi->tx_dma, GFP_KERNEL); + if (!mvi->tx) + goto err_out; + memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ); + + mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ, + &mvi->rx_fis_dma, GFP_KERNEL); + if (!mvi->rx_fis) + goto err_out; + memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); + + mvi->rx = dma_alloc_coherent(&pdev->dev, + sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), + &mvi->rx_dma, GFP_KERNEL); + if (!mvi->rx) + goto err_out; + memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1)); + + mvi->rx[0] = cpu_to_le32(0xfff); + mvi->rx_cons = 0xfff; + + mvi->slot = dma_alloc_coherent(&pdev->dev, + sizeof(*mvi->slot) * MVS_SLOTS, + &mvi->slot_dma, GFP_KERNEL); + if (!mvi->slot) + goto err_out; + memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS); + + for (i = 0; i < MVS_SLOTS; i++) { + struct mvs_slot_info *slot = &mvi->slot_info[i]; + + slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ, + &slot->buf_dma, GFP_KERNEL); + if (!slot->buf) + goto err_out; + memset(slot->buf, 0, MVS_SLOT_BUF_SZ); + } + + /* finally, read NVRAM to get our SAS address */ + if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8)) + goto err_out; + return mvi; + +err_out: + mvs_free(mvi); + return NULL; +} + +/* move to PCI layer or libata core? 
*/
+static int pci_go_64(struct pci_dev *pdev)
+{
+	int rc;
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (rc) {
+			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			if (rc) {
+				dev_printk(KERN_ERR, &pdev->dev,
+					   "64-bit DMA enable failed\n");
+				return rc;
+			}
+		}
+	} else {
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit DMA enable failed\n");
+			return rc;
+		}
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit consistent DMA enable failed\n");
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int __devinit mvs_pci_init(struct pci_dev *pdev,
+				  const struct pci_device_id *ent)
+{
+	int rc;
+	struct mvs_info *mvi;
+	irq_handler_t irq_handler = mvs_interrupt;
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	pci_set_master(pdev);
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc)
+		goto err_out_disable;
+
+	rc = pci_go_64(pdev);
+	if (rc)
+		goto err_out_regions;
+
+	mvi = mvs_alloc(pdev, ent);
+	if (!mvi) {
+		rc = -ENOMEM;
+		goto err_out_regions;
+	}
+
+	rc = mvs_hw_init(mvi);
+	if (rc)
+		goto err_out_mvi;
+
+#ifndef MVS_DISABLE_MSI
+	if (!pci_enable_msi(pdev)) {
+		u32 tmp;
+		void __iomem *regs = mvi->regs;
+		mvi->flags |= MVF_MSI;
+		irq_handler = mvs_msi_interrupt;
+		tmp = mr32(PCS);
+		mw32(PCS, tmp | PCS_SELF_CLEAR);
+	}
+#endif
+
+	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
+	if (rc)
+		goto err_out_msi;
+
+	rc = scsi_add_host(mvi->shost, &pdev->dev);
+	if (rc)
+		goto err_out_irq;
+
+	rc = sas_register_ha(&mvi->sas);
+	if (rc)
+		goto err_out_shost;
+
+	pci_set_drvdata(pdev, mvi);
+
+	mvs_print_info(mvi);
+
+	mvs_hba_interrupt_enable(mvi);
+
+	scsi_scan_host(mvi->shost);
+
+	return 0;
+
+err_out_shost:
+	scsi_remove_host(mvi->shost);
+err_out_irq:
+	free_irq(pdev->irq, mvi);
+err_out_msi:
+	if (mvi->flags & MVF_MSI)
+		pci_disable_msi(pdev);
+err_out_mvi:
+	mvs_free(mvi);
+err_out_regions:
+	pci_release_regions(pdev);
+err_out_disable:
+	pci_disable_device(pdev);
+	return rc;
+}
+
+static void __devexit mvs_pci_remove(struct pci_dev *pdev)
+{
+	struct mvs_info *mvi = pci_get_drvdata(pdev);
+
+	pci_set_drvdata(pdev, NULL);
+
+	if (mvi) {
+		sas_unregister_ha(&mvi->sas);
+		mvs_hba_interrupt_disable(mvi);
+		sas_remove_host(mvi->shost);
+		scsi_remove_host(mvi->shost);
+
+		free_irq(pdev->irq, mvi);
+		if (mvi->flags & MVF_MSI)
+			pci_disable_msi(pdev);
+		mvs_free(mvi);
+		pci_release_regions(pdev);
+	}
+	pci_disable_device(pdev);
+}
+
+static struct pci_device_id __devinitdata mvs_pci_table[] = {
+	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
+	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
+	{
+		.vendor		= PCI_VENDOR_ID_MARVELL,
+		.device		= 0x6440,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= 0x6480,
+		.class		= 0,
+		.class_mask	= 0,
+		.driver_data	= chip_6480,
+	},
+	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
+	{ PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver mvs_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= mvs_pci_table,
+	.probe		= mvs_pci_init,
+	.remove		= __devexit_p(mvs_pci_remove),
+};
+
+static int __init mvs_init(void)
+{
+	int rc;
+
+	mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
+	if (!mvs_stt)
+		return -ENOMEM;
+
+	rc = pci_register_driver(&mvs_pci_driver);
+	if (rc)
+		goto err_out;
+
+	return 0;
+
+err_out:
+	sas_release_transport(mvs_stt);
+	return rc;
+}
+
+static void __exit
mvs_exit(void) +{ + pci_unregister_driver(&mvs_pci_driver); + sas_release_transport(mvs_stt); +} + +module_init(mvs_init); +module_exit(mvs_exit); + +MODULE_AUTHOR("Jeff Garzik "); +MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, mvs_pci_table); diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index e4acebd10d1b..6a583c19c6e5 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -1,5 +1,5 @@ /* - mvsas.c - Marvell 88SE6440 SAS/SATA support + mv_sas.c - Marvell 88SE6440 SAS/SATA support Copyright 2007 Red Hat, Inc. Copyright 2008 Marvell. @@ -28,34 +28,9 @@ */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DRV_NAME "mvsas" -#define DRV_VERSION "0.5.2" -#define _MV_DUMP 0 -#define MVS_DISABLE_NVRAM -#define MVS_DISABLE_MSI - -#define mr32(reg) readl(regs + MVS_##reg) -#define mw32(reg,val) writel((val), regs + MVS_##reg) -#define mw32_f(reg,val) do { \ - writel((val), regs + MVS_##reg); \ - readl(regs + MVS_##reg); \ - } while (0) - -#define MVS_ID_NOT_MAPPED 0x7f -#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) +#include "mv_sas.h" +#include "mv_64xx.h" +#include "mv_chips.h" /* offset for D2H FIS in the Received FIS List Structure */ #define SATA_RECEIVED_D2H_FIS(reg_set) \ @@ -65,670 +40,70 @@ #define UNASSOC_D2H_FIS(id) \ ((void *) mvi->rx_fis + 0x100 * id) -#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \ - for ((__mc) = (__lseq_mask), (__lseq) = 0; \ - (__mc) != 0 && __rest; \ - (++__lseq), (__mc) >>= 1) - -/* driver compile-time configuration */ -enum driver_configuration { - MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ - MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ - /* software requires power-of-2 - ring size */ - - MVS_SLOTS = 512, /* command slots */ - MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */ - MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ - MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ - MVS_OAF_SZ = 64, /* Open address frame buffer size */ - - MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */ - - MVS_QUEUE_SIZE = 30, /* Support Queue depth */ - MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */ -}; - -/* unchangeable hardware details */ -enum hardware_details { - MVS_MAX_PHYS = 8, /* max. possible phys */ - MVS_MAX_PORTS = 8, /* max. 
possible ports */ - MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100), -}; - -/* peripheral registers (BAR2) */ -enum peripheral_registers { - SPI_CTL = 0x10, /* EEPROM control */ - SPI_CMD = 0x14, /* EEPROM command */ - SPI_DATA = 0x18, /* EEPROM data */ -}; - -enum peripheral_register_bits { - TWSI_RDY = (1U << 7), /* EEPROM interface ready */ - TWSI_RD = (1U << 4), /* EEPROM read access */ - - SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */ -}; - -/* enhanced mode registers (BAR4) */ -enum hw_registers { - MVS_GBL_CTL = 0x04, /* global control */ - MVS_GBL_INT_STAT = 0x08, /* global irq status */ - MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ - MVS_GBL_PORT_TYPE = 0xa0, /* port type */ - - MVS_CTL = 0x100, /* SAS/SATA port configuration */ - MVS_PCS = 0x104, /* SAS/SATA port control/status */ - MVS_CMD_LIST_LO = 0x108, /* cmd list addr */ - MVS_CMD_LIST_HI = 0x10C, - MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */ - MVS_RX_FIS_HI = 0x114, - - MVS_TX_CFG = 0x120, /* TX configuration */ - MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ - MVS_TX_HI = 0x128, - - MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */ - MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */ - MVS_RX_CFG = 0x134, /* RX configuration */ - MVS_RX_LO = 0x138, /* RX (completion) ring addr */ - MVS_RX_HI = 0x13C, - MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */ - - MVS_INT_COAL = 0x148, /* Int coalescing config */ - MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ - MVS_INT_STAT = 0x150, /* Central int status */ - MVS_INT_MASK = 0x154, /* Central int enable */ - MVS_INT_STAT_SRS = 0x158, /* SATA register set status */ - MVS_INT_MASK_SRS = 0x15C, - - /* ports 1-3 follow after this */ - MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */ - MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */ - MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */ - MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */ - - /* ports 1-3 follow after this */ - MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */ - MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */ - - MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */ - MVS_CMD_DATA = 0x1BC, /* Command register port (data) */ - - /* ports 1-3 follow after this */ - MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */ - MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */ - MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */ - MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */ - - /* ports 1-3 follow after this */ - MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */ - MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */ - MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */ - MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */ -}; - -enum hw_register_bits { - /* MVS_GBL_CTL */ - INT_EN = (1U << 1), /* Global int enable */ - HBA_RST = (1U << 0), /* HBA reset */ - - /* MVS_GBL_INT_STAT */ - INT_XOR = (1U << 4), /* XOR engine event */ - INT_SAS_SATA = (1U << 0), /* SAS/SATA event */ - - /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */ - SATA_TARGET = (1U << 16), /* port0 SATA target enable */ - MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */ - MODE_AUTO_DET_PORT6 = (1U << 14), - MODE_AUTO_DET_PORT5 = (1U << 13), - MODE_AUTO_DET_PORT4 = (1U << 12), - MODE_AUTO_DET_PORT3 = (1U << 11), - MODE_AUTO_DET_PORT2 = (1U << 10), - MODE_AUTO_DET_PORT1 = (1U << 9), - MODE_AUTO_DET_PORT0 = (1U << 8), - MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 | - MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 | - MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 
| - MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7, - MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */ - MODE_SAS_PORT6_MASK = (1U << 6), - MODE_SAS_PORT5_MASK = (1U << 5), - MODE_SAS_PORT4_MASK = (1U << 4), - MODE_SAS_PORT3_MASK = (1U << 3), - MODE_SAS_PORT2_MASK = (1U << 2), - MODE_SAS_PORT1_MASK = (1U << 1), - MODE_SAS_PORT0_MASK = (1U << 0), - MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK | - MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK | - MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK | - MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK, - - /* SAS_MODE value may be - * dictated (in hw) by values - * of SATA_TARGET & AUTO_DET - */ - - /* MVS_TX_CFG */ - TX_EN = (1U << 16), /* Enable TX */ - TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */ - - /* MVS_RX_CFG */ - RX_EN = (1U << 16), /* Enable RX */ - RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */ - - /* MVS_INT_COAL */ - COAL_EN = (1U << 16), /* Enable int coalescing */ - - /* MVS_INT_STAT, MVS_INT_MASK */ - CINT_I2C = (1U << 31), /* I2C event */ - CINT_SW0 = (1U << 30), /* software event 0 */ - CINT_SW1 = (1U << 29), /* software event 1 */ - CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */ - CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */ - CINT_MEM = (1U << 26), /* int mem parity err */ - CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */ - CINT_SRS = (1U << 3), /* SRS event */ - CINT_CI_STOP = (1U << 1), /* cmd issue stopped */ - CINT_DONE = (1U << 0), /* cmd completion */ - - /* shl for ports 1-3 */ - CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */ - CINT_PORT = (1U << 8), /* port0 event */ - CINT_PORT_MASK_OFFSET = 8, - CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET), - - /* TX (delivery) ring bits */ - TXQ_CMD_SHIFT = 29, - TXQ_CMD_SSP = 1, /* SSP protocol */ - TXQ_CMD_SMP = 2, /* SMP protocol */ - TXQ_CMD_STP = 3, /* STP/SATA protocol */ - TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ - TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ - TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ - TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */ - TXQ_SRS_SHIFT = 20, /* SATA register set */ - TXQ_SRS_MASK = 0x7f, - TXQ_PHY_SHIFT = 12, /* PHY bitmap */ - TXQ_PHY_MASK = 0xff, - TXQ_SLOT_MASK = 0xfff, /* slot number */ - - /* RX (completion) ring bits */ - RXQ_GOOD = (1U << 23), /* Response good */ - RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */ - RXQ_CMD_RX = (1U << 20), /* target cmd received */ - RXQ_ATTN = (1U << 19), /* attention */ - RXQ_RSP = (1U << 18), /* response frame xfer'd */ - RXQ_ERR = (1U << 17), /* err info rec xfer'd */ - RXQ_DONE = (1U << 16), /* cmd complete */ - RXQ_SLOT_MASK = 0xfff, /* slot number */ - - /* mvs_cmd_hdr bits */ - MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */ - MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */ - - /* SSP initiator only */ - MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */ - - /* SSP initiator or target */ - MCH_SSP_FR_TASK = 0x1, /* TASK frame */ - - /* SSP target only */ - MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */ - MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */ - MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */ - MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */ - - MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */ - MCH_FBURST = (1U << 11), /* first burst (SSP) */ - MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */ - MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */ - MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */ - MCH_RESET = (1U << 7), /* Reset (STP/SATA) */ - MCH_FPDMA = (1U << 
6), /* First party DMA (STP/SATA) */ - MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */ - MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */ - MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/ - - CCTL_RST = (1U << 5), /* port logic reset */ - - /* 0(LSB first), 1(MSB first) */ - CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */ - CCTL_ENDIAN_RSP = (1U << 2), /* response frame */ - CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */ - CCTL_ENDIAN_CMD = (1U << 0), /* command table */ - - /* MVS_Px_SER_CTLSTAT (per-phy control) */ - PHY_SSP_RST = (1U << 3), /* reset SSP link layer */ - PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */ - PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */ - PHY_RST = (1U << 0), /* phy reset */ - PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8), - PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12), - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), - PHY_NEG_SPP_PHYS_LINK_RATE_MASK = - (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), - PHY_READY_MASK = (1U << 20), - - /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */ - PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */ - PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */ - PHYEV_AN = (1U << 18), /* SATA async notification */ - PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */ - PHYEV_SIG_FIS = (1U << 16), /* signature FIS */ - PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */ - PHYEV_IU_BIG = (1U << 11), /* IU too long err */ - PHYEV_IU_SMALL = (1U << 10), /* IU too short err */ - PHYEV_UNK_TAG = (1U << 9), /* unknown tag */ - PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */ - PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */ - PHYEV_PORT_SEL = (1U << 6), /* port selector present */ - PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */ - PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */ - PHYEV_ID_FAIL = (1U << 3), /* identify failed */ - PHYEV_ID_DONE = (1U << 2), /* identify done */ - PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */ - PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */ - - /* MVS_PCS */ - PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */ - PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */ - PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */ - PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ - PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ - PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ - PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ - PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ - PCS_CMD_RST = (1U << 1), /* reset cmd issue */ - PCS_CMD_EN = (1U << 0), /* enable cmd issue */ - - /* Port n Attached Device Info */ - PORT_DEV_SSP_TRGT = (1U << 19), - PORT_DEV_SMP_TRGT = (1U << 18), - PORT_DEV_STP_TRGT = (1U << 17), - PORT_DEV_SSP_INIT = (1U << 11), - PORT_DEV_SMP_INIT = (1U << 10), - PORT_DEV_STP_INIT = (1U << 9), - PORT_PHY_ID_MASK = (0xFFU << 24), - PORT_DEV_TRGT_MASK = (0x7U << 17), - PORT_DEV_INIT_MASK = (0x7U << 9), - PORT_DEV_TYPE_MASK = (0x7U << 0), - - /* Port n PHY Status */ - PHY_RDY = (1U << 2), - PHY_DW_SYNC = (1U << 1), - PHY_OOB_DTCTD = (1U << 0), - - /* VSR */ - /* PHYMODE 6 (CDB) */ - PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */ - PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */ - PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/ - PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */ - PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */ - PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */ - PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select 
(final) */ - PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */ - PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */ - PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */ - PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */ - PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */ - PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */ - PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */ -}; - -enum mvs_info_flags { - MVF_MSI = (1U << 0), /* MSI is enabled */ - MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ -}; - -enum sas_cmd_port_registers { - CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */ - CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */ - CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */ - CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */ - CMD_OOB_SPACE = 0x110, /* OOB space control register */ - CMD_OOB_BURST = 0x114, /* OOB burst control register */ - CMD_PHY_TIMER = 0x118, /* PHY timer control register */ - CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */ - CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */ - CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */ - CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */ - CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */ - CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */ - CMD_ID_TEST = 0x134, /* ID test register */ - CMD_PL_TIMER = 0x138, /* PL timer register */ - CMD_WD_TIMER = 0x13c, /* WD timer register */ - CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */ - CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */ - CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */ - CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */ - CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */ - CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */ - CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */ - CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */ - CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */ - CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memroy BIST Status */ - CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */ - CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */ - CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */ - CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */ - CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */ - CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */ - CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */ - CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */ - CMD_RESET_COUNT = 0x188, /* Reset Count */ - CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */ - CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */ - CMD_PHY_CTL = 0x194, /* PHY Control and Status */ - CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */ - CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */ - CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */ - CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */ - CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */ - CMD_HOST_CTL = 0x1AC, /* Host Control Status */ - CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */ - CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */ - CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */ - CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */ - CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */ - CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */ -}; - -/* SAS/SATA configuration port registers, aka phy registers */ -enum 
sas_sata_config_port_regs { - PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */ - PHYR_ADDR_LO = 0x04, /* my SAS address (low) */ - PHYR_ADDR_HI = 0x08, /* my SAS address (high) */ - PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */ - PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */ - PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */ - PHYR_SATA_CTL = 0x18, /* SATA control */ - PHYR_PHY_STAT = 0x1C, /* PHY status */ - PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */ - PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */ - PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */ - PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */ - PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */ - PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */ - PHYR_WIDE_PORT = 0x38, /* wide port participating */ - PHYR_CURRENT0 = 0x80, /* current connection info 0 */ - PHYR_CURRENT1 = 0x84, /* current connection info 1 */ - PHYR_CURRENT2 = 0x88, /* current connection info 2 */ -}; - -/* SAS/SATA Vendor Specific Port Registers */ -enum sas_sata_vsp_regs { - VSR_PHY_STAT = 0x00, /* Phy Status */ - VSR_PHY_MODE1 = 0x01, /* phy tx */ - VSR_PHY_MODE2 = 0x02, /* tx scc */ - VSR_PHY_MODE3 = 0x03, /* pll */ - VSR_PHY_MODE4 = 0x04, /* VCO */ - VSR_PHY_MODE5 = 0x05, /* Rx */ - VSR_PHY_MODE6 = 0x06, /* CDR */ - VSR_PHY_MODE7 = 0x07, /* Impedance */ - VSR_PHY_MODE8 = 0x08, /* Voltage */ - VSR_PHY_MODE9 = 0x09, /* Test */ - VSR_PHY_MODE10 = 0x0A, /* Power */ - VSR_PHY_MODE11 = 0x0B, /* Phy Mode */ - VSR_PHY_VS0 = 0x0C, /* Vednor Specific 0 */ - VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */ -}; - -enum pci_cfg_registers { - PCR_PHY_CTL = 0x40, - PCR_PHY_CTL2 = 0x90, - PCR_DEV_CTRL = 0xE8, -}; - -enum pci_cfg_register_bits { - PCTL_PWR_ON = (0xFU << 24), - PCTL_OFF = (0xFU << 12), - PRD_REQ_SIZE = (0x4000), - PRD_REQ_MASK = (0x00007000), -}; - -enum nvram_layout_offsets { - NVR_SIG = 0x00, /* 0xAA, 0x55 */ - NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */ -}; - -enum chip_flavors { - chip_6320, - chip_6440, - chip_6480, -}; - -enum port_type { - PORT_TYPE_SAS = (1L << 1), - PORT_TYPE_SATA = (1L << 0), -}; - -/* Command Table Format */ -enum ct_format { - /* SSP */ - SSP_F_H = 0x00, - SSP_F_IU = 0x18, - SSP_F_MAX = 0x4D, - /* STP */ - STP_CMD_FIS = 0x00, - STP_ATAPI_CMD = 0x40, - STP_F_MAX = 0x10, - /* SMP */ - SMP_F_T = 0x00, - SMP_F_DEP = 0x01, - SMP_F_MAX = 0x101, -}; - -enum status_buffer { - SB_EIR_OFF = 0x00, /* Error Information Record */ - SB_RFB_OFF = 0x08, /* Response Frame Buffer */ - SB_RFB_MAX = 0x400, /* RFB size*/ -}; - -enum error_info_rec { - CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */ - CMD_PI_ERR = (1U << 30), /* Protection info error. 
see flags2 */ - RSP_OVER = (1U << 29), /* rsp buffer overflow */ - RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */ - UNK_FIS = (1U << 27), /* unknown FIS */ - DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */ - SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */ - TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */ - R_ERR = (1U << 23), /* SATA returned R_ERR prim */ - RD_OFS = (1U << 20), /* Read DATA frame invalid offset */ - XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */ - UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */ - DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */ - INTERLOCK = (1U << 15), /* interlock error */ - NAK = (1U << 14), /* NAK rx'd */ - ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */ - CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */ - OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */ - PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */ - NO_DEST = (1U << 9), /* I_T nexus lost, no destination */ - STP_RES_BSY = (1U << 8), /* STP resources busy */ - BREAK = (1U << 7), /* break received */ - BAD_DEST = (1U << 6), /* bad destination */ - BAD_PROTO = (1U << 5), /* protocol not supported */ - BAD_RATE = (1U << 4), /* cxn rate not supported */ - WRONG_DEST = (1U << 3), /* wrong destination error */ - CREDIT_TO = (1U << 2), /* credit timeout */ - WDOG_TO = (1U << 1), /* watchdog timeout */ - BUF_PAR = (1U << 0), /* buffer parity error */ -}; - -enum error_info_rec_2 { - SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */ - GRD_CHK_ERR = (1U << 14), /* Guard Check Error */ - APP_CHK_ERR = (1U << 13), /* Application Check error */ - REF_CHK_ERR = (1U << 12), /* Reference Check Error */ - USR_BLK_NM = (1U << 0), /* User Block Number */ -}; - -struct mvs_chip_info { - u32 n_phy; - u32 srs_sz; - u32 slot_width; -}; - -struct mvs_err_info { - __le32 flags; - __le32 flags2; -}; - -struct mvs_prd { - __le64 addr; /* 64-bit buffer address */ - __le32 reserved; - __le32 len; /* 16-bit length */ -}; - -struct mvs_cmd_hdr { - __le32 flags; /* PRD tbl len; SAS, SATA ctl */ - __le32 lens; /* cmd, max resp frame len */ - __le32 tags; /* targ port xfer tag; tag */ - __le32 data_len; /* data xfer len */ - __le64 cmd_tbl; /* command table address */ - __le64 open_frame; /* open addr frame address */ - __le64 status_buf; /* status buffer address */ - __le64 prd_tbl; /* PRD tbl address */ - __le32 reserved[4]; -}; - -struct mvs_port { - struct asd_sas_port sas_port; - u8 port_attached; - u8 taskfileset; - u8 wide_port_phymap; - struct list_head list; -}; - -struct mvs_phy { - struct mvs_port *port; - struct asd_sas_phy sas_phy; - struct sas_identify identify; - struct scsi_device *sdev; - u64 dev_sas_addr; - u64 att_dev_sas_addr; - u32 att_dev_info; - u32 dev_info; - u32 phy_type; - u32 phy_status; - u32 irq_status; - u32 frame_rcvd_size; - u8 frame_rcvd[32]; - u8 phy_attached; - enum sas_linkrate minimum_linkrate; - enum sas_linkrate maximum_linkrate; -}; - -struct mvs_slot_info { - struct list_head list; - struct sas_task *task; - u32 n_elem; - u32 tx; - - /* DMA buffer for storing cmd tbl, open addr frame, status buffer, - * and PRD table - */ - void *buf; - dma_addr_t buf_dma; -#if _MV_DUMP - u32 cmd_size; -#endif - - void *response; - struct mvs_port *port; +struct mvs_task_exec_info { + struct sas_task *task; + struct mvs_cmd_hdr *hdr; + struct mvs_port *port; + u32 tag; + int n_elem; }; -struct mvs_info { - unsigned long flags; - - spinlock_t lock; /* host-wide lock */ - struct pci_dev *pdev; /* our 
device */ - void __iomem *regs; /* enhanced mode registers */ - void __iomem *peri_regs; /* peripheral registers */ - - u8 sas_addr[SAS_ADDR_SIZE]; - struct sas_ha_struct sas; /* SCSI/SAS glue */ - struct Scsi_Host *shost; - - __le32 *tx; /* TX (delivery) DMA ring */ - dma_addr_t tx_dma; - u32 tx_prod; /* cached next-producer idx */ - - __le32 *rx; /* RX (completion) DMA ring */ - dma_addr_t rx_dma; - u32 rx_cons; /* RX consumer idx */ - - __le32 *rx_fis; /* RX'd FIS area */ - dma_addr_t rx_fis_dma; - - struct mvs_cmd_hdr *slot; /* DMA command header slots */ - dma_addr_t slot_dma; - - const struct mvs_chip_info *chip; +static void mvs_release_task(struct mvs_info *mvi, int phy_no); +static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i); +static void mvs_update_phyinfo(struct mvs_info *mvi, int i, + int get_st); +static int mvs_int_rx(struct mvs_info *mvi, bool self_clear); +static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, + u32 slot_idx); - u8 tags[MVS_SLOTS]; - struct mvs_slot_info slot_info[MVS_SLOTS]; - /* further per-slot information */ - struct mvs_phy phy[MVS_MAX_PHYS]; - struct mvs_port port[MVS_MAX_PHYS]; -#ifdef MVS_USE_TASKLET - struct tasklet_struct tasklet; -#endif -}; +static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) +{ + if (task->lldd_task) { + struct mvs_slot_info *slot; + slot = (struct mvs_slot_info *) task->lldd_task; + *tag = slot - mvi->slot_info; + return 1; + } + return 0; +} -static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, - void *funcdata); -static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port); -static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val); -static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port); -static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val); -static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val); -static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port); +static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) +{ + void *bitmap = (void *) &mvi->tags; + clear_bit(tag, bitmap); +} -static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i); -static void mvs_detect_porttype(struct mvs_info *mvi, int i); -static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); -static void mvs_release_task(struct mvs_info *mvi, int phy_no); +static void mvs_tag_free(struct mvs_info *mvi, u32 tag) +{ + mvs_tag_clear(mvi, tag); +} -static int mvs_scan_finished(struct Scsi_Host *, unsigned long); -static void mvs_scan_start(struct Scsi_Host *); -static int mvs_slave_configure(struct scsi_device *sdev); +static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) +{ + void *bitmap = (void *) &mvi->tags; + set_bit(tag, bitmap); +} -static struct scsi_transport_template *mvs_stt; +static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) +{ + unsigned int index, tag; + void *bitmap = (void *) &mvi->tags; -static const struct mvs_chip_info mvs_chips[] = { - [chip_6320] = { 2, 16, 9 }, - [chip_6440] = { 4, 16, 9 }, - [chip_6480] = { 8, 32, 10 }, -}; + index = find_first_zero_bit(bitmap, MVS_SLOTS); + tag = index; + if (tag >= MVS_SLOTS) + return -SAS_QUEUE_FULL; + mvs_tag_set(mvi, tag); + *tag_out = tag; + return 0; +} -static struct scsi_host_template mvs_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .queuecommand = sas_queuecommand, - .target_alloc = sas_target_alloc, - .slave_configure = mvs_slave_configure, - .slave_destroy = sas_slave_destroy, - .scan_finished = 
mvs_scan_finished, - .scan_start = mvs_scan_start, - .change_queue_depth = sas_change_queue_depth, - .change_queue_type = sas_change_queue_type, - .bios_param = sas_bios_param, - .can_queue = 1, - .cmd_per_lun = 1, - .this_id = -1, - .sg_tablesize = SG_ALL, - .max_sectors = SCSI_DEFAULT_MAX_SECTORS, - .use_clustering = ENABLE_CLUSTERING, - .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_bus_reset_handler = sas_eh_bus_reset_handler, - .slave_alloc = sas_slave_alloc, - .target_destroy = sas_target_destroy, - .ioctl = sas_ioctl, -}; +void mvs_tag_init(struct mvs_info *mvi) +{ + int i; + for (i = 0; i < MVS_SLOTS; ++i) + mvs_tag_clear(mvi, i); +} static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) { @@ -848,959 +223,270 @@ static void mvs_hba_cq_dump(struct mvs_info *mvi) #endif } -static void mvs_hba_interrupt_enable(struct mvs_info *mvi) +/* FIXME: locking? */ +int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata) { - void __iomem *regs = mvi->regs; + struct mvs_info *mvi = sas_phy->ha->lldd_ha; + int rc = 0, phy_id = sas_phy->id; u32 tmp; - tmp = mr32(GBL_CTL); - - mw32(GBL_CTL, tmp | INT_EN); -} + tmp = mvs_read_phy_ctl(mvi, phy_id); -static void mvs_hba_interrupt_disable(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp; + switch (func) { + case PHY_FUNC_SET_LINK_RATE:{ + struct sas_phy_linkrates *rates = funcdata; + u32 lrmin = 0, lrmax = 0; - tmp = mr32(GBL_CTL); + lrmin = (rates->minimum_linkrate << 8); + lrmax = (rates->maximum_linkrate << 12); - mw32(GBL_CTL, tmp & ~INT_EN); -} + if (lrmin) { + tmp &= ~(0xf << 8); + tmp |= lrmin; + } + if (lrmax) { + tmp &= ~(0xf << 12); + tmp |= lrmax; + } + mvs_write_phy_ctl(mvi, phy_id, tmp); + break; + } -static int mvs_int_rx(struct mvs_info *mvi, bool self_clear); + case PHY_FUNC_HARD_RESET: + if (tmp & PHY_RST_HARD) + break; + mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD); + break; -/* move to PCI layer or libata core? 
*/ -static int pci_go_64(struct pci_dev *pdev) -{ - int rc; + case PHY_FUNC_LINK_RESET: + mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST); + break; - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (rc) { - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "64-bit DMA enable failed\n"); - return rc; - } - } - } else { - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "32-bit DMA enable failed\n"); - return rc; - } - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "32-bit consistent DMA enable failed\n"); - return rc; - } + case PHY_FUNC_DISABLE: + case PHY_FUNC_RELEASE_SPINUP_HOLD: + default: + rc = -EOPNOTSUPP; } return rc; } -static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) +static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) { - if (task->lldd_task) { - struct mvs_slot_info *slot; - slot = (struct mvs_slot_info *) task->lldd_task; - *tag = slot - mvi->slot_info; - return 1; + struct mvs_phy *phy = &mvi->phy[i]; + struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; + + if (!phy->phy_attached) + return; + + if (sas_phy->phy) { + struct sas_phy *sphy = sas_phy->phy; + + sphy->negotiated_linkrate = sas_phy->linkrate; + sphy->minimum_linkrate = phy->minimum_linkrate; + sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; + sphy->maximum_linkrate = phy->maximum_linkrate; + sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; } - return 0; -} -static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) -{ - void *bitmap = (void *) &mvi->tags; - clear_bit(tag, bitmap); -} + if (phy->phy_type & PORT_TYPE_SAS) { + struct sas_identify_frame *id; -static void mvs_tag_free(struct mvs_info *mvi, u32 tag) -{ - mvs_tag_clear(mvi, tag); + id = (struct sas_identify_frame *)phy->frame_rcvd; + id->dev_type = phy->identify.device_type; + id->initiator_bits = SAS_PROTOCOL_ALL; + id->target_bits = phy->identify.target_port_protocols; + } else if (phy->phy_type & PORT_TYPE_SATA) { + /* TODO */ + } + mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size; + mvi->sas.notify_port_event(mvi->sas.sas_phy[i], + PORTE_BYTES_DMAED); } -static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) +int mvs_slave_configure(struct scsi_device *sdev) { - void *bitmap = (void *) &mvi->tags; - set_bit(tag, bitmap); -} + struct domain_device *dev = sdev_to_domain_dev(sdev); + int ret = sas_slave_configure(sdev); -static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) -{ - unsigned int index, tag; - void *bitmap = (void *) &mvi->tags; + if (ret) + return ret; - index = find_first_zero_bit(bitmap, MVS_SLOTS); - tag = index; - if (tag >= MVS_SLOTS) - return -SAS_QUEUE_FULL; - mvs_tag_set(mvi, tag); - *tag_out = tag; + if (dev_is_sata(dev)) { + /* struct ata_port *ap = dev->sata_dev.ap; */ + /* struct ata_device *adev = ap->link.device; */ + + /* clamp at no NCQ for the time being */ + /* adev->flags |= ATA_DFLAG_NCQ_OFF; */ + scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); + } return 0; } -static void mvs_tag_init(struct mvs_info *mvi) +void mvs_scan_start(struct Scsi_Host *shost) { int i; - for (i = 0; i < MVS_SLOTS; ++i) - mvs_tag_clear(mvi, i); + struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha; + + for (i = 0; i < mvi->chip->n_phy; ++i) { + mvs_bytes_dmaed(mvi, i); + } } -#ifndef MVS_DISABLE_NVRAM -static int mvs_eep_read(void __iomem *regs, u32 addr, 
u32 *data) +int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) { - int timeout = 1000; - - if (addr & ~SPI_ADDR_MASK) - return -EINVAL; - - writel(addr, regs + SPI_CMD); - writel(TWSI_RD, regs + SPI_CTL); - - while (timeout-- > 0) { - if (readl(regs + SPI_CTL) & TWSI_RDY) { - *data = readl(regs + SPI_DATA); - return 0; - } - - udelay(10); - } - - return -EBUSY; + /* give the phy enabling interrupt event time to come in (1s + * is empirically about all it takes) */ + if (time < HZ) + return 0; + /* Wait for discovery to finish */ + scsi_flush_work(shost); + return 1; } -static int mvs_eep_read_buf(void __iomem *regs, u32 addr, - void *buf, u32 buflen) +static int mvs_task_prep_smp(struct mvs_info *mvi, + struct mvs_task_exec_info *tei) { - u32 addr_end, tmp_addr, i, j; - u32 tmp = 0; - int rc; - u8 *tmp8, *buf8 = buf; - - addr_end = addr + buflen; - tmp_addr = ALIGN(addr, 4); - if (addr > 0xff) - return -EINVAL; - - j = addr & 0x3; - if (j) { - rc = mvs_eep_read(regs, tmp_addr, &tmp); - if (rc) - return rc; + int elem, rc, i; + struct sas_task *task = tei->task; + struct mvs_cmd_hdr *hdr = tei->hdr; + struct scatterlist *sg_req, *sg_resp; + u32 req_len, resp_len, tag = tei->tag; + void *buf_tmp; + u8 *buf_oaf; + dma_addr_t buf_tmp_dma; + struct mvs_prd *buf_prd; + struct scatterlist *sg; + struct mvs_slot_info *slot = &mvi->slot_info[tag]; + struct asd_sas_port *sas_port = task->dev->port; + u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); +#if _MV_DUMP + u8 *buf_cmd; + void *from; +#endif + /* + * DMA-map SMP request, response buffers + */ + sg_req = &task->smp_task.smp_req; + elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); - tmp8 = (u8 *)&tmp; - for (i = j; i < 4; i++) - *buf8++ = tmp8[i]; + sg_resp = &task->smp_task.smp_resp; + elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out; + } + resp_len = sg_dma_len(sg_resp); - tmp_addr += 4; + /* must be in dwords */ + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto err_out_2; } - for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) { - rc = mvs_eep_read(regs, tmp_addr, &tmp); - if (rc) - return rc; + /* + * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs + */ - memcpy(buf8, &tmp, 4); - buf8 += 4; - } + /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ + buf_tmp = slot->buf; + buf_tmp_dma = slot->buf_dma; - if (tmp_addr < addr_end) { - rc = mvs_eep_read(regs, tmp_addr, &tmp); - if (rc) - return rc; +#if _MV_DUMP + buf_cmd = buf_tmp; + hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); + buf_tmp += req_len; + buf_tmp_dma += req_len; + slot->cmd_size = req_len; +#else + hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); +#endif - tmp8 = (u8 *)&tmp; - j = addr_end - tmp_addr; - for (i = 0; i < j; i++) - *buf8++ = tmp8[i]; + /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ + buf_oaf = buf_tmp; + hdr->open_frame = cpu_to_le64(buf_tmp_dma); - tmp_addr += 4; - } + buf_tmp += MVS_OAF_SZ; + buf_tmp_dma += MVS_OAF_SZ; - return 0; -} -#endif + /* region 3: PRD table ********************************************* */ + buf_prd = buf_tmp; + if (tei->n_elem) + hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); + else + hdr->prd_tbl = 0; -static int mvs_nvram_read(struct mvs_info *mvi, u32 addr, - void *buf, u32 buflen) -{ -#ifndef MVS_DISABLE_NVRAM - void __iomem *regs = mvi->regs; - int rc, i; - u32 sum; - u8 hdr[2], *tmp; - const char *msg; + i = 
sizeof(struct mvs_prd) * tei->n_elem; + buf_tmp += i; + buf_tmp_dma += i; - rc = mvs_eep_read_buf(regs, addr, &hdr, 2); - if (rc) { - msg = "nvram hdr read failed"; - goto err_out; - } - rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen); - if (rc) { - msg = "nvram read failed"; - goto err_out; - } + /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ + slot->response = buf_tmp; + hdr->status_buf = cpu_to_le64(buf_tmp_dma); - if (hdr[0] != 0x5A) { - /* entry id */ - msg = "invalid nvram entry id"; - rc = -ENOENT; - goto err_out; - } + /* + * Fill in TX ring and command slot header + */ + slot->tx = mvi->tx_prod; + mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | + TXQ_MODE_I | tag | + (sas_port->phy_mask << TXQ_PHY_SHIFT)); - tmp = buf; - sum = ((u32)hdr[0]) + ((u32)hdr[1]); - for (i = 0; i < buflen; i++) - sum += ((u32)tmp[i]); + hdr->flags |= flags; + hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); + hdr->tags = cpu_to_le32(tag); + hdr->data_len = 0; - if (sum) { - msg = "nvram checksum failure"; - rc = -EILSEQ; - goto err_out; + /* generate open address frame hdr (first 12 bytes) */ + buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */ + buf_oaf[1] = task->dev->linkrate & 0xf; + *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ + memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); + + /* fill in PRD (scatter/gather) table, if any */ + for_each_sg(task->scatter, sg, tei->n_elem, i) { + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); + buf_prd->len = cpu_to_le32(sg_dma_len(sg)); + buf_prd++; } +#if _MV_DUMP + /* copy cmd table */ + from = kmap_atomic(sg_page(sg_req), KM_IRQ0); + memcpy(buf_cmd, from + sg_req->offset, req_len); + kunmap_atomic(from, KM_IRQ0); +#endif return 0; +err_out_2: + pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); err_out: - dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg); + pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1, + PCI_DMA_TODEVICE); return rc; -#else - /* FIXME , For SAS target mode */ - memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8); - return 0; -#endif } -static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) +static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) { - struct mvs_phy *phy = &mvi->phy[i]; - struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; - - if (!phy->phy_attached) - return; - - if (sas_phy->phy) { - struct sas_phy *sphy = sas_phy->phy; - - sphy->negotiated_linkrate = sas_phy->linkrate; - sphy->minimum_linkrate = phy->minimum_linkrate; - sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; - sphy->maximum_linkrate = phy->maximum_linkrate; - sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; - } - - if (phy->phy_type & PORT_TYPE_SAS) { - struct sas_identify_frame *id; + struct ata_queued_cmd *qc = task->uldd_task; - id = (struct sas_identify_frame *)phy->frame_rcvd; - id->dev_type = phy->identify.device_type; - id->initiator_bits = SAS_PROTOCOL_ALL; - id->target_bits = phy->identify.target_port_protocols; - } else if (phy->phy_type & PORT_TYPE_SATA) { - /* TODO */ + if (qc) { + if (qc->tf.command == ATA_CMD_FPDMA_WRITE || + qc->tf.command == ATA_CMD_FPDMA_READ) { + *tag = qc->tag; + return 1; + } } - mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size; - mvi->sas.notify_port_event(mvi->sas.sas_phy[i], - PORTE_BYTES_DMAED); -} -static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) -{ - /* give the phy enabling interrupt event time to come in (1s - * is empirically about all 
it takes) */ - if (time < HZ) - return 0; - /* Wait for discovery to finish */ - scsi_flush_work(shost); - return 1; + return 0; } -static void mvs_scan_start(struct Scsi_Host *shost) -{ - int i; - struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha; - - for (i = 0; i < mvi->chip->n_phy; ++i) { - mvs_bytes_dmaed(mvi, i); - } -} - -static int mvs_slave_configure(struct scsi_device *sdev) -{ - struct domain_device *dev = sdev_to_domain_dev(sdev); - int ret = sas_slave_configure(sdev); - - if (ret) - return ret; - - if (dev_is_sata(dev)) { - /* struct ata_port *ap = dev->sata_dev.ap; */ - /* struct ata_device *adev = ap->link.device; */ - - /* clamp at no NCQ for the time being */ - /* adev->flags |= ATA_DFLAG_NCQ_OFF; */ - scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); - } - return 0; -} - -static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) -{ - struct pci_dev *pdev = mvi->pdev; - struct sas_ha_struct *sas_ha = &mvi->sas; - struct mvs_phy *phy = &mvi->phy[phy_no]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - - phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no); - /* - * events is port event now , - * we need check the interrupt status which belongs to per port. - */ - dev_printk(KERN_DEBUG, &pdev->dev, - "Port %d Event = %X\n", - phy_no, phy->irq_status); - - if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) { - mvs_release_task(mvi, phy_no); - if (!mvs_is_phy_ready(mvi, phy_no)) { - sas_phy_disconnected(sas_phy); - sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); - dev_printk(KERN_INFO, &pdev->dev, - "Port %d Unplug Notice\n", phy_no); - - } else - mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL); - } - if (!(phy->irq_status & PHYEV_DEC_ERR)) { - if (phy->irq_status & PHYEV_COMWAKE) { - u32 tmp = mvs_read_port_irq_mask(mvi, phy_no); - mvs_write_port_irq_mask(mvi, phy_no, - tmp | PHYEV_SIG_FIS); - } - if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { - phy->phy_status = mvs_is_phy_ready(mvi, phy_no); - if (phy->phy_status) { - mvs_detect_porttype(mvi, phy_no); - - if (phy->phy_type & PORT_TYPE_SATA) { - u32 tmp = mvs_read_port_irq_mask(mvi, - phy_no); - tmp &= ~PHYEV_SIG_FIS; - mvs_write_port_irq_mask(mvi, - phy_no, tmp); - } - - mvs_update_phyinfo(mvi, phy_no, 0); - sas_ha->notify_phy_event(sas_phy, - PHYE_OOB_DONE); - mvs_bytes_dmaed(mvi, phy_no); - } else { - dev_printk(KERN_DEBUG, &pdev->dev, - "plugin interrupt but phy is gone\n"); - mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, - NULL); - } - } else if (phy->irq_status & PHYEV_BROAD_CH) { - mvs_release_task(mvi, phy_no); - sas_ha->notify_port_event(sas_phy, - PORTE_BROADCAST_RCVD); - } - } - mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status); -} - -static void mvs_int_sata(struct mvs_info *mvi) -{ - u32 tmp; - void __iomem *regs = mvi->regs; - tmp = mr32(INT_STAT_SRS); - mw32(INT_STAT_SRS, tmp & 0xFFFF); -} - -static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, - u32 slot_idx) -{ - void __iomem *regs = mvi->regs; - struct domain_device *dev = task->dev; - struct asd_sas_port *sas_port = dev->port; - struct mvs_port *port = mvi->slot_info[slot_idx].port; - u32 reg_set, phy_mask; - - if (!sas_protocol_ata(task->task_proto)) { - reg_set = 0; - phy_mask = (port->wide_port_phymap) ? 
port->wide_port_phymap : - sas_port->phy_mask; - } else { - reg_set = port->taskfileset; - phy_mask = sas_port->phy_mask; - } - mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx | - (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) | - (phy_mask << TXQ_PHY_SHIFT) | - (reg_set << TXQ_SRS_SHIFT)); - - mw32(TX_PROD_IDX, mvi->tx_prod); - mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); -} - -static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, - u32 slot_idx, int err) -{ - struct mvs_port *port = mvi->slot_info[slot_idx].port; - struct task_status_struct *tstat = &task->task_status; - struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; - int stat = SAM_GOOD; - - resp->frame_len = sizeof(struct dev_to_host_fis); - memcpy(&resp->ending_fis[0], - SATA_RECEIVED_D2H_FIS(port->taskfileset), - sizeof(struct dev_to_host_fis)); - tstat->buf_valid_size = sizeof(*resp); - if (unlikely(err)) - stat = SAS_PROTO_RESPONSE; - return stat; -} - -static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) -{ - u32 slot_idx = rx_desc & RXQ_SLOT_MASK; - mvs_tag_clear(mvi, slot_idx); -} - -static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, - struct mvs_slot_info *slot, u32 slot_idx) -{ - if (!sas_protocol_ata(task->task_proto)) - if (slot->n_elem) - pci_unmap_sg(mvi->pdev, task->scatter, - slot->n_elem, task->data_dir); - - switch (task->task_proto) { - case SAS_PROTOCOL_SMP: - pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1, - PCI_DMA_FROMDEVICE); - pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1, - PCI_DMA_TODEVICE); - break; - - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SSP: - default: - /* do nothing */ - break; - } - list_del(&slot->list); - task->lldd_task = NULL; - slot->task = NULL; - slot->port = NULL; -} - -static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, - u32 slot_idx) -{ - struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; - u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); - u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4)); - int stat = SAM_CHECK_COND; - - if (err_dw1 & SLOT_BSY_ERR) { - stat = SAS_QUEUE_FULL; - mvs_slot_reset(mvi, task, slot_idx); - } - switch (task->task_proto) { - case SAS_PROTOCOL_SSP: - break; - case SAS_PROTOCOL_SMP: - break; - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: - if (err_dw0 & TFILE_ERR) - stat = mvs_sata_done(mvi, task, slot_idx, 1); - break; - default: - break; - } - - mvs_hexdump(16, (u8 *) slot->response, 0); - return stat; -} - -static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) -{ - u32 slot_idx = rx_desc & RXQ_SLOT_MASK; - struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; - struct sas_task *task = slot->task; - struct task_status_struct *tstat; - struct mvs_port *port; - bool aborted; - void *to; - - if (unlikely(!task || !task->lldd_task)) - return -1; - - mvs_hba_cq_dump(mvi); - - spin_lock(&task->task_state_lock); - aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; - if (!aborted) { - task->task_state_flags &= - ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); - task->task_state_flags |= SAS_TASK_STATE_DONE; - } - spin_unlock(&task->task_state_lock); - - if (aborted) { - mvs_slot_task_free(mvi, task, slot, slot_idx); - mvs_slot_free(mvi, rx_desc); - return -1; - } - - port = slot->port; - tstat = &task->task_status; - memset(tstat, 0, sizeof(*tstat)); - tstat->resp = SAS_TASK_COMPLETE; - - if (unlikely(!port->port_attached || flags)) { - 
mvs_slot_err(mvi, task, slot_idx); - if (!sas_protocol_ata(task->task_proto)) - tstat->stat = SAS_PHY_DOWN; - goto out; - } - - /* error info record present */ - if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { - tstat->stat = mvs_slot_err(mvi, task, slot_idx); - goto out; - } - - switch (task->task_proto) { - case SAS_PROTOCOL_SSP: - /* hw says status == 0, datapres == 0 */ - if (rx_desc & RXQ_GOOD) { - tstat->stat = SAM_GOOD; - tstat->resp = SAS_TASK_COMPLETE; - } - /* response frame present */ - else if (rx_desc & RXQ_RSP) { - struct ssp_response_iu *iu = - slot->response + sizeof(struct mvs_err_info); - sas_ssp_task_response(&mvi->pdev->dev, task, iu); - } - - /* should never happen? */ - else - tstat->stat = SAM_CHECK_COND; - break; - - case SAS_PROTOCOL_SMP: { - struct scatterlist *sg_resp = &task->smp_task.smp_resp; - tstat->stat = SAM_GOOD; - to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); - memcpy(to + sg_resp->offset, - slot->response + sizeof(struct mvs_err_info), - sg_dma_len(sg_resp)); - kunmap_atomic(to, KM_IRQ0); - break; - } - - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { - tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); - break; - } - - default: - tstat->stat = SAM_CHECK_COND; - break; - } - -out: - mvs_slot_task_free(mvi, task, slot, slot_idx); - if (unlikely(tstat->stat != SAS_QUEUE_FULL)) - mvs_slot_free(mvi, rx_desc); - - spin_unlock(&mvi->lock); - task->task_done(task); - spin_lock(&mvi->lock); - return tstat->stat; -} - -static void mvs_release_task(struct mvs_info *mvi, int phy_no) -{ - struct list_head *pos, *n; - struct mvs_slot_info *slot; - struct mvs_phy *phy = &mvi->phy[phy_no]; - struct mvs_port *port = phy->port; - u32 rx_desc; - - if (!port) - return; - - list_for_each_safe(pos, n, &port->list) { - slot = container_of(pos, struct mvs_slot_info, list); - rx_desc = (u32) (slot - mvi->slot_info); - mvs_slot_complete(mvi, rx_desc, 1); - } -} - -static void mvs_int_full(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp, stat; - int i; - - stat = mr32(INT_STAT); - - mvs_int_rx(mvi, false); - - for (i = 0; i < MVS_MAX_PORTS; i++) { - tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); - if (tmp) - mvs_int_port(mvi, i, tmp); - } - - if (stat & CINT_SRS) - mvs_int_sata(mvi); - - mw32(INT_STAT, stat); -} - -static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) -{ - void __iomem *regs = mvi->regs; - u32 rx_prod_idx, rx_desc; - bool attn = false; - struct pci_dev *pdev = mvi->pdev; - - /* the first dword in the RX ring is special: it contains - * a mirror of the hardware's RX producer index, so that - * we don't have to stall the CPU reading that register. - * The actual RX ring is offset by one dword, due to this. 
- */ - rx_prod_idx = mvi->rx_cons; - mvi->rx_cons = le32_to_cpu(mvi->rx[0]); - if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ - return 0; - - /* The CMPL_Q may come late, read from register and try again - * note: if coalescing is enabled, - * it will need to read from register every time for sure - */ - if (mvi->rx_cons == rx_prod_idx) - mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK; - - if (mvi->rx_cons == rx_prod_idx) - return 0; - - while (mvi->rx_cons != rx_prod_idx) { - - /* increment our internal RX consumer pointer */ - rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); - - rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); - - if (likely(rx_desc & RXQ_DONE)) - mvs_slot_complete(mvi, rx_desc, 0); - if (rx_desc & RXQ_ATTN) { - attn = true; - dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n", - rx_desc); - } else if (rx_desc & RXQ_ERR) { - if (!(rx_desc & RXQ_DONE)) - mvs_slot_complete(mvi, rx_desc, 0); - dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n", - rx_desc); - } else if (rx_desc & RXQ_SLOT_RESET) { - dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n", - rx_desc); - mvs_slot_free(mvi, rx_desc); - } - } - - if (attn && self_clear) - mvs_int_full(mvi); - - return 0; -} - -#ifdef MVS_USE_TASKLET -static void mvs_tasklet(unsigned long data) -{ - struct mvs_info *mvi = (struct mvs_info *) data; - unsigned long flags; - - spin_lock_irqsave(&mvi->lock, flags); - -#ifdef MVS_DISABLE_MSI - mvs_int_full(mvi); -#else - mvs_int_rx(mvi, true); -#endif - spin_unlock_irqrestore(&mvi->lock, flags); -} -#endif - -static irqreturn_t mvs_interrupt(int irq, void *opaque) -{ - struct mvs_info *mvi = opaque; - void __iomem *regs = mvi->regs; - u32 stat; - - stat = mr32(GBL_INT_STAT); - - if (stat == 0 || stat == 0xffffffff) - return IRQ_NONE; - - /* clear CMD_CMPLT ASAP */ - mw32_f(INT_STAT, CINT_DONE); - -#ifndef MVS_USE_TASKLET - spin_lock(&mvi->lock); - - mvs_int_full(mvi); - - spin_unlock(&mvi->lock); -#else - tasklet_schedule(&mvi->tasklet); -#endif - return IRQ_HANDLED; -} - -#ifndef MVS_DISABLE_MSI -static irqreturn_t mvs_msi_interrupt(int irq, void *opaque) -{ - struct mvs_info *mvi = opaque; - -#ifndef MVS_USE_TASKLET - spin_lock(&mvi->lock); - - mvs_int_rx(mvi, true); - - spin_unlock(&mvi->lock); -#else - tasklet_schedule(&mvi->tasklet); -#endif - return IRQ_HANDLED; -} -#endif - -struct mvs_task_exec_info { - struct sas_task *task; - struct mvs_cmd_hdr *hdr; - struct mvs_port *port; - u32 tag; - int n_elem; -}; - -static int mvs_task_prep_smp(struct mvs_info *mvi, - struct mvs_task_exec_info *tei) -{ - int elem, rc, i; - struct sas_task *task = tei->task; - struct mvs_cmd_hdr *hdr = tei->hdr; - struct scatterlist *sg_req, *sg_resp; - u32 req_len, resp_len, tag = tei->tag; - void *buf_tmp; - u8 *buf_oaf; - dma_addr_t buf_tmp_dma; - struct mvs_prd *buf_prd; - struct scatterlist *sg; - struct mvs_slot_info *slot = &mvi->slot_info[tag]; - struct asd_sas_port *sas_port = task->dev->port; - u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); -#if _MV_DUMP - u8 *buf_cmd; - void *from; -#endif - /* - * DMA-map SMP request, response buffers - */ - sg_req = &task->smp_task.smp_req; - elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE); - if (!elem) - return -ENOMEM; - req_len = sg_dma_len(sg_req); - - sg_resp = &task->smp_task.smp_resp; - elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE); - if (!elem) { - rc = -ENOMEM; - goto err_out; - } - resp_len = sg_dma_len(sg_resp); - - /* must be in dwords */ - if ((req_len & 0x3) || (resp_len & 0x3)) { - rc = -EINVAL; 
- goto err_out_2; - } - - /* - * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs - */ - - /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ - buf_tmp = slot->buf; - buf_tmp_dma = slot->buf_dma; - -#if _MV_DUMP - buf_cmd = buf_tmp; - hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); - buf_tmp += req_len; - buf_tmp_dma += req_len; - slot->cmd_size = req_len; -#else - hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); -#endif - - /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ - buf_oaf = buf_tmp; - hdr->open_frame = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_OAF_SZ; - buf_tmp_dma += MVS_OAF_SZ; - - /* region 3: PRD table ********************************************* */ - buf_prd = buf_tmp; - if (tei->n_elem) - hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); - else - hdr->prd_tbl = 0; - - i = sizeof(struct mvs_prd) * tei->n_elem; - buf_tmp += i; - buf_tmp_dma += i; - - /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ - slot->response = buf_tmp; - hdr->status_buf = cpu_to_le64(buf_tmp_dma); - - /* - * Fill in TX ring and command slot header - */ - slot->tx = mvi->tx_prod; - mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | - TXQ_MODE_I | tag | - (sas_port->phy_mask << TXQ_PHY_SHIFT)); - - hdr->flags |= flags; - hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); - hdr->tags = cpu_to_le32(tag); - hdr->data_len = 0; - - /* generate open address frame hdr (first 12 bytes) */ - buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */ - buf_oaf[1] = task->dev->linkrate & 0xf; - *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); - - /* fill in PRD (scatter/gather) table, if any */ - for_each_sg(task->scatter, sg, tei->n_elem, i) { - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); - buf_prd++; - } - -#if _MV_DUMP - /* copy cmd table */ - from = kmap_atomic(sg_page(sg_req), KM_IRQ0); - memcpy(buf_cmd, from + sg_req->offset, req_len); - kunmap_atomic(from, KM_IRQ0); -#endif - return 0; - -err_out_2: - pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1, - PCI_DMA_FROMDEVICE); -err_out: - pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1, - PCI_DMA_TODEVICE); - return rc; -} - -static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port) -{ - void __iomem *regs = mvi->regs; - u32 tmp, offs; - u8 *tfs = &port->taskfileset; - - if (*tfs == MVS_ID_NOT_MAPPED) - return; - - offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); - if (*tfs < 16) { - tmp = mr32(PCS); - mw32(PCS, tmp & ~offs); - } else { - tmp = mr32(CTL); - mw32(CTL, tmp & ~offs); - } - - tmp = mr32(INT_STAT_SRS) & (1U << *tfs); - if (tmp) - mw32(INT_STAT_SRS, tmp); - - *tfs = MVS_ID_NOT_MAPPED; -} - -static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port) -{ - int i; - u32 tmp, offs; - void __iomem *regs = mvi->regs; - - if (port->taskfileset != MVS_ID_NOT_MAPPED) - return 0; - - tmp = mr32(PCS); - - for (i = 0; i < mvi->chip->srs_sz; i++) { - if (i == 16) - tmp = mr32(CTL); - offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); - if (!(tmp & offs)) { - port->taskfileset = i; - - if (i < 16) - mw32(PCS, tmp | offs); - else - mw32(CTL, tmp | offs); - tmp = mr32(INT_STAT_SRS) & (1U << i); - if (tmp) - mw32(INT_STAT_SRS, tmp); - return 0; - } - } - return MVS_ID_NOT_MAPPED; -} - -static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) -{ - struct ata_queued_cmd *qc = 
task->uldd_task; - - if (qc) { - if (qc->tf.command == ATA_CMD_FPDMA_WRITE || - qc->tf.command == ATA_CMD_FPDMA_READ) { - *tag = qc->tag; - return 1; - } - } - - return 0; -} - -static int mvs_task_prep_ata(struct mvs_info *mvi, - struct mvs_task_exec_info *tei) +static int mvs_task_prep_ata(struct mvs_info *mvi, + struct mvs_task_exec_info *tei) { struct sas_task *task = tei->task; struct domain_device *dev = task->dev; @@ -2037,7 +723,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, return 0; } -static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) +int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) { struct domain_device *dev = task->dev; struct mvs_info *mvi = dev->port->ha->lldd_ha; @@ -2055,823 +741,900 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) dev = t->dev; tei.port = &mvi->port[dev->port->id]; - if (!tei.port->port_attached) { - if (sas_protocol_ata(t->task_proto)) { - rc = SAS_PHY_DOWN; - goto out_done; - } else { - struct task_status_struct *ts = &t->task_status; - ts->resp = SAS_TASK_UNDELIVERED; - ts->stat = SAS_PHY_DOWN; - t->task_done(t); - if (n > 1) - t = list_entry(t->list.next, - struct sas_task, list); - continue; - } - } + if (!tei.port->port_attached) { + if (sas_protocol_ata(t->task_proto)) { + rc = SAS_PHY_DOWN; + goto out_done; + } else { + struct task_status_struct *ts = &t->task_status; + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + t->task_done(t); + if (n > 1) + t = list_entry(t->list.next, + struct sas_task, list); + continue; + } + } + + if (!sas_protocol_ata(t->task_proto)) { + if (t->num_scatter) { + n_elem = pci_map_sg(mvi->pdev, t->scatter, + t->num_scatter, + t->data_dir); + if (!n_elem) { + rc = -ENOMEM; + goto err_out; + } + } + } else { + n_elem = t->num_scatter; + } + + rc = mvs_tag_alloc(mvi, &tag); + if (rc) + goto err_out; + + slot = &mvi->slot_info[tag]; + t->lldd_task = NULL; + slot->n_elem = n_elem; + memset(slot->buf, 0, MVS_SLOT_BUF_SZ); + tei.task = t; + tei.hdr = &mvi->slot[tag]; + tei.tag = tag; + tei.n_elem = n_elem; + + switch (t->task_proto) { + case SAS_PROTOCOL_SMP: + rc = mvs_task_prep_smp(mvi, &tei); + break; + case SAS_PROTOCOL_SSP: + rc = mvs_task_prep_ssp(mvi, &tei); + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + rc = mvs_task_prep_ata(mvi, &tei); + break; + default: + dev_printk(KERN_ERR, &pdev->dev, + "unknown sas_task proto: 0x%x\n", + t->task_proto); + rc = -EINVAL; + break; + } + + if (rc) + goto err_out_tag; + + slot->task = t; + slot->port = tei.port; + t->lldd_task = (void *) slot; + list_add_tail(&slot->list, &slot->port->list); + /* TODO: select normal or high priority */ + + spin_lock(&t->task_state_lock); + t->task_state_flags |= SAS_TASK_AT_INITIATOR; + spin_unlock(&t->task_state_lock); + + mvs_hba_memory_dump(mvi, tag, t->task_proto); + + ++pass; + mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); + if (n > 1) + t = list_entry(t->list.next, struct sas_task, list); + } while (--n); + + rc = 0; + goto out_done; + +err_out_tag: + mvs_tag_free(mvi, tag); +err_out: + dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc); + if (!sas_protocol_ata(t->task_proto)) + if (n_elem) + pci_unmap_sg(mvi->pdev, t->scatter, n_elem, + t->data_dir); +out_done: + if (pass) + mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); + spin_unlock_irqrestore(&mvi->lock, flags); + return rc; +} + +static void mvs_slot_free(struct mvs_info *mvi, 
u32 rx_desc)
+{
+	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+	mvs_tag_clear(mvi, slot_idx);
+}
+
+static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
+			  struct mvs_slot_info *slot, u32 slot_idx)
+{
+	if (!sas_protocol_ata(task->task_proto))
+		if (slot->n_elem)
+			pci_unmap_sg(mvi->pdev, task->scatter,
+				     slot->n_elem, task->data_dir);
+
+	switch (task->task_proto) {
+	case SAS_PROTOCOL_SMP:
+		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
+			     PCI_DMA_FROMDEVICE);
+		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
+			     PCI_DMA_TODEVICE);
+		break;
+
+	case SAS_PROTOCOL_SATA:
+	case SAS_PROTOCOL_STP:
+	case SAS_PROTOCOL_SSP:
+	default:
+		/* do nothing */
+		break;
+	}
+	list_del(&slot->list);
+	task->lldd_task = NULL;
+	slot->task = NULL;
+	slot->port = NULL;
+}
+
+static void mvs_update_wideport(struct mvs_info *mvi, int i)
+{
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct mvs_port *port = phy->port;
+	int j, no;
+
+	for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
+		if (no & 1) {
+			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
+			mvs_write_port_cfg_data(mvi, no,
+					port->wide_port_phymap);
+		} else {
+			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
+			mvs_write_port_cfg_data(mvi, no, 0);
+		}
+}
+
+static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
+{
+	u32 tmp;
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct mvs_port *port = phy->port;
+
+	tmp = mvs_read_phy_ctl(mvi, i);
+
+	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
+		if (!port)
+			phy->phy_attached = 1;
+		return tmp;
+	}
+
+	if (port) {
+		if (phy->phy_type & PORT_TYPE_SAS) {
+			port->wide_port_phymap &= ~(1U << i);
+			if (!port->wide_port_phymap)
+				port->port_attached = 0;
+			mvs_update_wideport(mvi, i);
+		} else if (phy->phy_type & PORT_TYPE_SATA)
+			port->port_attached = 0;
+		mvs_free_reg_set(mvi, phy->port);
+		phy->port = NULL;
+		phy->phy_attached = 0;
+		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+	}
+	return 0;
+}
+
+static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
+{
+	u32 *s = (u32 *) buf;
+
+	if (!s)
+		return NULL;
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
+	s[3] = mvs_read_port_cfg_data(mvi, i);
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
+	s[2] = mvs_read_port_cfg_data(mvi, i);
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
+	s[1] = mvs_read_port_cfg_data(mvi, i);
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
+	s[0] = mvs_read_port_cfg_data(mvi, i);
+
+	return (void *)s;
+}
+
+static u32 mvs_is_sig_fis_received(u32 irq_status)
+{
+	return irq_status & PHYEV_SIG_FIS;
+}
+
+static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
+					int get_st)
+{
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct pci_dev *pdev = mvi->pdev;
+	u32 tmp;
+	u64 tmp64;
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
+	phy->dev_info = mvs_read_port_cfg_data(mvi, i);
-	if (!sas_protocol_ata(t->task_proto)) {
-		if (t->num_scatter) {
-			n_elem = pci_map_sg(mvi->pdev, t->scatter,
-					    t->num_scatter,
-					    t->data_dir);
-			if (!n_elem) {
-				rc = -ENOMEM;
-				goto err_out;
-			}
-		}
-	} else {
-		n_elem = t->num_scatter;
-	}
+	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
+	phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
-	rc = mvs_tag_alloc(mvi, &tag);
-	if (rc)
-		goto err_out;
+	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
+	phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
-	slot = &mvi->slot_info[tag];
-	t->lldd_task = NULL;
-	slot->n_elem = n_elem;
-	memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
-	tei.task = t;
-	tei.hdr = 
&mvi->slot[tag]; - tei.tag = tag; - tei.n_elem = n_elem; + if (get_st) { + phy->irq_status = mvs_read_port_irq_stat(mvi, i); + phy->phy_status = mvs_is_phy_ready(mvi, i); + } - switch (t->task_proto) { - case SAS_PROTOCOL_SMP: - rc = mvs_task_prep_smp(mvi, &tei); - break; - case SAS_PROTOCOL_SSP: - rc = mvs_task_prep_ssp(mvi, &tei); - break; - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: - rc = mvs_task_prep_ata(mvi, &tei); - break; - default: - dev_printk(KERN_ERR, &pdev->dev, - "unknown sas_task proto: 0x%x\n", - t->task_proto); - rc = -EINVAL; - break; - } + if (phy->phy_status) { + u32 phy_st; + struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; - if (rc) - goto err_out_tag; + mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); + phy_st = mvs_read_port_cfg_data(mvi, i); - slot->task = t; - slot->port = tei.port; - t->lldd_task = (void *) slot; - list_add_tail(&slot->list, &slot->port->list); - /* TODO: select normal or high priority */ + sas_phy->linkrate = + (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; + phy->minimum_linkrate = + (phy->phy_status & + PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; + phy->maximum_linkrate = + (phy->phy_status & + PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; - spin_lock(&t->task_state_lock); - t->task_state_flags |= SAS_TASK_AT_INITIATOR; - spin_unlock(&t->task_state_lock); + if (phy->phy_type & PORT_TYPE_SAS) { + /* Updated attached_sas_addr */ + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); + phy->att_dev_sas_addr = + (u64) mvs_read_port_cfg_data(mvi, i) << 32; + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); + phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); + phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); + phy->identify.device_type = + phy->att_dev_info & PORT_DEV_TYPE_MASK; - mvs_hba_memory_dump(mvi, tag, t->task_proto); + if (phy->identify.device_type == SAS_END_DEV) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != NO_DEVICE) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SMP; + if (phy_st & PHY_OOB_DTCTD) + sas_phy->oob_mode = SAS_OOB_MODE; + phy->frame_rcvd_size = + sizeof(struct sas_identify_frame); + } else if (phy->phy_type & PORT_TYPE_SATA) { + phy->identify.target_port_protocols = SAS_PROTOCOL_STP; + if (mvs_is_sig_fis_received(phy->irq_status)) { + phy->att_dev_sas_addr = i; /* temp */ + if (phy_st & PHY_OOB_DTCTD) + sas_phy->oob_mode = SATA_OOB_MODE; + phy->frame_rcvd_size = + sizeof(struct dev_to_host_fis); + mvs_get_d2h_reg(mvi, i, + (void *)sas_phy->frame_rcvd); + } else { + dev_printk(KERN_DEBUG, &pdev->dev, + "No sig fis\n"); + phy->phy_type &= ~(PORT_TYPE_SATA); + goto out_done; + } + } + tmp64 = cpu_to_be64(phy->att_dev_sas_addr); + memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE); - ++pass; - mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); - if (n > 1) - t = list_entry(t->list.next, struct sas_task, list); - } while (--n); + dev_printk(KERN_DEBUG, &pdev->dev, + "phy[%d] Get Attached Address 0x%llX ," + " SAS Address 0x%llX\n", + i, + (unsigned long long)phy->att_dev_sas_addr, + (unsigned long long)phy->dev_sas_addr); + dev_printk(KERN_DEBUG, &pdev->dev, + "Rate = %x , type = %d\n", + sas_phy->linkrate, phy->phy_type); - rc = 0; - goto out_done; + /* workaround for HW phy decoding error on 1.5g disk drive */ + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); + tmp = mvs_read_port_vsr_data(mvi, i); 
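+		/* 1.5 Gbps links need PHY_MODE6_LATECLK cleared;
+		 * faster rates need it set (hence the if/else below) */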
+ if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == + SAS_LINK_RATE_1_5_GBPS) + tmp &= ~PHY_MODE6_LATECLK; + else + tmp |= PHY_MODE6_LATECLK; + mvs_write_port_vsr_data(mvi, i, tmp); -err_out_tag: - mvs_tag_free(mvi, tag); -err_out: - dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc); - if (!sas_protocol_ata(t->task_proto)) - if (n_elem) - pci_unmap_sg(mvi->pdev, t->scatter, n_elem, - t->data_dir); + } out_done: - if (pass) - mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); - spin_unlock_irqrestore(&mvi->lock, flags); - return rc; + if (get_st) + mvs_write_port_irq_stat(mvi, i, phy->irq_status); } -static int mvs_task_abort(struct sas_task *task) +void mvs_port_formed(struct asd_sas_phy *sas_phy) { - int rc; + struct sas_ha_struct *sas_ha = sas_phy->ha; + struct mvs_info *mvi = sas_ha->lldd_ha; + struct asd_sas_port *sas_port = sas_phy->port; + struct mvs_phy *phy = sas_phy->lldd_phy; + struct mvs_port *port = &mvi->port[sas_port->id]; unsigned long flags; - struct mvs_info *mvi = task->dev->port->ha->lldd_ha; - struct pci_dev *pdev = mvi->pdev; - int tag; - spin_lock_irqsave(&task->task_state_lock, flags); - if (task->task_state_flags & SAS_TASK_STATE_DONE) { - rc = TMF_RESP_FUNC_COMPLETE; - spin_unlock_irqrestore(&task->task_state_lock, flags); - goto out_done; + spin_lock_irqsave(&mvi->lock, flags); + port->port_attached = 1; + phy->port = port; + port->taskfileset = MVS_ID_NOT_MAPPED; + if (phy->phy_type & PORT_TYPE_SAS) { + port->wide_port_phymap = sas_port->phy_mask; + mvs_update_wideport(mvi, sas_phy->id); } - spin_unlock_irqrestore(&task->task_state_lock, flags); + spin_unlock_irqrestore(&mvi->lock, flags); +} + +int mvs_I_T_nexus_reset(struct domain_device *dev) +{ + return TMF_RESP_FUNC_FAILED; +} + +static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, + u32 slot_idx, int err) +{ + struct mvs_port *port = mvi->slot_info[slot_idx].port; + struct task_status_struct *tstat = &task->task_status; + struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; + int stat = SAM_GOOD; + + resp->frame_len = sizeof(struct dev_to_host_fis); + memcpy(&resp->ending_fis[0], + SATA_RECEIVED_D2H_FIS(port->taskfileset), + sizeof(struct dev_to_host_fis)); + tstat->buf_valid_size = sizeof(*resp); + if (unlikely(err)) + stat = SAS_PROTO_RESPONSE; + return stat; +} + +static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, + u32 slot_idx) +{ + struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; + u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); + u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4)); + int stat = SAM_CHECK_COND; + if (err_dw1 & SLOT_BSY_ERR) { + stat = SAS_QUEUE_FULL; + mvs_slot_reset(mvi, task, slot_idx); + } switch (task->task_proto) { - case SAS_PROTOCOL_SMP: - dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n"); - break; case SAS_PROTOCOL_SSP: - dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n"); + break; + case SAS_PROTOCOL_SMP: break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{ - dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! 
\n"); -#if _MV_DUMP - dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n"); - mvs_hexdump(sizeof(struct host_to_dev_fis), - (void *)&task->ata_task.fis, 0); - dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n"); - mvs_hexdump(16, task->ata_task.atapi_packet, 0); -#endif - spin_lock_irqsave(&task->task_state_lock, flags); - if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { - /* TODO */ - ; - } - spin_unlock_irqrestore(&task->task_state_lock, flags); + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + if (err_dw0 & TFILE_ERR) + stat = mvs_sata_done(mvi, task, slot_idx, 1); break; - } default: break; } - if (mvs_find_tag(mvi, task, &tag)) { - spin_lock_irqsave(&mvi->lock, flags); - mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag); - spin_unlock_irqrestore(&mvi->lock, flags); - } - if (!mvs_task_exec(task, 1, GFP_ATOMIC)) - rc = TMF_RESP_FUNC_COMPLETE; - else - rc = TMF_RESP_FUNC_FAILED; -out_done: - return rc; + mvs_hexdump(16, (u8 *) slot->response, 0); + return stat; } -static void mvs_free(struct mvs_info *mvi) +static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) { - int i; + u32 slot_idx = rx_desc & RXQ_SLOT_MASK; + struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; + struct sas_task *task = slot->task; + struct task_status_struct *tstat; + struct mvs_port *port; + bool aborted; + void *to; - if (!mvi) - return; + if (unlikely(!task || !task->lldd_task)) + return -1; - for (i = 0; i < MVS_SLOTS; i++) { - struct mvs_slot_info *slot = &mvi->slot_info[i]; + mvs_hba_cq_dump(mvi); - if (slot->buf) - dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ, - slot->buf, slot->buf_dma); + spin_lock(&task->task_state_lock); + aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; + if (!aborted) { + task->task_state_flags &= + ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); + task->task_state_flags |= SAS_TASK_STATE_DONE; } + spin_unlock(&task->task_state_lock); - if (mvi->tx) - dma_free_coherent(&mvi->pdev->dev, - sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, - mvi->tx, mvi->tx_dma); - if (mvi->rx_fis) - dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ, - mvi->rx_fis, mvi->rx_fis_dma); - if (mvi->rx) - dma_free_coherent(&mvi->pdev->dev, - sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), - mvi->rx, mvi->rx_dma); - if (mvi->slot) - dma_free_coherent(&mvi->pdev->dev, - sizeof(*mvi->slot) * MVS_SLOTS, - mvi->slot, mvi->slot_dma); -#ifdef MVS_ENABLE_PERI - if (mvi->peri_regs) - iounmap(mvi->peri_regs); -#endif - if (mvi->regs) - iounmap(mvi->regs); - if (mvi->shost) - scsi_host_put(mvi->shost); - kfree(mvi->sas.sas_port); - kfree(mvi->sas.sas_phy); - kfree(mvi); -} + if (aborted) { + mvs_slot_task_free(mvi, task, slot, slot_idx); + mvs_slot_free(mvi, rx_desc); + return -1; + } -/* FIXME: locking? 
*/ -static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, - void *funcdata) -{ - struct mvs_info *mvi = sas_phy->ha->lldd_ha; - int rc = 0, phy_id = sas_phy->id; - u32 tmp; + port = slot->port; + tstat = &task->task_status; + memset(tstat, 0, sizeof(*tstat)); + tstat->resp = SAS_TASK_COMPLETE; - tmp = mvs_read_phy_ctl(mvi, phy_id); + if (unlikely(!port->port_attached || flags)) { + mvs_slot_err(mvi, task, slot_idx); + if (!sas_protocol_ata(task->task_proto)) + tstat->stat = SAS_PHY_DOWN; + goto out; + } - switch (func) { - case PHY_FUNC_SET_LINK_RATE:{ - struct sas_phy_linkrates *rates = funcdata; - u32 lrmin = 0, lrmax = 0; + /* error info record present */ + if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { + tstat->stat = mvs_slot_err(mvi, task, slot_idx); + goto out; + } - lrmin = (rates->minimum_linkrate << 8); - lrmax = (rates->maximum_linkrate << 12); + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + /* hw says status == 0, datapres == 0 */ + if (rx_desc & RXQ_GOOD) { + tstat->stat = SAM_GOOD; + tstat->resp = SAS_TASK_COMPLETE; + } + /* response frame present */ + else if (rx_desc & RXQ_RSP) { + struct ssp_response_iu *iu = + slot->response + sizeof(struct mvs_err_info); + sas_ssp_task_response(&mvi->pdev->dev, task, iu); + } - if (lrmin) { - tmp &= ~(0xf << 8); - tmp |= lrmin; - } - if (lrmax) { - tmp &= ~(0xf << 12); - tmp |= lrmax; - } - mvs_write_phy_ctl(mvi, phy_id, tmp); + /* should never happen? */ + else + tstat->stat = SAM_CHECK_COND; + break; + + case SAS_PROTOCOL_SMP: { + struct scatterlist *sg_resp = &task->smp_task.smp_resp; + tstat->stat = SAM_GOOD; + to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); + memcpy(to + sg_resp->offset, + slot->response + sizeof(struct mvs_err_info), + sg_dma_len(sg_resp)); + kunmap_atomic(to, KM_IRQ0); break; } - case PHY_FUNC_HARD_RESET: - if (tmp & PHY_RST_HARD) + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { + tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); break; - mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD); - break; - - case PHY_FUNC_LINK_RESET: - mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST); - break; + } - case PHY_FUNC_DISABLE: - case PHY_FUNC_RELEASE_SPINUP_HOLD: default: - rc = -EOPNOTSUPP; + tstat->stat = SAM_CHECK_COND; + break; } - return rc; -} - -static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) -{ - struct mvs_phy *phy = &mvi->phy[phy_id]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; +out: + mvs_slot_task_free(mvi, task, slot, slot_idx); + if (unlikely(tstat->stat != SAS_QUEUE_FULL)) + mvs_slot_free(mvi, rx_desc); - sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 
1 : 0; - sas_phy->class = SAS; - sas_phy->iproto = SAS_PROTOCOL_ALL; - sas_phy->tproto = 0; - sas_phy->type = PHY_TYPE_PHYSICAL; - sas_phy->role = PHY_ROLE_INITIATOR; - sas_phy->oob_mode = OOB_NOT_CONNECTED; - sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; - - sas_phy->id = phy_id; - sas_phy->sas_addr = &mvi->sas_addr[0]; - sas_phy->frame_rcvd = &phy->frame_rcvd[0]; - sas_phy->ha = &mvi->sas; - sas_phy->lldd_phy = phy; + spin_unlock(&mvi->lock); + task->task_done(task); + spin_lock(&mvi->lock); + return tstat->stat; } -static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev, - const struct pci_device_id *ent) +static void mvs_release_task(struct mvs_info *mvi, int phy_no) { - struct mvs_info *mvi; - unsigned long res_start, res_len, res_flag; - struct asd_sas_phy **arr_phy; - struct asd_sas_port **arr_port; - const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data]; - int i; - - /* - * alloc and init our per-HBA mvs_info struct - */ - - mvi = kzalloc(sizeof(*mvi), GFP_KERNEL); - if (!mvi) - return NULL; - - spin_lock_init(&mvi->lock); -#ifdef MVS_USE_TASKLET - tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi); -#endif - mvi->pdev = pdev; - mvi->chip = chip; - - if (pdev->device == 0x6440 && pdev->revision == 0) - mvi->flags |= MVF_PHY_PWR_FIX; - - /* - * alloc and init SCSI, SAS glue - */ - - mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); - if (!mvi->shost) - goto err_out; + struct list_head *pos, *n; + struct mvs_slot_info *slot; + struct mvs_phy *phy = &mvi->phy[phy_no]; + struct mvs_port *port = phy->port; + u32 rx_desc; - arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); - arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); - if (!arr_phy || !arr_port) - goto err_out; + if (!port) + return; - for (i = 0; i < MVS_MAX_PHYS; i++) { - mvs_phy_init(mvi, i); - arr_phy[i] = &mvi->phy[i].sas_phy; - arr_port[i] = &mvi->port[i].sas_port; - mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED; - mvi->port[i].wide_port_phymap = 0; - mvi->port[i].port_attached = 0; - INIT_LIST_HEAD(&mvi->port[i].list); + list_for_each_safe(pos, n, &port->list) { + slot = container_of(pos, struct mvs_slot_info, list); + rx_desc = (u32) (slot - mvi->slot_info); + mvs_slot_complete(mvi, rx_desc, 1); } +} - SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas; - mvi->shost->transportt = mvs_stt; - mvi->shost->max_id = 21; - mvi->shost->max_lun = ~0; - mvi->shost->max_channel = 0; - mvi->shost->max_cmd_len = 16; - - mvi->sas.sas_ha_name = DRV_NAME; - mvi->sas.dev = &pdev->dev; - mvi->sas.lldd_module = THIS_MODULE; - mvi->sas.sas_addr = &mvi->sas_addr[0]; - mvi->sas.sas_phy = arr_phy; - mvi->sas.sas_port = arr_port; - mvi->sas.num_phys = chip->n_phy; - mvi->sas.lldd_max_execute_num = 1; - mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE; - mvi->shost->can_queue = MVS_CAN_QUEUE; - mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys; - mvi->sas.lldd_ha = mvi; - mvi->sas.core.shost = mvi->shost; - - mvs_tag_init(mvi); - - /* - * ioremap main and peripheral registers - */ - -#ifdef MVS_ENABLE_PERI - res_start = pci_resource_start(pdev, 2); - res_len = pci_resource_len(pdev, 2); - if (!res_start || !res_len) - goto err_out; - - mvi->peri_regs = ioremap_nocache(res_start, res_len); - if (!mvi->peri_regs) - goto err_out; -#endif - - res_start = pci_resource_start(pdev, 4); - res_len = pci_resource_len(pdev, 4); - if (!res_start || !res_len) - goto err_out; - - res_flag = pci_resource_flags(pdev, 4); - if (res_flag & IORESOURCE_CACHEABLE) - mvi->regs = ioremap(res_start, res_len); - else - mvi->regs 
= ioremap_nocache(res_start, res_len); - - if (!mvi->regs) - goto err_out; +static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) +{ + struct pci_dev *pdev = mvi->pdev; + struct sas_ha_struct *sas_ha = &mvi->sas; + struct mvs_phy *phy = &mvi->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no); /* - * alloc and init our DMA areas - */ - - mvi->tx = dma_alloc_coherent(&pdev->dev, - sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, - &mvi->tx_dma, GFP_KERNEL); - if (!mvi->tx) - goto err_out; - memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ); - - mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ, - &mvi->rx_fis_dma, GFP_KERNEL); - if (!mvi->rx_fis) - goto err_out; - memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); - - mvi->rx = dma_alloc_coherent(&pdev->dev, - sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), - &mvi->rx_dma, GFP_KERNEL); - if (!mvi->rx) - goto err_out; - memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1)); - - mvi->rx[0] = cpu_to_le32(0xfff); - mvi->rx_cons = 0xfff; - - mvi->slot = dma_alloc_coherent(&pdev->dev, - sizeof(*mvi->slot) * MVS_SLOTS, - &mvi->slot_dma, GFP_KERNEL); - if (!mvi->slot) - goto err_out; - memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS); + * events is port event now , + * we need check the interrupt status which belongs to per port. + */ + dev_printk(KERN_DEBUG, &pdev->dev, + "Port %d Event = %X\n", + phy_no, phy->irq_status); - for (i = 0; i < MVS_SLOTS; i++) { - struct mvs_slot_info *slot = &mvi->slot_info[i]; + if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) { + mvs_release_task(mvi, phy_no); + if (!mvs_is_phy_ready(mvi, phy_no)) { + sas_phy_disconnected(sas_phy); + sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); + dev_printk(KERN_INFO, &pdev->dev, + "Port %d Unplug Notice\n", phy_no); - slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ, - &slot->buf_dma, GFP_KERNEL); - if (!slot->buf) - goto err_out; - memset(slot->buf, 0, MVS_SLOT_BUF_SZ); + } else + mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL); } + if (!(phy->irq_status & PHYEV_DEC_ERR)) { + if (phy->irq_status & PHYEV_COMWAKE) { + u32 tmp = mvs_read_port_irq_mask(mvi, phy_no); + mvs_write_port_irq_mask(mvi, phy_no, + tmp | PHYEV_SIG_FIS); + } + if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { + phy->phy_status = mvs_is_phy_ready(mvi, phy_no); + if (phy->phy_status) { + mvs_detect_porttype(mvi, phy_no); - /* finally, read NVRAM to get our SAS address */ - if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8)) - goto err_out; - return mvi; - -err_out: - mvs_free(mvi); - return NULL; -} - -static u32 mvs_cr32(void __iomem *regs, u32 addr) -{ - mw32(CMD_ADDR, addr); - return mr32(CMD_DATA); -} - -static void mvs_cw32(void __iomem *regs, u32 addr, u32 val) -{ - mw32(CMD_ADDR, addr); - mw32(CMD_DATA, val); -} + if (phy->phy_type & PORT_TYPE_SATA) { + u32 tmp = mvs_read_port_irq_mask(mvi, + phy_no); + tmp &= ~PHYEV_SIG_FIS; + mvs_write_port_irq_mask(mvi, + phy_no, tmp); + } -static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port) -{ - void __iomem *regs = mvi->regs; - return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4): - mr32(P4_SER_CTLSTAT + (port - 4) * 4); + mvs_update_phyinfo(mvi, phy_no, 0); + sas_ha->notify_phy_event(sas_phy, + PHYE_OOB_DONE); + mvs_bytes_dmaed(mvi, phy_no); + } else { + dev_printk(KERN_DEBUG, &pdev->dev, + "plugin interrupt but phy is gone\n"); + mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, + NULL); + } + } else if (phy->irq_status & PHYEV_BROAD_CH) { + 
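+			/* broadcast(change) primitive: expander topology
+			 * changed, so flush this port's tasks and let
+			 * libsas revalidate the domain */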
mvs_release_task(mvi, phy_no); + sas_ha->notify_port_event(sas_phy, + PORTE_BROADCAST_RCVD); + } + } + mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status); } -static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val) +static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) { void __iomem *regs = mvi->regs; - if (port < 4) - mw32(P0_SER_CTLSTAT + port * 4, val); - else - mw32(P4_SER_CTLSTAT + (port - 4) * 4, val); -} - -static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port) -{ - void __iomem *regs = mvi->regs + off; - void __iomem *regs2 = mvi->regs + off2; - return (port < 4)?readl(regs + port * 8): - readl(regs2 + (port - 4) * 8); -} + u32 rx_prod_idx, rx_desc; + bool attn = false; + struct pci_dev *pdev = mvi->pdev; -static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2, - u32 port, u32 val) -{ - void __iomem *regs = mvi->regs + off; - void __iomem *regs2 = mvi->regs + off2; - if (port < 4) - writel(val, regs + port * 8); - else - writel(val, regs2 + (port - 4) * 8); -} + /* the first dword in the RX ring is special: it contains + * a mirror of the hardware's RX producer index, so that + * we don't have to stall the CPU reading that register. + * The actual RX ring is offset by one dword, due to this. + */ + rx_prod_idx = mvi->rx_cons; + mvi->rx_cons = le32_to_cpu(mvi->rx[0]); + if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ + return 0; -static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port) -{ - return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port); -} + /* The CMPL_Q may come late, read from register and try again + * note: if coalescing is enabled, + * it will need to read from register every time for sure + */ + if (mvi->rx_cons == rx_prod_idx) + mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK; -static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val); -} + if (mvi->rx_cons == rx_prod_idx) + return 0; -static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr) -{ - mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr); -} + while (mvi->rx_cons != rx_prod_idx) { -static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) -{ - return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port); -} + /* increment our internal RX consumer pointer */ + rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); -static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val); -} + rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); -static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr) -{ - mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr); -} + if (likely(rx_desc & RXQ_DONE)) + mvs_slot_complete(mvi, rx_desc, 0); + if (rx_desc & RXQ_ATTN) { + attn = true; + dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n", + rx_desc); + } else if (rx_desc & RXQ_ERR) { + if (!(rx_desc & RXQ_DONE)) + mvs_slot_complete(mvi, rx_desc, 0); + dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n", + rx_desc); + } else if (rx_desc & RXQ_SLOT_RESET) { + dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n", + rx_desc); + mvs_slot_free(mvi, rx_desc); + } + } -static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) -{ - return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port); -} + if (attn && self_clear) + mvs_int_full(mvi); -static void mvs_write_port_irq_stat(struct 
mvs_info *mvi, u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val); + return 0; } -static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port) +#ifndef MVS_DISABLE_NVRAM +static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data) { - return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port); -} + int timeout = 1000; -static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val); -} + if (addr & ~SPI_ADDR_MASK) + return -EINVAL; -static void __devinit mvs_phy_hacks(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp; + writel(addr, regs + SPI_CMD); + writel(TWSI_RD, regs + SPI_CTL); - /* workaround for SATA R-ERR, to ignore phy glitch */ - tmp = mvs_cr32(regs, CMD_PHY_TIMER); - tmp &= ~(1 << 9); - tmp |= (1 << 10); - mvs_cw32(regs, CMD_PHY_TIMER, tmp); + while (timeout-- > 0) { + if (readl(regs + SPI_CTL) & TWSI_RDY) { + *data = readl(regs + SPI_DATA); + return 0; + } - /* enable retry 127 times */ - mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f); + udelay(10); + } - /* extend open frame timeout to max */ - tmp = mvs_cr32(regs, CMD_SAS_CTL0); - tmp &= ~0xffff; - tmp |= 0x3fff; - mvs_cw32(regs, CMD_SAS_CTL0, tmp); + return -EBUSY; +} - /* workaround for WDTIMEOUT , set to 550 ms */ - mvs_cw32(regs, CMD_WD_TIMER, 0x86470); +static int mvs_eep_read_buf(void __iomem *regs, u32 addr, + void *buf, u32 buflen) +{ + u32 addr_end, tmp_addr, i, j; + u32 tmp = 0; + int rc; + u8 *tmp8, *buf8 = buf; - /* not to halt for different port op during wideport link change */ - mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d); + addr_end = addr + buflen; + tmp_addr = ALIGN(addr, 4); + if (addr > 0xff) + return -EINVAL; - /* workaround for Seagate disk not-found OOB sequence, recv - * COMINIT before sending out COMWAKE */ - tmp = mvs_cr32(regs, CMD_PHY_MODE_21); - tmp &= 0x0000ffff; - tmp |= 0x00fa0000; - mvs_cw32(regs, CMD_PHY_MODE_21, tmp); + j = addr & 0x3; + if (j) { + rc = mvs_eep_read(regs, tmp_addr, &tmp); + if (rc) + return rc; - tmp = mvs_cr32(regs, CMD_PHY_TIMER); - tmp &= 0x1fffffff; - tmp |= (2U << 29); /* 8 ms retry */ - mvs_cw32(regs, CMD_PHY_TIMER, tmp); + tmp8 = (u8 *)&tmp; + for (i = j; i < 4; i++) + *buf8++ = tmp8[i]; - /* TEST - for phy decoding error, adjust voltage levels */ - mw32(P0_VSR_ADDR + 0, 0x8); - mw32(P0_VSR_DATA + 0, 0x2F0); + tmp_addr += 4; + } - mw32(P0_VSR_ADDR + 8, 0x8); - mw32(P0_VSR_DATA + 8, 0x2F0); + for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) { + rc = mvs_eep_read(regs, tmp_addr, &tmp); + if (rc) + return rc; - mw32(P0_VSR_ADDR + 16, 0x8); - mw32(P0_VSR_DATA + 16, 0x2F0); + memcpy(buf8, &tmp, 4); + buf8 += 4; + } - mw32(P0_VSR_ADDR + 24, 0x8); - mw32(P0_VSR_DATA + 24, 0x2F0); + if (tmp_addr < addr_end) { + rc = mvs_eep_read(regs, tmp_addr, &tmp); + if (rc) + return rc; -} + tmp8 = (u8 *)&tmp; + j = addr_end - tmp_addr; + for (i = 0; i < j; i++) + *buf8++ = tmp8[i]; -static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId) -{ - void __iomem *regs = mvi->regs; - u32 tmp; + tmp_addr += 4; + } - tmp = mr32(PCS); - if (mvi->chip->n_phy <= 4) - tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT); - else - tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2); - mw32(PCS, tmp); + return 0; } +#endif -static void mvs_detect_porttype(struct mvs_info *mvi, int i) +int mvs_nvram_read(struct mvs_info *mvi, u32 addr, void *buf, u32 buflen) { +#ifndef MVS_DISABLE_NVRAM void __iomem *regs = mvi->regs; - u32 reg; - struct mvs_phy *phy = 
&mvi->phy[i]; - - /* TODO check & save device type */ - reg = mr32(GBL_PORT_TYPE); - - if (reg & MODE_SAS_SATA & (1 << i)) - phy->phy_type |= PORT_TYPE_SAS; - else - phy->phy_type |= PORT_TYPE_SATA; -} - -static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) -{ - u32 *s = (u32 *) buf; - - if (!s) - return NULL; + int rc, i; + u32 sum; + u8 hdr[2], *tmp; + const char *msg; - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); - s[3] = mvs_read_port_cfg_data(mvi, i); + rc = mvs_eep_read_buf(regs, addr, &hdr, 2); + if (rc) { + msg = "nvram hdr read failed"; + goto err_out; + } + rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen); + if (rc) { + msg = "nvram read failed"; + goto err_out; + } - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); - s[2] = mvs_read_port_cfg_data(mvi, i); + if (hdr[0] != 0x5A) { + /* entry id */ + msg = "invalid nvram entry id"; + rc = -ENOENT; + goto err_out; + } - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); - s[1] = mvs_read_port_cfg_data(mvi, i); + tmp = buf; + sum = ((u32)hdr[0]) + ((u32)hdr[1]); + for (i = 0; i < buflen; i++) + sum += ((u32)tmp[i]); - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); - s[0] = mvs_read_port_cfg_data(mvi, i); + if (sum) { + msg = "nvram checksum failure"; + rc = -EILSEQ; + goto err_out; + } - return (void *)s; -} + return 0; -static u32 mvs_is_sig_fis_received(u32 irq_status) -{ - return irq_status & PHYEV_SIG_FIS; +err_out: + dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg); + return rc; +#else + /* FIXME , For SAS target mode */ + memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8); + return 0; +#endif } -static void mvs_update_wideport(struct mvs_info *mvi, int i) +static void mvs_int_sata(struct mvs_info *mvi) { - struct mvs_phy *phy = &mvi->phy[i]; - struct mvs_port *port = phy->port; - int j, no; - - for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy) - if (no & 1) { - mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); - mvs_write_port_cfg_data(mvi, no, - port->wide_port_phymap); - } else { - mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); - mvs_write_port_cfg_data(mvi, no, 0); - } + u32 tmp; + void __iomem *regs = mvi->regs; + tmp = mr32(INT_STAT_SRS); + mw32(INT_STAT_SRS, tmp & 0xFFFF); } -static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) +static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, + u32 slot_idx) { - u32 tmp; - struct mvs_phy *phy = &mvi->phy[i]; - struct mvs_port *port = phy->port;; - - tmp = mvs_read_phy_ctl(mvi, i); - - if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { - if (!port) - phy->phy_attached = 1; - return tmp; - } + void __iomem *regs = mvi->regs; + struct domain_device *dev = task->dev; + struct asd_sas_port *sas_port = dev->port; + struct mvs_port *port = mvi->slot_info[slot_idx].port; + u32 reg_set, phy_mask; - if (port) { - if (phy->phy_type & PORT_TYPE_SAS) { - port->wide_port_phymap &= ~(1U << i); - if (!port->wide_port_phymap) - port->port_attached = 0; - mvs_update_wideport(mvi, i); - } else if (phy->phy_type & PORT_TYPE_SATA) - port->port_attached = 0; - mvs_free_reg_set(mvi, phy->port); - phy->port = NULL; - phy->phy_attached = 0; - phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + if (!sas_protocol_ata(task->task_proto)) { + reg_set = 0; + phy_mask = (port->wide_port_phymap) ? 
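The NVRAM validation in mvs_nvram_read() above is compact enough to restate: byte 0 of the two-byte header must be the 0x5A entry id, and the unsigned 32-bit sum of both header bytes plus every payload byte must come out to zero, meaning the stored image has to carry whatever compensation makes the sum vanish. A self-contained model (the -1/-2 return codes are placeholders for the driver's -ENOENT/-EILSEQ):

#include <stdint.h>

static int nvram_check(const uint8_t hdr[2], const uint8_t *payload,
                       uint32_t len)
{
    uint32_t sum = (uint32_t)hdr[0] + (uint32_t)hdr[1];
    uint32_t i;

    if (hdr[0] != 0x5A)  /* entry id, as in the driver */
        return -1;

    for (i = 0; i < len; i++)
        sum += payload[i];

    return sum ? -2 : 0;  /* any residue is a checksum failure */
}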
port->wide_port_phymap : + sas_port->phy_mask; + } else { + reg_set = port->taskfileset; + phy_mask = sas_port->phy_mask; } - return 0; + mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx | + (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) | + (phy_mask << TXQ_PHY_SHIFT) | + (reg_set << TXQ_SRS_SHIFT)); + + mw32(TX_PROD_IDX, mvi->tx_prod); + mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); } -static void mvs_update_phyinfo(struct mvs_info *mvi, int i, - int get_st) +void mvs_int_full(struct mvs_info *mvi) { - struct mvs_phy *phy = &mvi->phy[i]; - struct pci_dev *pdev = mvi->pdev; - u32 tmp; - u64 tmp64; - - mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); - phy->dev_info = mvs_read_port_cfg_data(mvi, i); + void __iomem *regs = mvi->regs; + u32 tmp, stat; + int i; - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); - phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32; + stat = mr32(INT_STAT); - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); - phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); + mvs_int_rx(mvi, false); - if (get_st) { - phy->irq_status = mvs_read_port_irq_stat(mvi, i); - phy->phy_status = mvs_is_phy_ready(mvi, i); + for (i = 0; i < MVS_MAX_PORTS; i++) { + tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); + if (tmp) + mvs_int_port(mvi, i, tmp); } - if (phy->phy_status) { - u32 phy_st; - struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; - - mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); - phy_st = mvs_read_port_cfg_data(mvi, i); - - sas_phy->linkrate = - (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; - phy->minimum_linkrate = - (phy->phy_status & - PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; - phy->maximum_linkrate = - (phy->phy_status & - PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; + if (stat & CINT_SRS) + mvs_int_sata(mvi); - if (phy->phy_type & PORT_TYPE_SAS) { - /* Updated attached_sas_addr */ - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); - phy->att_dev_sas_addr = - (u64) mvs_read_port_cfg_data(mvi, i) << 32; - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); - phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); - phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); - phy->identify.device_type = - phy->att_dev_info & PORT_DEV_TYPE_MASK; + mw32(INT_STAT, stat); +} - if (phy->identify.device_type == SAS_END_DEV) - phy->identify.target_port_protocols = - SAS_PROTOCOL_SSP; - else if (phy->identify.device_type != NO_DEVICE) - phy->identify.target_port_protocols = - SAS_PROTOCOL_SMP; - if (phy_st & PHY_OOB_DTCTD) - sas_phy->oob_mode = SAS_OOB_MODE; - phy->frame_rcvd_size = - sizeof(struct sas_identify_frame); - } else if (phy->phy_type & PORT_TYPE_SATA) { - phy->identify.target_port_protocols = SAS_PROTOCOL_STP; - if (mvs_is_sig_fis_received(phy->irq_status)) { - phy->att_dev_sas_addr = i; /* temp */ - if (phy_st & PHY_OOB_DTCTD) - sas_phy->oob_mode = SATA_OOB_MODE; - phy->frame_rcvd_size = - sizeof(struct dev_to_host_fis); - mvs_get_d2h_reg(mvi, i, - (void *)sas_phy->frame_rcvd); - } else { - dev_printk(KERN_DEBUG, &pdev->dev, - "No sig fis\n"); - phy->phy_type &= ~(PORT_TYPE_SATA); - goto out_done; - } - } - tmp64 = cpu_to_be64(phy->att_dev_sas_addr); - memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE); +#ifndef MVS_DISABLE_MSI +static irqreturn_t mvs_msi_interrupt(int irq, void *opaque) +{ + struct mvs_info *mvi = opaque; - dev_printk(KERN_DEBUG, &pdev->dev, - "phy[%d] Get Attached Address 0x%llX ," - " SAS Address 0x%llX\n", - i, - 
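The link-rate bookkeeping in the removed mvs_update_phyinfo() (and in the per-chip fix_phy_info() hooks that replace it) is plain mask-and-shift over a single PHY status word: per the PHY_{MIN,MAX,NEG}_SPP_PHYS_LINK_RATE_MASK values defined later in mv_64xx.h, the minimum, maximum, and negotiated rates occupy bits 8-11, 12-15, and 16-19. Isolated as helpers:

#include <stdint.h>

/* Field layout per the link-rate masks in mv_64xx.h. */
static inline uint32_t phy_min_rate(uint32_t status) { return (status >> 8) & 0xF; }
static inline uint32_t phy_max_rate(uint32_t status) { return (status >> 12) & 0xF; }
static inline uint32_t phy_neg_rate(uint32_t status) { return (status >> 16) & 0xF; }

mvs_64xx_phy_set_link_rate() later writes the same two 4-bit min/max fields back when a caller constrains the allowed rate range.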
(unsigned long long)phy->att_dev_sas_addr, - (unsigned long long)phy->dev_sas_addr); - dev_printk(KERN_DEBUG, &pdev->dev, - "Rate = %x , type = %d\n", - sas_phy->linkrate, phy->phy_type); +#ifndef MVS_USE_TASKLET + spin_lock(&mvi->lock); - /* workaround for HW phy decoding error on 1.5g disk drive */ - mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); - tmp = mvs_read_port_vsr_data(mvi, i); - if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == - SAS_LINK_RATE_1_5_GBPS) - tmp &= ~PHY_MODE6_LATECLK; - else - tmp |= PHY_MODE6_LATECLK; - mvs_write_port_vsr_data(mvi, i, tmp); + mvs_int_rx(mvi, true); - } -out_done: - if (get_st) - mvs_write_port_irq_stat(mvi, i, phy->irq_status); + spin_unlock(&mvi->lock); +#else + tasklet_schedule(&mvi->tasklet); +#endif + return IRQ_HANDLED; } +#endif -static void mvs_port_formed(struct asd_sas_phy *sas_phy) +int mvs_task_abort(struct sas_task *task) { - struct sas_ha_struct *sas_ha = sas_phy->ha; - struct mvs_info *mvi = sas_ha->lldd_ha; - struct asd_sas_port *sas_port = sas_phy->port; - struct mvs_phy *phy = sas_phy->lldd_phy; - struct mvs_port *port = &mvi->port[sas_port->id]; + int rc; unsigned long flags; + struct mvs_info *mvi = task->dev->port->ha->lldd_ha; + struct pci_dev *pdev = mvi->pdev; + int tag; - spin_lock_irqsave(&mvi->lock, flags); - port->port_attached = 1; - phy->port = port; - port->taskfileset = MVS_ID_NOT_MAPPED; - if (phy->phy_type & PORT_TYPE_SAS) { - port->wide_port_phymap = sas_port->phy_mask; - mvs_update_wideport(mvi, sas_phy->id); + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + rc = TMF_RESP_FUNC_COMPLETE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + goto out_done; } - spin_unlock_irqrestore(&mvi->lock, flags); -} + spin_unlock_irqrestore(&task->task_state_lock, flags); -static int mvs_I_T_nexus_reset(struct domain_device *dev) -{ - return TMF_RESP_FUNC_FAILED; + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n"); + break; + case SAS_PROTOCOL_SSP: + dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n"); + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{ + dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! 
\n"); +#if _MV_DUMP + dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n"); + mvs_hexdump(sizeof(struct host_to_dev_fis), + (void *)&task->ata_task.fis, 0); + dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n"); + mvs_hexdump(16, task->ata_task.atapi_packet, 0); +#endif + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { + /* TODO */ + ; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + break; + } + default: + break; + } + + if (mvs_find_tag(mvi, task, &tag)) { + spin_lock_irqsave(&mvi->lock, flags); + mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag); + spin_unlock_irqrestore(&mvi->lock, flags); + } + if (!mvs_task_exec(task, 1, GFP_ATOMIC)) + rc = TMF_RESP_FUNC_COMPLETE; + else + rc = TMF_RESP_FUNC_FAILED; +out_done: + return rc; } -static int __devinit mvs_hw_init(struct mvs_info *mvi) +int __devinit mvs_hw_init(struct mvs_info *mvi) { void __iomem *regs = mvi->regs; int i; @@ -3041,7 +1804,7 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi) return 0; } -static void __devinit mvs_print_info(struct mvs_info *mvi) +void __devinit mvs_print_info(struct mvs_info *mvi) { struct pci_dev *pdev = mvi->pdev; static int printed_version; @@ -3053,170 +1816,3 @@ static void __devinit mvs_print_info(struct mvs_info *mvi) mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr)); } -static int __devinit mvs_pci_init(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - int rc; - struct mvs_info *mvi; - irq_handler_t irq_handler = mvs_interrupt; - - rc = pci_enable_device(pdev); - if (rc) - return rc; - - pci_set_master(pdev); - - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) - goto err_out_disable; - - rc = pci_go_64(pdev); - if (rc) - goto err_out_regions; - - mvi = mvs_alloc(pdev, ent); - if (!mvi) { - rc = -ENOMEM; - goto err_out_regions; - } - - rc = mvs_hw_init(mvi); - if (rc) - goto err_out_mvi; - -#ifndef MVS_DISABLE_MSI - if (!pci_enable_msi(pdev)) { - u32 tmp; - void __iomem *regs = mvi->regs; - mvi->flags |= MVF_MSI; - irq_handler = mvs_msi_interrupt; - tmp = mr32(PCS); - mw32(PCS, tmp | PCS_SELF_CLEAR); - } -#endif - - rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi); - if (rc) - goto err_out_msi; - - rc = scsi_add_host(mvi->shost, &pdev->dev); - if (rc) - goto err_out_irq; - - rc = sas_register_ha(&mvi->sas); - if (rc) - goto err_out_shost; - - pci_set_drvdata(pdev, mvi); - - mvs_print_info(mvi); - - mvs_hba_interrupt_enable(mvi); - - scsi_scan_host(mvi->shost); - - return 0; - -err_out_shost: - scsi_remove_host(mvi->shost); -err_out_irq: - free_irq(pdev->irq, mvi); -err_out_msi: - if (mvi->flags |= MVF_MSI) - pci_disable_msi(pdev); -err_out_mvi: - mvs_free(mvi); -err_out_regions: - pci_release_regions(pdev); -err_out_disable: - pci_disable_device(pdev); - return rc; -} - -static void __devexit mvs_pci_remove(struct pci_dev *pdev) -{ - struct mvs_info *mvi = pci_get_drvdata(pdev); - - pci_set_drvdata(pdev, NULL); - - if (mvi) { - sas_unregister_ha(&mvi->sas); - mvs_hba_interrupt_disable(mvi); - sas_remove_host(mvi->shost); - scsi_remove_host(mvi->shost); - - free_irq(pdev->irq, mvi); - if (mvi->flags & MVF_MSI) - pci_disable_msi(pdev); - mvs_free(mvi); - pci_release_regions(pdev); - } - pci_disable_device(pdev); -} - -static struct sas_domain_function_template mvs_transport_ops = { - .lldd_execute_task = mvs_task_exec, - .lldd_control_phy = mvs_phy_control, - .lldd_abort_task = mvs_task_abort, - .lldd_port_formed = mvs_port_formed, - .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, 
-}; - -static struct pci_device_id __devinitdata mvs_pci_table[] = { - { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 }, - { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 }, - { - .vendor = PCI_VENDOR_ID_MARVELL, - .device = 0x6440, - .subvendor = PCI_ANY_ID, - .subdevice = 0x6480, - .class = 0, - .class_mask = 0, - .driver_data = chip_6480, - }, - { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 }, - { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 }, - - { } /* terminate list */ -}; - -static struct pci_driver mvs_pci_driver = { - .name = DRV_NAME, - .id_table = mvs_pci_table, - .probe = mvs_pci_init, - .remove = __devexit_p(mvs_pci_remove), -}; - -static int __init mvs_init(void) -{ - int rc; - - mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); - if (!mvs_stt) - return -ENOMEM; - - rc = pci_register_driver(&mvs_pci_driver); - if (rc) - goto err_out; - - return 0; - -err_out: - sas_release_transport(mvs_stt); - return rc; -} - -static void __exit mvs_exit(void) -{ - pci_unregister_driver(&mvs_pci_driver); - sas_release_transport(mvs_stt); -} - -module_init(mvs_init); -module_exit(mvs_exit); - -MODULE_AUTHOR("Jeff Garzik "); -MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); -MODULE_VERSION(DRV_VERSION); -MODULE_LICENSE("GPL"); -MODULE_DEVICE_TABLE(pci, mvs_pci_table); diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h new file mode 100644 index 000000000000..7a954a95a217 --- /dev/null +++ b/drivers/scsi/mvsas/mv_sas.h @@ -0,0 +1,205 @@ +/* + mv_sas.h - Marvell 88SE6440 SAS/SATA support + + Copyright 2007 Red Hat, Inc. + Copyright 2008 Marvell. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2, + or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty + of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the GNU General Public License for more details. + + You should have received a copy of the GNU General Public + License along with this program; see the file COPYING. If not, + write to the Free Software Foundation, 675 Mass Ave, Cambridge, + MA 02139, USA. 
+ + */ + +#ifndef _MV_SAS_H_ +#define _MV_SAS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mv_defs.h" + +#define DRV_NAME "mvsas" +#define DRV_VERSION "0.5.2" +#define _MV_DUMP 0 +#define MVS_DISABLE_NVRAM +#define MVS_DISABLE_MSI + +#define MVS_ID_NOT_MAPPED 0x7f +#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) + +#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \ + for ((__mc) = (__lseq_mask), (__lseq) = 0; \ + (__mc) != 0 && __rest; \ + (++__lseq), (__mc) >>= 1) + +struct mvs_chip_info { + u32 n_phy; + u32 srs_sz; + u32 slot_width; +}; + +struct mvs_err_info { + __le32 flags; + __le32 flags2; +}; + +struct mvs_cmd_hdr { + __le32 flags; /* PRD tbl len; SAS, SATA ctl */ + __le32 lens; /* cmd, max resp frame len */ + __le32 tags; /* targ port xfer tag; tag */ + __le32 data_len; /* data xfer len */ + __le64 cmd_tbl; /* command table address */ + __le64 open_frame; /* open addr frame address */ + __le64 status_buf; /* status buffer address */ + __le64 prd_tbl; /* PRD tbl address */ + __le32 reserved[4]; +}; + +struct mvs_port { + struct asd_sas_port sas_port; + u8 port_attached; + u8 taskfileset; + u8 wide_port_phymap; + struct list_head list; +}; + +struct mvs_phy { + struct mvs_port *port; + struct asd_sas_phy sas_phy; + struct sas_identify identify; + struct scsi_device *sdev; + u64 dev_sas_addr; + u64 att_dev_sas_addr; + u32 att_dev_info; + u32 dev_info; + u32 phy_type; + u32 phy_status; + u32 irq_status; + u32 frame_rcvd_size; + u8 frame_rcvd[32]; + u8 phy_attached; + enum sas_linkrate minimum_linkrate; + enum sas_linkrate maximum_linkrate; +}; + +struct mvs_slot_info { + struct list_head list; + struct sas_task *task; + u32 n_elem; + u32 tx; + + /* DMA buffer for storing cmd tbl, open addr frame, status buffer, + * and PRD table + */ + void *buf; + dma_addr_t buf_dma; +#if _MV_DUMP + u32 cmd_size; +#endif + + void *response; + struct mvs_port *port; +}; + +struct mvs_info { + unsigned long flags; + + /* host-wide lock */ + spinlock_t lock; + + /* our device */ + struct pci_dev *pdev; + + /* enhanced mode registers */ + void __iomem *regs; + + /* peripheral registers */ + void __iomem *peri_regs; + + u8 sas_addr[SAS_ADDR_SIZE]; + + /* SCSI/SAS glue */ + struct sas_ha_struct sas; + struct Scsi_Host *shost; + + /* TX (delivery) DMA ring */ + __le32 *tx; + dma_addr_t tx_dma; + + /* cached next-producer idx */ + u32 tx_prod; + + /* RX (completion) DMA ring */ + __le32 *rx; + dma_addr_t rx_dma; + + /* RX consumer idx */ + u32 rx_cons; + + /* RX'd FIS area */ + __le32 *rx_fis; + dma_addr_t rx_fis_dma; + + /* DMA command header slots */ + struct mvs_cmd_hdr *slot; + dma_addr_t slot_dma; + + const struct mvs_chip_info *chip; + + u8 tags[MVS_SLOTS]; + struct mvs_slot_info slot_info[MVS_SLOTS]; + /* further per-slot information */ + struct mvs_phy phy[MVS_MAX_PHYS]; + struct mvs_port port[MVS_MAX_PHYS]; +#ifdef MVS_USE_TASKLET + struct tasklet_struct tasklet; +#endif +}; + +int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata); +int mvs_slave_configure(struct scsi_device *sdev); +void mvs_scan_start(struct Scsi_Host *shost); +int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time); +int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags); +int mvs_task_abort(struct sas_task *task); +void mvs_port_formed(struct asd_sas_phy *sas_phy); +int mvs_I_T_nexus_reset(struct domain_device *dev); +void 
mvs_int_full(struct mvs_info *mvi); +void mvs_tag_init(struct mvs_info *mvi); +int mvs_nvram_read(struct mvs_info *mvi, u32 addr, void *buf, u32 buflen); +int __devinit mvs_hw_init(struct mvs_info *mvi); +void __devinit mvs_print_info(struct mvs_info *mvi); +void mvs_hba_interrupt_enable(struct mvs_info *mvi); +void mvs_hba_interrupt_disable(struct mvs_info *mvi); +void mvs_detect_porttype(struct mvs_info *mvi, int i); +u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port); +void mvs_enable_xmt(struct mvs_info *mvi, int PhyId); +void __devinit mvs_phy_hacks(struct mvs_info *mvi); +void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port); + +#endif -- cgit v1.2.3 From 20b09c2992fefbe78f8cede7b404fb143a413c52 Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Fri, 8 May 2009 17:46:40 -0400 Subject: [SCSI] mvsas: add support for 94xx; layout change; bug fixes This version contains the following main changes: - Switch to a new layout to support more types of ASICs. - SSP TMF supported and related error handling enhanced. - Support the flash feature with a 2*HZ delay when a PHY change occurs. - Support the Marvell 94xx series ASICs for 6G SAS/SATA, which behave like two 88SE64xx chips but with a different register description. - Support SPI flash for HBA-related configuration info. - Other enhancements merged from the kernel side, such as the increased number of PHY types [jejb: fold back in DMA_BIT_MASK changes] Signed-off-by: Ying Chu Signed-off-by: Andy Yan Signed-off-by: Ke Wei Signed-off-by: Jeff Garzik Signed-off-by: James Bottomley --- drivers/scsi/mvsas/Kconfig | 23 +- drivers/scsi/mvsas/Makefile | 9 +- drivers/scsi/mvsas/mv_64xx.c | 785 +++++++++++++-- drivers/scsi/mvsas/mv_64xx.h | 75 +- drivers/scsi/mvsas/mv_94xx.c | 672 +++++++++++++ drivers/scsi/mvsas/mv_94xx.h | 222 +++++ drivers/scsi/mvsas/mv_chips.h | 212 +++- drivers/scsi/mvsas/mv_defs.h | 197 ++-- drivers/scsi/mvsas/mv_init.c | 629 +++++++----- drivers/scsi/mvsas/mv_sas.c | 2169 ++++++++++++++++++++++++----------------- drivers/scsi/mvsas/mv_sas.h | 329 +++++-- 11 files changed, 3919 insertions(+), 1403 deletions(-) create mode 100644 drivers/scsi/mvsas/mv_94xx.c create mode 100644 drivers/scsi/mvsas/mv_94xx.h (limited to 'drivers') diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig index f83f368e6902..6de7af27e507 100644 --- a/drivers/scsi/mvsas/Kconfig +++ b/drivers/scsi/mvsas/Kconfig @@ -1,35 +1,42 @@ # -# Kernel configuration file for 88SE64XX SAS/SATA driver. +# Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver. # # Copyright 2007 Red Hat, Inc. # Copyright 2008 Marvell. # # This file is licensed under GPLv2. # -# This file is part of the 88SE64XX driver. +# This file is part of the 88SE64XX/88SE94XX driver. # -# The 88SE64XX driver is free software; you can redistribute +# The 88SE64XX/88SE94XX driver is free software; you can redistribute # it and/or modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 of the # License. # -# The 88SE64XX driver is distributed in the hope that it will be +# The 88SE64XX/88SE94XX driver is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
# # You should have received a copy of the GNU General Public License -# along with 88SE64XX Driver; if not, write to the Free Software +# along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # # config SCSI_MVSAS - tristate "Marvell 88SE64XX SAS/SATA support" + tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support" depends on PCI select SCSI_SAS_LIBSAS select FW_LOADER help - This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX - chip based host adapters. + This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX and 6Gb/s + PCI-E 88SE94XX chip based host adapters. +config SCSI_MVSAS_DEBUG + bool "Compile in debug mode" + default y + depends on SCSI_MVSAS + help + Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode, + the driver prints some messages to the console. diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile index a1ca681e1a57..52ac4264677d 100644 --- a/drivers/scsi/mvsas/Makefile +++ b/drivers/scsi/mvsas/Makefile @@ -1,5 +1,5 @@ # -# Makefile for Marvell 88SE64xx SAS/SATA driver. +# Makefile for Marvell 88SE64xx/88SE94xx SAS/SATA driver. # # Copyright 2007 Red Hat, Inc. # Copyright 2008 Marvell. @@ -21,7 +21,12 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 # USA +ifeq ($(CONFIG_SCSI_MVSAS_DEBUG),y) + EXTRA_CFLAGS += -DMV_DEBUG +endif + obj-$(CONFIG_SCSI_MVSAS) += mvsas.o mvsas-y += mv_init.o \ mv_sas.o \ - mv_64xx.o + mv_64xx.o \ + mv_94xx.o diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c index 697806c856af..10a5077b6aed 100644 --- a/drivers/scsi/mvsas/mv_64xx.c +++ b/drivers/scsi/mvsas/mv_64xx.c @@ -1,184 +1,793 @@ /* - mv_64xx.c - Marvell 88SE6440 SAS/SATA support - - Copyright 2007 Red Hat, Inc. - Copyright 2008 Marvell. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2, - or (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public - License along with this program; see the file COPYING. If not, - write to the Free Software Foundation, 675 Mass Ave, Cambridge, - MA 02139, USA. - - */ + * Marvell 88SE64xx hardware specific + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
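The new Kconfig option is wired through the Makefile as a plain -DMV_DEBUG compile flag, which presumably gates the mv_dprintk() calls seen later in mv_94xx.c. The real macro lives in headers not shown in this patch; a hypothetical userspace analogue of the shape such a macro usually takes (the ##__VA_ARGS__ form is the GNU extension kernel code uses):

#include <stdio.h>

#ifdef MV_DEBUG
#define mv_dprintk(fmt, ...) fprintf(stderr, "mvsas: " fmt, ##__VA_ARGS__)
#else
#define mv_dprintk(fmt, ...) do { } while (0)
#endif

int main(void)
{
    mv_dprintk("phy %d changed\n", 0);  /* compiled out without -DMV_DEBUG */
    return 0;
}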
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA +*/ #include "mv_sas.h" #include "mv_64xx.h" #include "mv_chips.h" -void mvs_detect_porttype(struct mvs_info *mvi, int i) +static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i) { void __iomem *regs = mvi->regs; u32 reg; struct mvs_phy *phy = &mvi->phy[i]; /* TODO check & save device type */ - reg = mr32(GBL_PORT_TYPE); - + reg = mr32(MVS_GBL_PORT_TYPE); + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); if (reg & MODE_SAS_SATA & (1 << i)) phy->phy_type |= PORT_TYPE_SAS; else phy->phy_type |= PORT_TYPE_SATA; } -void mvs_enable_xmt(struct mvs_info *mvi, int PhyId) +static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id) { void __iomem *regs = mvi->regs; u32 tmp; - tmp = mr32(PCS); + tmp = mr32(MVS_PCS); if (mvi->chip->n_phy <= 4) - tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT); + tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT); + else + tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); + mw32(MVS_PCS, tmp); +} + +static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + + mvs_phy_hacks(mvi); + + if (!(mvi->flags & MVF_FLAG_SOC)) { + /* TEST - for phy decoding error, adjust voltage levels */ + mw32(MVS_P0_VSR_ADDR + 0, 0x8); + mw32(MVS_P0_VSR_DATA + 0, 0x2F0); + + mw32(MVS_P0_VSR_ADDR + 8, 0x8); + mw32(MVS_P0_VSR_DATA + 8, 0x2F0); + + mw32(MVS_P0_VSR_ADDR + 16, 0x8); + mw32(MVS_P0_VSR_DATA + 16, 0x2F0); + + mw32(MVS_P0_VSR_ADDR + 24, 0x8); + mw32(MVS_P0_VSR_DATA + 24, 0x2F0); + } else { + int i; + /* disable auto port detection */ + mw32(MVS_GBL_PORT_TYPE, 0); + for (i = 0; i < mvi->chip->n_phy; i++) { + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7); + mvs_write_port_vsr_data(mvi, i, 0x90000000); + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9); + mvs_write_port_vsr_data(mvi, i, 0x50f2); + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11); + mvs_write_port_vsr_data(mvi, i, 0x0e); + } + } +} + +static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id) +{ + void __iomem *regs = mvi->regs; + u32 reg, tmp; + + if (!(mvi->flags & MVF_FLAG_SOC)) { + if (phy_id < 4) + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, ®); + else + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, ®); + + } else + reg = mr32(MVS_PHY_CTL); + + tmp = reg; + if (phy_id < 4) + tmp |= (1U << phy_id) << PCTL_LINK_OFFS; else - tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2); - mw32(PCS, tmp); + tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS; + + if (!(mvi->flags & MVF_FLAG_SOC)) { + if (phy_id < 4) { + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); + mdelay(10); + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg); + } else { + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); + mdelay(10); + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg); + } + } else { + mw32(MVS_PHY_CTL, tmp); + mdelay(10); + mw32(MVS_PHY_CTL, reg); + } +} + +static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) +{ + u32 tmp; + tmp = mvs_read_port_irq_stat(mvi, phy_id); + tmp &= ~PHYEV_RDY_CH; + mvs_write_port_irq_stat(mvi, phy_id, tmp); + tmp = mvs_read_phy_ctl(mvi, phy_id); + if (hard) + tmp |= PHY_RST_HARD; + else + tmp |= PHY_RST; + mvs_write_phy_ctl(mvi, phy_id, tmp); + if (hard) { + do { + tmp = mvs_read_phy_ctl(mvi, phy_id); + } while (tmp & PHY_RST_HARD); + } +} + +static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi) +{ + 
void __iomem *regs = mvi->regs; + u32 tmp; + int i; + + /* make sure interrupts are masked immediately (paranoia) */ + mw32(MVS_GBL_CTL, 0); + tmp = mr32(MVS_GBL_CTL); + + /* Reset Controller */ + if (!(tmp & HBA_RST)) { + if (mvi->flags & MVF_PHY_PWR_FIX) { + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_PHY_DSBL; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); + + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_PHY_DSBL; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); + } + } + + /* make sure interrupts are masked immediately (paranoia) */ + mw32(MVS_GBL_CTL, 0); + tmp = mr32(MVS_GBL_CTL); + + /* Reset Controller */ + if (!(tmp & HBA_RST)) { + /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */ + mw32_f(MVS_GBL_CTL, HBA_RST); + } + + /* wait for reset to finish; timeout is just a guess */ + i = 1000; + while (i-- > 0) { + msleep(10); + + if (!(mr32(MVS_GBL_CTL) & HBA_RST)) + break; + } + if (mr32(MVS_GBL_CTL) & HBA_RST) { + dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n"); + return -EBUSY; + } + return 0; } -void __devinit mvs_phy_hacks(struct mvs_info *mvi) +static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id) { void __iomem *regs = mvi->regs; u32 tmp; + if (!(mvi->flags & MVF_FLAG_SOC)) { + u32 offs; + if (phy_id < 4) + offs = PCR_PHY_CTL; + else { + offs = PCR_PHY_CTL2; + phy_id -= 4; + } + pci_read_config_dword(mvi->pdev, offs, &tmp); + tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id); + pci_write_config_dword(mvi->pdev, offs, tmp); + } else { + tmp = mr32(MVS_PHY_CTL); + tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id); + mw32(MVS_PHY_CTL, tmp); + } +} + +static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + if (!(mvi->flags & MVF_FLAG_SOC)) { + u32 offs; + if (phy_id < 4) + offs = PCR_PHY_CTL; + else { + offs = PCR_PHY_CTL2; + phy_id -= 4; + } + pci_read_config_dword(mvi->pdev, offs, &tmp); + tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id)); + pci_write_config_dword(mvi->pdev, offs, tmp); + } else { + tmp = mr32(MVS_PHY_CTL); + tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id)); + mw32(MVS_PHY_CTL, tmp); + } +} - /* workaround for SATA R-ERR, to ignore phy glitch */ - tmp = mvs_cr32(regs, CMD_PHY_TIMER); - tmp &= ~(1 << 9); - tmp |= (1 << 10); - mvs_cw32(regs, CMD_PHY_TIMER, tmp); +static int __devinit mvs_64xx_init(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + int i; + u32 tmp, cctl; + + if (mvi->pdev && mvi->pdev->revision == 0) + mvi->flags |= MVF_PHY_PWR_FIX; + if (!(mvi->flags & MVF_FLAG_SOC)) { + mvs_show_pcie_usage(mvi); + tmp = mvs_64xx_chip_reset(mvi); + if (tmp) + return tmp; + } else { + tmp = mr32(MVS_PHY_CTL); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_PHY_DSBL; + mw32(MVS_PHY_CTL, tmp); + } - /* enable retry 127 times */ - mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f); + /* Init Chip */ + /* make sure RST is set; HBA_RST /should/ have done that for us */ + cctl = mr32(MVS_CTL) & 0xFFFF; + if (cctl & CCTL_RST) + cctl &= ~CCTL_RST; + else + mw32_f(MVS_CTL, cctl | CCTL_RST); + + if (!(mvi->flags & MVF_FLAG_SOC)) { + /* write to device control _AND_ device status register */ + pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp); + tmp &= ~PRD_REQ_MASK; + tmp |= PRD_REQ_SIZE; + pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp); + + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); + tmp &= ~PCTL_PWR_OFF; + tmp &= ~PCTL_PHY_DSBL; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); + + 
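mvs_64xx_chip_reset() above is the classic self-clearing-reset pattern: write HBA_RST, then poll until the hardware drops the bit, bounded by a timeout the driver's own comment admits is a guess (1000 polls of 10 ms each, roughly ten seconds). As a standalone sketch with the register accessors and sleep abstracted out (the HBA_RST value here is a placeholder, not the chip's real bit):

#include <stdint.h>

#define HBA_RST (1u << 0)  /* placeholder bit position for the model */

static int wait_reset(uint32_t (*read_ctl)(void),
                      void (*write_ctl)(uint32_t),
                      void (*sleep_ms)(unsigned int))
{
    int i;

    write_ctl(HBA_RST);  /* self-clearing: hardware drops it when done */

    for (i = 0; i < 1000; i++) {
        sleep_ms(10);
        if (!(read_ctl() & HBA_RST))
            return 0;
    }
    return -1;  /* still set after ~10 s: reset failed */
}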
pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); + tmp &= PCTL_PWR_OFF; + tmp &= ~PCTL_PHY_DSBL; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); + } else { + tmp = mr32(MVS_PHY_CTL); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_COM_ON; + tmp &= ~PCTL_PHY_DSBL; + tmp |= PCTL_LINK_RST; + mw32(MVS_PHY_CTL, tmp); + msleep(100); + tmp &= ~PCTL_LINK_RST; + mw32(MVS_PHY_CTL, tmp); + msleep(100); + } - /* extend open frame timeout to max */ - tmp = mvs_cr32(regs, CMD_SAS_CTL0); - tmp &= ~0xffff; - tmp |= 0x3fff; - mvs_cw32(regs, CMD_SAS_CTL0, tmp); + /* reset control */ + mw32(MVS_PCS, 0); /* MVS_PCS */ + /* init phys */ + mvs_64xx_phy_hacks(mvi); - /* workaround for WDTIMEOUT , set to 550 ms */ - mvs_cw32(regs, CMD_WD_TIMER, 0x86470); + /* enable auto port detection */ + mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN); - /* not to halt for different port op during wideport link change */ - mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d); + mw32(MVS_CMD_LIST_LO, mvi->slot_dma); + mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); - /* workaround for Seagate disk not-found OOB sequence, recv - * COMINIT before sending out COMWAKE */ - tmp = mvs_cr32(regs, CMD_PHY_MODE_21); - tmp &= 0x0000ffff; - tmp |= 0x00fa0000; - mvs_cw32(regs, CMD_PHY_MODE_21, tmp); + mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma); + mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); - tmp = mvs_cr32(regs, CMD_PHY_TIMER); - tmp &= 0x1fffffff; - tmp |= (2U << 29); /* 8 ms retry */ - mvs_cw32(regs, CMD_PHY_TIMER, tmp); + mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ); + mw32(MVS_TX_LO, mvi->tx_dma); + mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16); - /* TEST - for phy decoding error, adjust voltage levels */ - mw32(P0_VSR_ADDR + 0, 0x8); - mw32(P0_VSR_DATA + 0, 0x2F0); + mw32(MVS_RX_CFG, MVS_RX_RING_SZ); + mw32(MVS_RX_LO, mvi->rx_dma); + mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16); - mw32(P0_VSR_ADDR + 8, 0x8); - mw32(P0_VSR_DATA + 8, 0x2F0); + for (i = 0; i < mvi->chip->n_phy; i++) { + /* set phy local SAS address */ + /* should set little endian SAS address to 64xx chip */ + mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI, + cpu_to_be64(mvi->phy[i].dev_sas_addr)); - mw32(P0_VSR_ADDR + 16, 0x8); - mw32(P0_VSR_DATA + 16, 0x2F0); + mvs_64xx_enable_xmt(mvi, i); - mw32(P0_VSR_ADDR + 24, 0x8); - mw32(P0_VSR_DATA + 24, 0x2F0); + mvs_64xx_phy_reset(mvi, i, 1); + msleep(500); + mvs_64xx_detect_porttype(mvi, i); + } + if (mvi->flags & MVF_FLAG_SOC) { + /* set select registers */ + writel(0x0E008000, regs + 0x000); + writel(0x59000008, regs + 0x004); + writel(0x20, regs + 0x008); + writel(0x20, regs + 0x00c); + writel(0x20, regs + 0x010); + writel(0x20, regs + 0x014); + writel(0x20, regs + 0x018); + writel(0x20, regs + 0x01c); + } + for (i = 0; i < mvi->chip->n_phy; i++) { + /* clear phy int status */ + tmp = mvs_read_port_irq_stat(mvi, i); + tmp &= ~PHYEV_SIG_FIS; + mvs_write_port_irq_stat(mvi, i, tmp); + + /* set phy int mask */ + tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS | + PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR | + PHYEV_DEC_ERR; + mvs_write_port_irq_mask(mvi, i, tmp); + + msleep(100); + mvs_update_phyinfo(mvi, i, 1); + } + /* FIXME: update wide port bitmaps */ + + /* little endian for open address and command table, etc. 
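One idiom recurs throughout the ring setup above: the high dword of each 64-bit DMA address is written as (addr >> 16) >> 16 rather than addr >> 32. That is deliberate C portability, not a typo: shifting a value by its full width is undefined behavior, so the double shift stays well-defined when dma_addr_t is a 32-bit type (where it simply yields 0). A compilable illustration:

#include <stdint.h>
#include <stdio.h>

/* Never shifts by a full type width, so this is defined in both cases. */
#define HI_DWORD(x) ((uint32_t)(((x) >> 16) >> 16))

int main(void)
{
    uint64_t wide = 0x123456789abcdef0ull;  /* 64-bit dma_addr_t */
    uint32_t narrow = 0x9abcdef0u;          /* 32-bit dma_addr_t */

    printf("%x\n", (unsigned int)HI_DWORD(wide));    /* 12345678 */
    printf("%x\n", (unsigned int)HI_DWORD(narrow));  /* 0, and not UB */
    return 0;
}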
*/ + /* + * it seems that ( from the spec ) turning on big-endian won't + * do us any good on big-endian machines, need further confirmation + */ + cctl = mr32(MVS_CTL); + cctl |= CCTL_ENDIAN_CMD; + cctl |= CCTL_ENDIAN_DATA; + cctl &= ~CCTL_ENDIAN_OPEN; + cctl |= CCTL_ENDIAN_RSP; + mw32_f(MVS_CTL, cctl); + + /* reset CMD queue */ + tmp = mr32(MVS_PCS); + tmp |= PCS_CMD_RST; + mw32(MVS_PCS, tmp); + /* interrupt coalescing may cause missing HW interrupt in some cases, + * and the max count is 0x1ff, while our max slot is 0x200, + * it will make count 0. + */ + tmp = 0; + mw32(MVS_INT_COAL, tmp); + + tmp = 0x100; + mw32(MVS_INT_COAL_TMOUT, tmp); + + /* ladies and gentlemen, start your engines */ + mw32(MVS_TX_CFG, 0); + mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); + mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN); + /* enable CMD/CMPL_Q/RESP mode */ + mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | + PCS_CMD_EN | PCS_CMD_STOP_ERR); + + /* enable completion queue interrupt */ + tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP | + CINT_DMA_PCIE); + + mw32(MVS_INT_MASK, tmp); + + /* Enable SRS interrupt */ + mw32(MVS_INT_MASK_SRS_0, 0xFFFF); + + return 0; } -void mvs_hba_interrupt_enable(struct mvs_info *mvi) +static int mvs_64xx_ioremap(struct mvs_info *mvi) +{ + if (!mvs_ioremap(mvi, 4, 2)) + return 0; + return -1; +} + +static void mvs_64xx_iounmap(struct mvs_info *mvi) +{ + mvs_iounmap(mvi->regs); + mvs_iounmap(mvi->regs_ex); +} + +static void mvs_64xx_interrupt_enable(struct mvs_info *mvi) { void __iomem *regs = mvi->regs; u32 tmp; - tmp = mr32(GBL_CTL); + tmp = mr32(MVS_GBL_CTL); + mw32(MVS_GBL_CTL, tmp | INT_EN); +} - mw32(GBL_CTL, tmp | INT_EN); +static void mvs_64xx_interrupt_disable(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(MVS_GBL_CTL); + mw32(MVS_GBL_CTL, tmp & ~INT_EN); } -void mvs_hba_interrupt_disable(struct mvs_info *mvi) +static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq) { void __iomem *regs = mvi->regs; + u32 stat; + + if (!(mvi->flags & MVF_FLAG_SOC)) { + stat = mr32(MVS_GBL_INT_STAT); + + if (stat == 0 || stat == 0xffffffff) + return 0; + } else + stat = 1; + return stat; +} + +static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat) +{ + void __iomem *regs = mvi->regs; + + /* clear CMD_CMPLT ASAP */ + mw32_f(MVS_INT_STAT, CINT_DONE); +#ifndef MVS_USE_TASKLET + spin_lock(&mvi->lock); +#endif + mvs_int_full(mvi); +#ifndef MVS_USE_TASKLET + spin_unlock(&mvi->lock); +#endif + return IRQ_HANDLED; +} + +static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx) +{ u32 tmp; + mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32)); + mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32)); + do { + tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3)); + } while (tmp & 1 << (slot_idx % 32)); + do { + tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3)); + } while (tmp & 1 << (slot_idx % 32)); } - tmp = mr32(GBL_CTL); +static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, + u32 tfs) +{ + void __iomem *regs = mvi->regs; + u32 tmp; - mw32(GBL_CTL, tmp & ~INT_EN); + if (type == PORT_TYPE_SATA) { + tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs); + mw32(MVS_INT_STAT_SRS_0, tmp); + } + mw32(MVS_INT_STAT, CINT_CI_STOP); + tmp = mr32(MVS_PCS) | 0xFF00; + mw32(MVS_PCS, tmp); } -void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port) +static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) { void __iomem *regs = mvi->regs; u32 tmp, offs; - u8 *tfs = 
&port->taskfileset; if (*tfs == MVS_ID_NOT_MAPPED) return; offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); if (*tfs < 16) { - tmp = mr32(PCS); - mw32(PCS, tmp & ~offs); + tmp = mr32(MVS_PCS); + mw32(MVS_PCS, tmp & ~offs); } else { - tmp = mr32(CTL); - mw32(CTL, tmp & ~offs); + tmp = mr32(MVS_CTL); + mw32(MVS_CTL, tmp & ~offs); } - tmp = mr32(INT_STAT_SRS) & (1U << *tfs); + tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs); if (tmp) - mw32(INT_STAT_SRS, tmp); + mw32(MVS_INT_STAT_SRS_0, tmp); *tfs = MVS_ID_NOT_MAPPED; + return; } -u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port) +static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs) { int i; u32 tmp, offs; void __iomem *regs = mvi->regs; - if (port->taskfileset != MVS_ID_NOT_MAPPED) + if (*tfs != MVS_ID_NOT_MAPPED) return 0; - tmp = mr32(PCS); + tmp = mr32(MVS_PCS); for (i = 0; i < mvi->chip->srs_sz; i++) { if (i == 16) - tmp = mr32(CTL); + tmp = mr32(MVS_CTL); offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); if (!(tmp & offs)) { - port->taskfileset = i; + *tfs = i; if (i < 16) - mw32(PCS, tmp | offs); + mw32(MVS_PCS, tmp | offs); else - mw32(CTL, tmp | offs); - tmp = mr32(INT_STAT_SRS) & (1U << i); + mw32(MVS_CTL, tmp | offs); + tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i); if (tmp) - mw32(INT_STAT_SRS, tmp); + mw32(MVS_INT_STAT_SRS_0, tmp); return 0; } } return MVS_ID_NOT_MAPPED; } +void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd) +{ + int i; + struct scatterlist *sg; + struct mvs_prd *buf_prd = prd; + for_each_sg(scatter, sg, nr, i) { + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); + buf_prd->len = cpu_to_le32(sg_dma_len(sg)); + buf_prd++; + } +} + +static int mvs_64xx_oob_done(struct mvs_info *mvi, int i) +{ + u32 phy_st; + mvs_write_port_cfg_addr(mvi, i, + PHYR_PHY_STAT); + phy_st = mvs_read_port_cfg_data(mvi, i); + if (phy_st & PHY_OOB_DTCTD) + return 1; + return 0; +} + +static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i, + struct sas_identify_frame *id) + +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + sas_phy->linkrate = + (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; + + phy->minimum_linkrate = + (phy->phy_status & + PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; + phy->maximum_linkrate = + (phy->phy_status & + PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; + + mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); + phy->dev_info = mvs_read_port_cfg_data(mvi, i); + + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); + phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); + + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); + phy->att_dev_sas_addr = + (u64) mvs_read_port_cfg_data(mvi, i) << 32; + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); + phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); + phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr); +} + +static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i) +{ + u32 tmp; + struct mvs_phy *phy = &mvi->phy[i]; + /* workaround for HW phy decoding error on 1.5g disk drive */ + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); + tmp = mvs_read_port_vsr_data(mvi, i); + if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == + SAS_LINK_RATE_1_5_GBPS) + tmp &= ~PHY_MODE6_LATECLK; + else + tmp |= PHY_MODE6_LATECLK; + mvs_write_port_vsr_data(mvi, i, tmp); +} + +void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, + struct sas_phy_linkrates *rates) +{ + u32 lrmin = 0, 
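mvs_64xx_assign_reg_set() and mvs_64xx_free_reg_set() above manage the pool of SATA register sets as enable bits split across MVS_PCS (sets 0-15) and MVS_CTL (16 and up); stripped of the register split, the policy is simply first-clear-bit allocation. A minimal model for a pool of up to 32 sets:

#include <stdint.h>

#define NOT_MAPPED 0x7f  /* mirrors MVS_ID_NOT_MAPPED */

static int alloc_reg_set(uint32_t *bitmap, unsigned int pool_sz, uint8_t *tfs)
{
    unsigned int i;

    if (*tfs != NOT_MAPPED)  /* already holds a register set */
        return 0;

    for (i = 0; i < pool_sz; i++) {
        if (!(*bitmap & (1u << i))) {
            *bitmap |= 1u << i;
            *tfs = (uint8_t)i;
            return 0;
        }
    }
    return NOT_MAPPED;  /* pool exhausted */
}

static void free_reg_set(uint32_t *bitmap, uint8_t *tfs)
{
    if (*tfs == NOT_MAPPED)
        return;
    *bitmap &= ~(1u << *tfs);
    *tfs = NOT_MAPPED;
}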
lrmax = 0; + u32 tmp; + + tmp = mvs_read_phy_ctl(mvi, phy_id); + lrmin = (rates->minimum_linkrate << 8); + lrmax = (rates->maximum_linkrate << 12); + + if (lrmin) { + tmp &= ~(0xf << 8); + tmp |= lrmin; + } + if (lrmax) { + tmp &= ~(0xf << 12); + tmp |= lrmax; + } + mvs_write_phy_ctl(mvi, phy_id, tmp); + mvs_64xx_phy_reset(mvi, phy_id, 1); +} + +static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi) +{ + u32 tmp; + void __iomem *regs = mvi->regs; + tmp = mr32(MVS_PCS); + mw32(MVS_PCS, tmp & 0xFFFF); + mw32(MVS_PCS, tmp); + tmp = mr32(MVS_CTL); + mw32(MVS_CTL, tmp & 0xFFFF); + mw32(MVS_CTL, tmp); +} + + +u32 mvs_64xx_spi_read_data(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs_ex; + return ior32(SPI_DATA_REG_64XX); +} + +void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data) +{ + void __iomem *regs = mvi->regs_ex; + iow32(SPI_DATA_REG_64XX, data); +} + + +int mvs_64xx_spi_buildcmd(struct mvs_info *mvi, + u32 *dwCmd, + u8 cmd, + u8 read, + u8 length, + u32 addr + ) +{ + u32 dwTmp; + + dwTmp = ((u32)cmd << 24) | ((u32)length << 19); + if (read) + dwTmp |= 1U<<23; + + if (addr != MV_MAX_U32) { + dwTmp |= 1U<<22; + dwTmp |= (addr & 0x0003FFFF); + } + + *dwCmd = dwTmp; + return 0; +} + + +int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd) +{ + void __iomem *regs = mvi->regs_ex; + int retry; + + for (retry = 0; retry < 1; retry++) { + iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE); + iow32(SPI_CMD_REG_64XX, cmd); + iow32(SPI_CTRL_REG_64XX, + SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART); + } + + return 0; +} + +int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) +{ + void __iomem *regs = mvi->regs_ex; + u32 i, dwTmp; + + for (i = 0; i < timeout; i++) { + dwTmp = ior32(SPI_CTRL_REG_64XX); + if (!(dwTmp & SPI_CTRL_SPISTART)) + return 0; + msleep(10); + } + + return -1; +} + +#ifndef DISABLE_HOTPLUG_DMA_FIX +void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) +{ + int i; + struct mvs_prd *buf_prd = prd; + buf_prd += from; + for (i = 0; i < MAX_SG_ENTRY - from; i++) { + buf_prd->addr = cpu_to_le64(buf_dma); + buf_prd->len = cpu_to_le32(buf_len); + ++buf_prd; + } +} +#endif + +const struct mvs_dispatch mvs_64xx_dispatch = { + "mv64xx", + mvs_64xx_init, + NULL, + mvs_64xx_ioremap, + mvs_64xx_iounmap, + mvs_64xx_isr, + mvs_64xx_isr_status, + mvs_64xx_interrupt_enable, + mvs_64xx_interrupt_disable, + mvs_read_phy_ctl, + mvs_write_phy_ctl, + mvs_read_port_cfg_data, + mvs_write_port_cfg_data, + mvs_write_port_cfg_addr, + mvs_read_port_vsr_data, + mvs_write_port_vsr_data, + mvs_write_port_vsr_addr, + mvs_read_port_irq_stat, + mvs_write_port_irq_stat, + mvs_read_port_irq_mask, + mvs_write_port_irq_mask, + mvs_get_sas_addr, + mvs_64xx_command_active, + mvs_64xx_issue_stop, + mvs_start_delivery, + mvs_rx_update, + mvs_int_full, + mvs_64xx_assign_reg_set, + mvs_64xx_free_reg_set, + mvs_get_prd_size, + mvs_get_prd_count, + mvs_64xx_make_prd, + mvs_64xx_detect_porttype, + mvs_64xx_oob_done, + mvs_64xx_fix_phy_info, + mvs_64xx_phy_work_around, + mvs_64xx_phy_set_link_rate, + mvs_hw_max_link_rate, + mvs_64xx_phy_disable, + mvs_64xx_phy_enable, + mvs_64xx_phy_reset, + mvs_64xx_stp_reset, + mvs_64xx_clear_active_cmds, + mvs_64xx_spi_read_data, + mvs_64xx_spi_write_data, + mvs_64xx_spi_buildcmd, + mvs_64xx_spi_issuecmd, + mvs_64xx_spi_waitdataready, +#ifndef DISABLE_HOTPLUG_DMA_FIX + mvs_64xx_fix_dma, +#endif +}; + diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h index c9f399ebc926..42e947d9795e 100644 --- 
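The SPI helpers at the end of mv_64xx.c assemble the flash command word by straight bit-packing: the opcode in the top byte, a read flag at bit 23, and an optional address phase flagged at bit 22 with an 18-bit address in the low bits. Restated standalone, with MV_MAX_U32 meaning "no address phase" as in the driver:

#include <stdint.h>

#define MV_MAX_U32 0xffffffffu

/* Mirrors the packing in mvs_64xx_spi_buildcmd(). */
static uint32_t spi_build_cmd(uint8_t cmd, int read, uint8_t length,
                              uint32_t addr)
{
    uint32_t w = ((uint32_t)cmd << 24) | ((uint32_t)length << 19);

    if (read)
        w |= 1u << 23;
    if (addr != MV_MAX_U32) {
        w |= 1u << 22;           /* address phase present */
        w |= addr & 0x0003FFFFu; /* 18-bit flash address */
    }
    return w;
}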
a/drivers/scsi/mvsas/mv_64xx.h +++ b/drivers/scsi/mvsas/mv_64xx.h @@ -1,11 +1,43 @@ +/* + * Marvell 88SE64xx hardware specific header file + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA +*/ + #ifndef _MVS64XX_REG_H_ #define _MVS64XX_REG_H_ +#include + +#define MAX_LINK_RATE SAS_LINK_RATE_3_0_GBPS + /* enhanced mode registers (BAR4) */ enum hw_registers { MVS_GBL_CTL = 0x04, /* global control */ MVS_GBL_INT_STAT = 0x08, /* global irq status */ MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ + + MVS_PHY_CTL = 0x40, /* SOC PHY Control */ + MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */ + MVS_GBL_PORT_TYPE = 0xa0, /* port type */ MVS_CTL = 0x100, /* SAS/SATA port configuration */ @@ -30,17 +62,19 @@ enum hw_registers { MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ MVS_INT_STAT = 0x150, /* Central int status */ MVS_INT_MASK = 0x154, /* Central int enable */ - MVS_INT_STAT_SRS = 0x158, /* SATA register set status */ - MVS_INT_MASK_SRS = 0x15C, + MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */ + MVS_INT_MASK_SRS_0 = 0x15C, /* ports 1-3 follow after this */ MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */ MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */ - MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */ - MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */ + /* ports 5-7 follow after this */ + MVS_P4_INT_STAT = 0x200, /* Port4 interrupt status */ + MVS_P4_INT_MASK = 0x204, /* Port4 interrupt enable mask */ /* ports 1-3 follow after this */ MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */ + /* ports 5-7 follow after this */ MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */ MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */ @@ -49,20 +83,23 @@ enum hw_registers { /* ports 1-3 follow after this */ MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */ MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */ - MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */ - MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */ + /* ports 5-7 follow after this */ + MVS_P4_CFG_ADDR = 0x230, /* Port4 config address */ + MVS_P4_CFG_DATA = 0x234, /* Port4 config data */ /* ports 1-3 follow after this */ MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */ MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */ - MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */ - MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */ + /* ports 5-7 follow after this */ + MVS_P4_VSR_ADDR = 0x250, /* port4 VSR addr */ + MVS_P4_VSR_DATA = 0x254, /* port4 VSR data */ }; enum pci_cfg_registers { PCR_PHY_CTL = 0x40, PCR_PHY_CTL2 = 0x90, PCR_DEV_CTRL = 0xE8, + PCR_LINK_STAT = 0xF2, }; /* SAS/SATA Vendor Specific Port Registers */ @@ -83,10 +120,32 @@ enum sas_sata_vsp_regs { VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */ }; +enum chip_register_bits { + 
PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8), + PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12), + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), + PHY_NEG_SPP_PHYS_LINK_RATE_MASK = + (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), +}; + +#define MAX_SG_ENTRY 64 + struct mvs_prd { __le64 addr; /* 64-bit buffer address */ __le32 reserved; __le32 len; /* 16-bit length */ }; +#define SPI_CTRL_REG 0xc0 +#define SPI_CTRL_VENDOR_ENABLE (1U<<29) +#define SPI_CTRL_SPIRDY (1U<<22) +#define SPI_CTRL_SPISTART (1U<<20) + +#define SPI_CMD_REG 0xc4 +#define SPI_DATA_REG 0xc8 + +#define SPI_CTRL_REG_64XX 0x10 +#define SPI_CMD_REG_64XX 0x14 +#define SPI_DATA_REG_64XX 0x18 + #endif diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c new file mode 100644 index 000000000000..0940fae19d20 --- /dev/null +++ b/drivers/scsi/mvsas/mv_94xx.c @@ -0,0 +1,672 @@ +/* + * Marvell 88SE94xx hardware specific + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA +*/ + +#include "mv_sas.h" +#include "mv_94xx.h" +#include "mv_chips.h" + +static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i) +{ + u32 reg; + struct mvs_phy *phy = &mvi->phy[i]; + u32 phy_status; + + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3); + reg = mvs_read_port_vsr_data(mvi, i); + phy_status = ((reg & 0x3f0000) >> 16) & 0xff; + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + switch (phy_status) { + case 0x10: + phy->phy_type |= PORT_TYPE_SAS; + break; + case 0x1d: + default: + phy->phy_type |= PORT_TYPE_SATA; + break; + } +} + +static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(MVS_PCS); + tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); + mw32(MVS_PCS, tmp); +} + +static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) +{ + u32 tmp; + + tmp = mvs_read_port_irq_stat(mvi, phy_id); + tmp &= ~PHYEV_RDY_CH; + mvs_write_port_irq_stat(mvi, phy_id, tmp); + if (hard) { + tmp = mvs_read_phy_ctl(mvi, phy_id); + tmp |= PHY_RST_HARD; + mvs_write_phy_ctl(mvi, phy_id, tmp); + do { + tmp = mvs_read_phy_ctl(mvi, phy_id); + } while (tmp & PHY_RST_HARD); + } else { + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp |= PHY_RST; + mvs_write_port_vsr_data(mvi, phy_id, tmp); + } +} + +static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id) +{ + u32 tmp; + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000); +} + +static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id) +{ + mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4); + mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1); + mvs_write_port_vsr_addr(mvi, phy_id, 0x104); + 
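Note the change of method for port-type detection on the newer parts: where the 64xx read a global GBL_PORT_TYPE register, mvs_94xx_detect_porttype() above decodes a per-phy status field out of the VSR_PHY_MODE3 vendor register, treating 0x10 as a SAS partner and anything else (0x1d is explicitly listed) as SATA. The decode, isolated:

#include <stdint.h>

enum port_type { PORT_SATA, PORT_SAS };

/* Status field lives in bits 16-21 of VSR_PHY_MODE3, per the code above. */
static enum port_type detect_porttype(uint32_t vsr_phy_mode3)
{
    uint32_t status = (vsr_phy_mode3 >> 16) & 0x3f;

    return (status == 0x10) ? PORT_SAS : PORT_SATA;
}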
mvs_write_port_vsr_data(mvi, phy_id, 0x00018080); + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); + mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff); +} + +static int __devinit mvs_94xx_init(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + int i; + u32 tmp, cctl; + + mvs_show_pcie_usage(mvi); + if (mvi->flags & MVF_FLAG_SOC) { + tmp = mr32(MVS_PHY_CTL); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_PHY_DSBL; + mw32(MVS_PHY_CTL, tmp); + } + + /* Init Chip */ + /* make sure RST is set; HBA_RST /should/ have done that for us */ + cctl = mr32(MVS_CTL) & 0xFFFF; + if (cctl & CCTL_RST) + cctl &= ~CCTL_RST; + else + mw32_f(MVS_CTL, cctl | CCTL_RST); + + if (mvi->flags & MVF_FLAG_SOC) { + tmp = mr32(MVS_PHY_CTL); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_COM_ON; + tmp &= ~PCTL_PHY_DSBL; + tmp |= PCTL_LINK_RST; + mw32(MVS_PHY_CTL, tmp); + msleep(100); + tmp &= ~PCTL_LINK_RST; + mw32(MVS_PHY_CTL, tmp); + msleep(100); + } + + /* reset control */ + mw32(MVS_PCS, 0); /* MVS_PCS */ + mw32(MVS_STP_REG_SET_0, 0); + mw32(MVS_STP_REG_SET_1, 0); + + /* init phys */ + mvs_phy_hacks(mvi); + + /* disable Multiplexing, enable phy implemented */ + mw32(MVS_PORTS_IMP, 0xFF); + + + mw32(MVS_PA_VSR_ADDR, 0x00000104); + mw32(MVS_PA_VSR_PORT, 0x00018080); + mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8); + mw32(MVS_PA_VSR_PORT, 0x0084ffff); + + /* set LED blink when IO*/ + mw32(MVS_PA_VSR_ADDR, 0x00000030); + tmp = mr32(MVS_PA_VSR_PORT); + tmp &= 0xFFFF00FF; + tmp |= 0x00003300; + mw32(MVS_PA_VSR_PORT, tmp); + + mw32(MVS_CMD_LIST_LO, mvi->slot_dma); + mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); + + mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma); + mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); + + mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ); + mw32(MVS_TX_LO, mvi->tx_dma); + mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16); + + mw32(MVS_RX_CFG, MVS_RX_RING_SZ); + mw32(MVS_RX_LO, mvi->rx_dma); + mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16); + + for (i = 0; i < mvi->chip->n_phy; i++) { + mvs_94xx_phy_disable(mvi, i); + /* set phy local SAS address */ + mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4, + (mvi->phy[i].dev_sas_addr)); + + mvs_94xx_enable_xmt(mvi, i); + mvs_94xx_phy_enable(mvi, i); + + mvs_94xx_phy_reset(mvi, i, 1); + msleep(500); + mvs_94xx_detect_porttype(mvi, i); + } + + if (mvi->flags & MVF_FLAG_SOC) { + /* set select registers */ + writel(0x0E008000, regs + 0x000); + writel(0x59000008, regs + 0x004); + writel(0x20, regs + 0x008); + writel(0x20, regs + 0x00c); + writel(0x20, regs + 0x010); + writel(0x20, regs + 0x014); + writel(0x20, regs + 0x018); + writel(0x20, regs + 0x01c); + } + for (i = 0; i < mvi->chip->n_phy; i++) { + /* clear phy int status */ + tmp = mvs_read_port_irq_stat(mvi, i); + tmp &= ~PHYEV_SIG_FIS; + mvs_write_port_irq_stat(mvi, i, tmp); + + /* set phy int mask */ + tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | + PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ; + mvs_write_port_irq_mask(mvi, i, tmp); + + msleep(100); + mvs_update_phyinfo(mvi, i, 1); + } + + /* FIXME: update wide port bitmaps */ + + /* little endian for open address and command table, etc. 
*/ + /* + * it seems that ( from the spec ) turning on big-endian won't + * do us any good on big-endian machines, need further confirmation + */ + cctl = mr32(MVS_CTL); + cctl |= CCTL_ENDIAN_CMD; + cctl |= CCTL_ENDIAN_DATA; + cctl &= ~CCTL_ENDIAN_OPEN; + cctl |= CCTL_ENDIAN_RSP; + mw32_f(MVS_CTL, cctl); + + /* reset CMD queue */ + tmp = mr32(MVS_PCS); + tmp |= PCS_CMD_RST; + mw32(MVS_PCS, tmp); + /* interrupt coalescing may cause missing HW interrupt in some cases, + * and the max count is 0x1ff, while our max slot is 0x200, + * it will make count 0. + */ + tmp = 0; + mw32(MVS_INT_COAL, tmp); + + tmp = 0x100; + mw32(MVS_INT_COAL_TMOUT, tmp); + + /* ladies and gentlemen, start your engines */ + mw32(MVS_TX_CFG, 0); + mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); + mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN); + /* enable CMD/CMPL_Q/RESP mode */ + mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN | + PCS_CMD_EN | PCS_CMD_STOP_ERR); + + /* enable completion queue interrupt */ + tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP | + CINT_DMA_PCIE); + tmp |= CINT_PHY_MASK; + mw32(MVS_INT_MASK, tmp); + + /* Enable SRS interrupt */ + mw32(MVS_INT_MASK_SRS_0, 0xFFFF); + + return 0; +} + +static int mvs_94xx_ioremap(struct mvs_info *mvi) +{ + if (!mvs_ioremap(mvi, 2, -1)) { + mvi->regs_ex = mvi->regs + 0x10200; + mvi->regs += 0x20000; + if (mvi->id == 1) + mvi->regs += 0x4000; + return 0; + } + return -1; +} + +static void mvs_94xx_iounmap(struct mvs_info *mvi) +{ + if (mvi->regs) { + mvi->regs -= 0x20000; + if (mvi->id == 1) + mvi->regs -= 0x4000; + mvs_iounmap(mvi->regs); + } +} + +static void mvs_94xx_interrupt_enable(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs_ex; + u32 tmp; + + tmp = mr32(MVS_GBL_CTL); + tmp |= (IRQ_SAS_A | IRQ_SAS_B); + mw32(MVS_GBL_INT_STAT, tmp); + writel(tmp, regs + 0x0C); + writel(tmp, regs + 0x10); + writel(tmp, regs + 0x14); + writel(tmp, regs + 0x18); + mw32(MVS_GBL_CTL, tmp); +} + +static void mvs_94xx_interrupt_disable(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs_ex; + u32 tmp; + + tmp = mr32(MVS_GBL_CTL); + + tmp &= ~(IRQ_SAS_A | IRQ_SAS_B); + mw32(MVS_GBL_INT_STAT, tmp); + writel(tmp, regs + 0x0C); + writel(tmp, regs + 0x10); + writel(tmp, regs + 0x14); + writel(tmp, regs + 0x18); + mw32(MVS_GBL_CTL, tmp); +} + +static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq) +{ + void __iomem *regs = mvi->regs_ex; + u32 stat = 0; + if (!(mvi->flags & MVF_FLAG_SOC)) { + stat = mr32(MVS_GBL_INT_STAT); + + if (!(stat & (IRQ_SAS_A | IRQ_SAS_B))) + return 0; + } + return stat; +} + +static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat) +{ + void __iomem *regs = mvi->regs; + + if (((stat & IRQ_SAS_A) && mvi->id == 0) || + ((stat & IRQ_SAS_B) && mvi->id == 1)) { + mw32_f(MVS_INT_STAT, CINT_DONE); + #ifndef MVS_USE_TASKLET + spin_lock(&mvi->lock); + #endif + mvs_int_full(mvi); + #ifndef MVS_USE_TASKLET + spin_unlock(&mvi->lock); + #endif + } + return IRQ_HANDLED; +} + +static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx) +{ + u32 tmp; + mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32)); + do { + tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3)); + } while (tmp & 1 << (slot_idx % 32)); +} + +static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, + u32 tfs) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + if (type == PORT_TYPE_SATA) { + tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs); + mw32(MVS_INT_STAT_SRS_0, tmp); + } + mw32(MVS_INT_STAT, CINT_CI_STOP); + tmp = 
mr32(MVS_PCS) | 0xFF00; + mw32(MVS_PCS, tmp); +} + +static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + u8 reg_set = *tfs; + + if (*tfs == MVS_ID_NOT_MAPPED) + return; + + mvi->sata_reg_set &= ~bit(reg_set); + if (reg_set < 32) { + w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set); + tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set; + if (tmp) + mw32(MVS_INT_STAT_SRS_0, tmp); + } else { + w_reg_set_enable(reg_set, mvi->sata_reg_set); + tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set; + if (tmp) + mw32(MVS_INT_STAT_SRS_1, tmp); + } + + *tfs = MVS_ID_NOT_MAPPED; + + return; +} + +static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs) +{ + int i; + void __iomem *regs = mvi->regs; + + if (*tfs != MVS_ID_NOT_MAPPED) + return 0; + + i = mv_ffc64(mvi->sata_reg_set); + if (i > 32) { + mvi->sata_reg_set |= bit(i); + w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32)); + *tfs = i; + return 0; + } else if (i >= 0) { + mvi->sata_reg_set |= bit(i); + w_reg_set_enable(i, (u32)mvi->sata_reg_set); + *tfs = i; + return 0; + } + return MVS_ID_NOT_MAPPED; +} + +static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd) +{ + int i; + struct scatterlist *sg; + struct mvs_prd *buf_prd = prd; + for_each_sg(scatter, sg, nr, i) { + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); + buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg)); + buf_prd++; + } +} + +static int mvs_94xx_oob_done(struct mvs_info *mvi, int i) +{ + u32 phy_st; + phy_st = mvs_read_phy_ctl(mvi, i); + if (phy_st & PHY_READY_MASK) /* phy ready */ + return 1; + return 0; +} + +static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id, + struct sas_identify_frame *id) +{ + int i; + u32 id_frame[7]; + + for (i = 0; i < 7; i++) { + mvs_write_port_cfg_addr(mvi, port_id, + CONFIG_ID_FRAME0 + i * 4); + id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); + } + memcpy(id, id_frame, 28); +} + +static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id, + struct sas_identify_frame *id) +{ + int i; + u32 id_frame[7]; + + /* mvs_hexdump(28, (u8 *)id_frame, 0); */ + for (i = 0; i < 7; i++) { + mvs_write_port_cfg_addr(mvi, port_id, + CONFIG_ATT_ID_FRAME0 + i * 4); + id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); + mv_dprintk("94xx phy %d atta frame %d %x.\n", + port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]); + } + /* mvs_hexdump(28, (u8 *)id_frame, 0); */ + memcpy(id, id_frame, 28); +} + +static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id) +{ + u32 att_dev_info = 0; + + att_dev_info |= id->dev_type; + if (id->stp_iport) + att_dev_info |= PORT_DEV_STP_INIT; + if (id->smp_iport) + att_dev_info |= PORT_DEV_SMP_INIT; + if (id->ssp_iport) + att_dev_info |= PORT_DEV_SSP_INIT; + if (id->stp_tport) + att_dev_info |= PORT_DEV_STP_TRGT; + if (id->smp_tport) + att_dev_info |= PORT_DEV_SMP_TRGT; + if (id->ssp_tport) + att_dev_info |= PORT_DEV_SSP_TRGT; + + att_dev_info |= (u32)id->phy_id<<24; + return att_dev_info; +} + +static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id) +{ + return mvs_94xx_make_dev_info(id); +} + +static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i, + struct sas_identify_frame *id) +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status); + sas_phy->linkrate = + (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; + sas_phy->linkrate 
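/*
 * Illustration (not part of the patch): the 94xx tracks its SATA register
 * sets in the 64-bit mvi->sata_reg_set bitmap, mirrored into the two
 * 32-bit MVS_STP_REG_SET_0/1 windows. The split used by
 * mvs_94xx_free_reg_set() above is "low word for sets 0-31, high word for
 * the rest"; a minimal sketch of enabling one set:
 *
 *	mvi->sata_reg_set |= bit(reg_set);
 *	if (reg_set < 32)
 *		w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
 *	else
 *		w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32));
 */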
+= 0x8; + mv_dprintk("get link rate is %d\n", sas_phy->linkrate); + phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; + phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS; + mvs_94xx_get_dev_identify_frame(mvi, i, id); + phy->dev_info = mvs_94xx_make_dev_info(id); + + if (phy->phy_type & PORT_TYPE_SAS) { + mvs_94xx_get_att_identify_frame(mvi, i, id); + phy->att_dev_info = mvs_94xx_make_att_info(id); + phy->att_dev_sas_addr = *(u64 *)id->sas_addr; + } else { + phy->att_dev_info = PORT_DEV_STP_TRGT | 1; + } + +} + +void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, + struct sas_phy_linkrates *rates) +{ + /* TODO */ +} + +static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi) +{ + u32 tmp; + void __iomem *regs = mvi->regs; + tmp = mr32(MVS_STP_REG_SET_0); + mw32(MVS_STP_REG_SET_0, 0); + mw32(MVS_STP_REG_SET_0, tmp); + tmp = mr32(MVS_STP_REG_SET_1); + mw32(MVS_STP_REG_SET_1, 0); + mw32(MVS_STP_REG_SET_1, tmp); +} + + +u32 mvs_94xx_spi_read_data(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + return mr32(SPI_RD_DATA_REG_94XX); +} + +void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + mw32(SPI_RD_DATA_REG_94XX, data); +} + + +int mvs_94xx_spi_buildcmd(struct mvs_info *mvi, + u32 *dwCmd, + u8 cmd, + u8 read, + u8 length, + u32 addr + ) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + u32 dwTmp; + + dwTmp = ((u32)cmd << 8) | ((u32)length << 4); + if (read) + dwTmp |= SPI_CTRL_READ_94XX; + + if (addr != MV_MAX_U32) { + mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL)); + dwTmp |= SPI_ADDR_VLD_94XX; + } + + *dwCmd = dwTmp; + return 0; +} + + +int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX); + + return 0; +} + +int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + u32 i, dwTmp; + + for (i = 0; i < timeout; i++) { + dwTmp = mr32(SPI_CTRL_REG_94XX); + if (!(dwTmp & SPI_CTRL_SpiStart_94XX)) + return 0; + msleep(10); + } + + return -1; +} + +#ifndef DISABLE_HOTPLUG_DMA_FIX +void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) +{ + int i; + struct mvs_prd *buf_prd = prd; + buf_prd += from; + for (i = 0; i < MAX_SG_ENTRY - from; i++) { + buf_prd->addr = cpu_to_le64(buf_dma); + buf_prd->im_len.len = cpu_to_le32(buf_len); + ++buf_prd; + } +} +#endif + +const struct mvs_dispatch mvs_94xx_dispatch = { + "mv94xx", + mvs_94xx_init, + NULL, + mvs_94xx_ioremap, + mvs_94xx_iounmap, + mvs_94xx_isr, + mvs_94xx_isr_status, + mvs_94xx_interrupt_enable, + mvs_94xx_interrupt_disable, + mvs_read_phy_ctl, + mvs_write_phy_ctl, + mvs_read_port_cfg_data, + mvs_write_port_cfg_data, + mvs_write_port_cfg_addr, + mvs_read_port_vsr_data, + mvs_write_port_vsr_data, + mvs_write_port_vsr_addr, + mvs_read_port_irq_stat, + mvs_write_port_irq_stat, + mvs_read_port_irq_mask, + mvs_write_port_irq_mask, + mvs_get_sas_addr, + mvs_94xx_command_active, + mvs_94xx_issue_stop, + mvs_start_delivery, + mvs_rx_update, + mvs_int_full, + mvs_94xx_assign_reg_set, + mvs_94xx_free_reg_set, + mvs_get_prd_size, + mvs_get_prd_count, + mvs_94xx_make_prd, + mvs_94xx_detect_porttype, + mvs_94xx_oob_done, + mvs_94xx_fix_phy_info, + NULL, + mvs_94xx_phy_set_link_rate, + mvs_hw_max_link_rate, + mvs_94xx_phy_disable, + mvs_94xx_phy_enable, + mvs_94xx_phy_reset, + NULL, + mvs_94xx_clear_active_cmds, + mvs_94xx_spi_read_data, + mvs_94xx_spi_write_data, + 
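/*
 * Illustration (not part of the patch): the SPI helpers above are meant
 * to be used as a build/issue/poll sequence against the flash controller
 * that lives at the start of the BAR (regs_ex - 0x10200). A sketch of one
 * 4-byte read, where the 0x03 opcode is a placeholder and not a constant
 * documented in this patch:
 *
 *	u32 cmd, data = 0;
 *
 *	if (!mvs_94xx_spi_buildcmd(mvi, &cmd, 0x03, 1, 4, addr) &&
 *	    !mvs_94xx_spi_issuecmd(mvi, cmd) &&
 *	    !mvs_94xx_spi_waitdataready(mvi, 1000))
 *		data = mvs_94xx_spi_read_data(mvi);
 */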
mvs_94xx_spi_buildcmd, + mvs_94xx_spi_issuecmd, + mvs_94xx_spi_waitdataready, +#ifndef DISABLE_HOTPLUG_DMA_FIX + mvs_94xx_fix_dma, +#endif +}; + diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h new file mode 100644 index 000000000000..23ed9b164669 --- /dev/null +++ b/drivers/scsi/mvsas/mv_94xx.h @@ -0,0 +1,222 @@ +/* + * Marvell 88SE94xx hardware specific head file + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA +*/ + +#ifndef _MVS94XX_REG_H_ +#define _MVS94XX_REG_H_ + +#include + +#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS + +enum hw_registers { + MVS_GBL_CTL = 0x04, /* global control */ + MVS_GBL_INT_STAT = 0x00, /* global irq status */ + MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ + + MVS_PHY_CTL = 0x40, /* SOC PHY Control */ + MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */ + + MVS_GBL_PORT_TYPE = 0xa0, /* port type */ + + MVS_CTL = 0x100, /* SAS/SATA port configuration */ + MVS_PCS = 0x104, /* SAS/SATA port control/status */ + MVS_CMD_LIST_LO = 0x108, /* cmd list addr */ + MVS_CMD_LIST_HI = 0x10C, + MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */ + MVS_RX_FIS_HI = 0x114, + MVS_STP_REG_SET_0 = 0x118, /* STP/SATA Register Set Enable */ + MVS_STP_REG_SET_1 = 0x11C, + MVS_TX_CFG = 0x120, /* TX configuration */ + MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ + MVS_TX_HI = 0x128, + + MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */ + MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */ + MVS_RX_CFG = 0x134, /* RX configuration */ + MVS_RX_LO = 0x138, /* RX (completion) ring addr */ + MVS_RX_HI = 0x13C, + MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */ + + MVS_INT_COAL = 0x148, /* Int coalescing config */ + MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ + MVS_INT_STAT = 0x150, /* Central int status */ + MVS_INT_MASK = 0x154, /* Central int enable */ + MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */ + MVS_INT_MASK_SRS_0 = 0x15C, + MVS_INT_STAT_SRS_1 = 0x160, + MVS_INT_MASK_SRS_1 = 0x164, + MVS_NON_NCQ_ERR_0 = 0x168, /* SRS Non-specific NCQ Error */ + MVS_NON_NCQ_ERR_1 = 0x16C, + MVS_CMD_ADDR = 0x170, /* Command register port (addr) */ + MVS_CMD_DATA = 0x174, /* Command register port (data) */ + MVS_MEM_PARITY_ERR = 0x178, /* Memory parity error */ + + /* ports 1-3 follow after this */ + MVS_P0_INT_STAT = 0x180, /* port0 interrupt status */ + MVS_P0_INT_MASK = 0x184, /* port0 interrupt mask */ + /* ports 5-7 follow after this */ + MVS_P4_INT_STAT = 0x1A0, /* Port4 interrupt status */ + MVS_P4_INT_MASK = 0x1A4, /* Port4 interrupt enable mask */ + + /* ports 1-3 follow after this */ + MVS_P0_SER_CTLSTAT = 0x1D0, /* port0 serial control/status */ + /* ports 5-7 follow after this */ + MVS_P4_SER_CTLSTAT = 0x1E0, /* port4 serial control/status */ + + /* ports 1-3 follow after this */ + 
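/*
 * Illustration (not part of the patch): the "ports 1-3 follow after this"
 * banks are addressed with a fixed stride from the port-0 base, and ports
 * 4-7 repeat the layout from a second base. That is exactly what
 * mvs_read_port()/mvs_write_port() in mv_chips.h encode, e.g. for the
 * config address registers below (addr/data pairs, so an 8-byte stride):
 *
 *	off = (port < 4) ? MVS_P0_CFG_ADDR + port * 8
 *			 : MVS_P4_CFG_ADDR + (port - 4) * 8;
 */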
MVS_P0_CFG_ADDR = 0x200, /* port0 phy register address */ + MVS_P0_CFG_DATA = 0x204, /* port0 phy register data */ + /* ports 5-7 follow after this */ + MVS_P4_CFG_ADDR = 0x220, /* Port4 config address */ + MVS_P4_CFG_DATA = 0x224, /* Port4 config data */ + + /* phys 1-3 follow after this */ + MVS_P0_VSR_ADDR = 0x250, /* phy0 VSR address */ + MVS_P0_VSR_DATA = 0x254, /* phy0 VSR data */ + /* phys 1-3 follow after this */ + /* multiplexing */ + MVS_P4_VSR_ADDR = 0x250, /* phy4 VSR address */ + MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */ + MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */ + MVS_PA_VSR_PORT = 0x294, /* All port VSR data */ +}; + +enum pci_cfg_registers { + PCR_PHY_CTL = 0x40, + PCR_PHY_CTL2 = 0x90, + PCR_DEV_CTRL = 0x78, + PCR_LINK_STAT = 0x82, +}; + +/* SAS/SATA Vendor Specific Port Registers */ +enum sas_sata_vsp_regs { + VSR_PHY_STAT = 0x00 * 4, /* Phy Status */ + VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */ + VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */ + VSR_PHY_MODE3 = 0x03 * 4, /* pll */ + VSR_PHY_MODE4 = 0x04 * 4, /* VCO */ + VSR_PHY_MODE5 = 0x05 * 4, /* Rx */ + VSR_PHY_MODE6 = 0x06 * 4, /* CDR */ + VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */ + VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */ + VSR_PHY_MODE9 = 0x09 * 4, /* Test */ + VSR_PHY_MODE10 = 0x0A * 4, /* Power */ + VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */ + VSR_PHY_VS0 = 0x0C * 4, /* Vendor Specific 0 */ + VSR_PHY_VS1 = 0x0D * 4, /* Vendor Specific 1 */ +}; + +enum chip_register_bits { + PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8), + PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8), + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12), + PHY_NEG_SPP_PHYS_LINK_RATE_MASK = + (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), +}; + +enum pci_interrupt_cause { + /* MAIN_IRQ_CAUSE (R10200) Bits */ + IRQ_COM_IN_I2O_IOP0 = (1 << 0), + IRQ_COM_IN_I2O_IOP1 = (1 << 1), + IRQ_COM_IN_I2O_IOP2 = (1 << 2), + IRQ_COM_IN_I2O_IOP3 = (1 << 3), + IRQ_COM_OUT_I2O_HOS0 = (1 << 4), + IRQ_COM_OUT_I2O_HOS1 = (1 << 5), + IRQ_COM_OUT_I2O_HOS2 = (1 << 6), + IRQ_COM_OUT_I2O_HOS3 = (1 << 7), + IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8), + IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9), + IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10), + IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11), + IRQ_PCIF_DRBL0 = (1 << 12), + IRQ_PCIF_DRBL1 = (1 << 13), + IRQ_PCIF_DRBL2 = (1 << 14), + IRQ_PCIF_DRBL3 = (1 << 15), + IRQ_XOR_A = (1 << 16), + IRQ_XOR_B = (1 << 17), + IRQ_SAS_A = (1 << 18), + IRQ_SAS_B = (1 << 19), + IRQ_CPU_CNTRL = (1 << 20), + IRQ_GPIO = (1 << 21), + IRQ_UART = (1 << 22), + IRQ_SPI = (1 << 23), + IRQ_I2C = (1 << 24), + IRQ_SGPIO = (1 << 25), + IRQ_COM_ERR = (1 << 29), + IRQ_I2O_ERR = (1 << 30), + IRQ_PCIE_ERR = (1 << 31), +}; + +#define MAX_SG_ENTRY 255 + +struct mvs_prd_imt { + __le32 len:22; + u8 _r_a:2; + u8 misc_ctl:4; + u8 inter_sel:4; +}; + +struct mvs_prd { + /* 64-bit buffer address */ + __le64 addr; + /* 22-bit length */ + struct mvs_prd_imt im_len; +} __attribute__ ((packed)); + +#define SPI_CTRL_REG_94XX 0xc800 +#define SPI_ADDR_REG_94XX 0xc804 +#define SPI_WR_DATA_REG_94XX 0xc808 +#define SPI_RD_DATA_REG_94XX 0xc80c +#define SPI_CTRL_READ_94XX (1U << 2) +#define SPI_ADDR_VLD_94XX (1U << 1) +#define SPI_CTRL_SpiStart_94XX (1U << 0) + +#define mv_ffc(x) ffz(x) + +static inline int +mv_ffc64(u64 v) +{ + int i; + i = mv_ffc((u32)v); + if (i >= 0) + return i; + i = mv_ffc((u32)(v>>32)); + + if (i != 0) + return 32 + i; + + return -1; +} + +#define r_reg_set_enable(i) \ + (((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \ + mr32(MVS_STP_REG_SET_0)) + +#define w_reg_set_enable(i, tmp) \ + (((i) > 31) ? 
mw32(MVS_STP_REG_SET_1, tmp) : \ + mw32(MVS_STP_REG_SET_0, tmp)) + +extern const struct mvs_dispatch mvs_94xx_dispatch; +#endif + diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h index cf74b7a3f643..a67e1c4172f9 100644 --- a/drivers/scsi/mvsas/mv_chips.h +++ b/drivers/scsi/mvsas/mv_chips.h @@ -1,46 +1,81 @@ +/* + * Marvell 88SE64xx/88SE94xx register IO interface + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA +*/ + + #ifndef _MV_CHIPS_H_ #define _MV_CHIPS_H_ -#define mr32(reg) readl(regs + MVS_##reg) -#define mw32(reg,val) writel((val), regs + MVS_##reg) -#define mw32_f(reg,val) do { \ - writel((val), regs + MVS_##reg); \ - readl(regs + MVS_##reg); \ - } while (0) +#define mr32(reg) readl(regs + reg) +#define mw32(reg, val) writel((val), regs + reg) +#define mw32_f(reg, val) do { \ + mw32(reg, val); \ + mr32(reg); \ + } while (0) -static inline u32 mvs_cr32(void __iomem *regs, u32 addr) +#define iow32(reg, val) outl(val, (unsigned long)(regs + reg)) +#define ior32(reg) inl((unsigned long)(regs + reg)) +#define iow16(reg, val) outw((unsigned long)(val, regs + reg)) +#define ior16(reg) inw((unsigned long)(regs + reg)) +#define iow8(reg, val) outb((unsigned long)(val, regs + reg)) +#define ior8(reg) inb((unsigned long)(regs + reg)) + +static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr) { - mw32(CMD_ADDR, addr); - return mr32(CMD_DATA); + void __iomem *regs = mvi->regs; + mw32(MVS_CMD_ADDR, addr); + return mr32(MVS_CMD_DATA); } -static inline void mvs_cw32(void __iomem *regs, u32 addr, u32 val) +static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val) { - mw32(CMD_ADDR, addr); - mw32(CMD_DATA, val); + void __iomem *regs = mvi->regs; + mw32(MVS_CMD_ADDR, addr); + mw32(MVS_CMD_DATA, val); } static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port) { void __iomem *regs = mvi->regs; - return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4): - mr32(P4_SER_CTLSTAT + (port - 4) * 4); + return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) : + mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4); } static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val) { void __iomem *regs = mvi->regs; if (port < 4) - mw32(P0_SER_CTLSTAT + port * 4, val); + mw32(MVS_P0_SER_CTLSTAT + port * 4, val); else - mw32(P4_SER_CTLSTAT + (port - 4) * 4, val); + mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val); } -static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port) +static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off, + u32 off2, u32 port) { void __iomem *regs = mvi->regs + off; void __iomem *regs2 = mvi->regs + off2; - return (port < 4)?readl(regs + port * 8): + return (port < 4) ? 
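/*
 * Illustration (not part of the patch): mvs_cr32()/mvs_cw32() above
 * implement an indirect register window - the target offset is written to
 * MVS_CMD_ADDR and the payload then moves through MVS_CMD_DATA. An
 * open-coded read of one "command set" register, for comparison:
 *
 *	void __iomem *regs = mvi->regs;
 *	u32 val;
 *
 *	mw32(MVS_CMD_ADDR, CMD_PHY_TIMER);
 *	val = mr32(MVS_CMD_DATA);
 */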
readl(regs + port * 8) : readl(regs2 + (port - 4) * 8); } @@ -61,16 +96,19 @@ static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port) MVS_P4_CFG_DATA, port); } -static inline void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val) +static inline void mvs_write_port_cfg_data(struct mvs_info *mvi, + u32 port, u32 val) { mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val); } -static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr) +static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi, + u32 port, u32 addr) { mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr); + mdelay(10); } static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) @@ -79,16 +117,19 @@ static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) MVS_P4_VSR_DATA, port); } -static inline void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val) +static inline void mvs_write_port_vsr_data(struct mvs_info *mvi, + u32 port, u32 val) { mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val); } -static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr) +static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi, + u32 port, u32 addr) { mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr); + mdelay(10); } static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) @@ -97,7 +138,8 @@ static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) MVS_P4_INT_STAT, port); } -static inline void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val) +static inline void mvs_write_port_irq_stat(struct mvs_info *mvi, + u32 port, u32 val) { mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val); @@ -107,12 +149,132 @@ static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port) { return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port); + } -static inline void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val) +static inline void mvs_write_port_irq_mask(struct mvs_info *mvi, + u32 port, u32 val) { mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val); } -#endif +static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi) +{ + u32 tmp; + + /* workaround for SATA R-ERR, to ignore phy glitch */ + tmp = mvs_cr32(mvi, CMD_PHY_TIMER); + tmp &= ~(1 << 9); + tmp |= (1 << 10); + mvs_cw32(mvi, CMD_PHY_TIMER, tmp); + + /* enable retry 127 times */ + mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f); + + /* extend open frame timeout to max */ + tmp = mvs_cr32(mvi, CMD_SAS_CTL0); + tmp &= ~0xffff; + tmp |= 0x3fff; + mvs_cw32(mvi, CMD_SAS_CTL0, tmp); + + /* workaround for WDTIMEOUT , set to 550 ms */ + mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000); + + /* not to halt for different port op during wideport link change */ + mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d); + + /* workaround for Seagate disk not-found OOB sequence, recv + * COMINIT before sending out COMWAKE */ + tmp = mvs_cr32(mvi, CMD_PHY_MODE_21); + tmp &= 0x0000ffff; + tmp |= 0x00fa0000; + mvs_cw32(mvi, CMD_PHY_MODE_21, tmp); + + tmp = mvs_cr32(mvi, CMD_PHY_TIMER); + tmp &= 0x1fffffff; + tmp |= (2U << 29); /* 8 ms retry */ + mvs_cw32(mvi, CMD_PHY_TIMER, tmp); +} + +static inline void mvs_int_sata(struct mvs_info *mvi) +{ + u32 tmp; + void __iomem *regs = mvi->regs; + tmp = mr32(MVS_INT_STAT_SRS_0); + if (tmp) + mw32(MVS_INT_STAT_SRS_0, tmp); + MVS_CHIP_DISP->clear_active_cmds(mvi); +} + +static inline void mvs_int_full(struct mvs_info 
*mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp, stat; + int i; + + stat = mr32(MVS_INT_STAT); + mvs_int_rx(mvi, false); + + for (i = 0; i < mvi->chip->n_phy; i++) { + tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); + if (tmp) + mvs_int_port(mvi, i, tmp); + } + + if (stat & CINT_SRS) + mvs_int_sata(mvi); + + mw32(MVS_INT_STAT, stat); +} + +static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx) +{ + void __iomem *regs = mvi->regs; + mw32(MVS_TX_PROD_IDX, tx); +} + +static inline u32 mvs_rx_update(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + return mr32(MVS_RX_CONS_IDX); +} + +static inline u32 mvs_get_prd_size(void) +{ + return sizeof(struct mvs_prd); +} + +static inline u32 mvs_get_prd_count(void) +{ + return MAX_SG_ENTRY; +} + +static inline void mvs_show_pcie_usage(struct mvs_info *mvi) +{ + u16 link_stat, link_spd; + const char *spd[] = { + "UnKnown", + "2.5", + "5.0", + }; + if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0) + return; + + pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat); + link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS; + if (link_spd >= 3) + link_spd = 0; + dev_printk(KERN_INFO, mvi->dev, + "mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n", + (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS, + spd[link_spd]); +} + +static inline u32 mvs_hw_max_link_rate(void) +{ + return MAX_LINK_RATE; +} + +#endif /* _MV_CHIPS_H_ */ + diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h index d8e96a3e5a21..f8cb9defb961 100644 --- a/drivers/scsi/mvsas/mv_defs.h +++ b/drivers/scsi/mvsas/mv_defs.h @@ -1,53 +1,66 @@ /* - mv_defs.h - Marvell 88SE6440 SAS/SATA support - - Copyright 2007 Red Hat, Inc. - Copyright 2008 Marvell. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2, - or (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public - License along with this program; see the file COPYING. If not, - write to the Free Software Foundation, 675 Mass Ave, Cambridge, - MA 02139, USA. - - */ + * Marvell 88SE64xx/88SE94xx const head file + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA +*/ #ifndef _MV_DEFS_H_ #define _MV_DEFS_H_ + +enum chip_flavors { + chip_6320, + chip_6440, + chip_6485, + chip_9480, + chip_9180, +}; + /* driver compile-time configuration */ enum driver_configuration { + MVS_SLOTS = 512, /* command slots */ MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ /* software requires power-of-2 ring size */ + MVS_SOC_SLOTS = 64, + MVS_SOC_TX_RING_SZ = MVS_SOC_SLOTS * 2, + MVS_SOC_RX_RING_SZ = MVS_SOC_SLOTS * 2, - MVS_SLOTS = 512, /* command slots */ MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */ MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ MVS_OAF_SZ = 64, /* Open address frame buffer size */ - - MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */ - - MVS_QUEUE_SIZE = 30, /* Support Queue depth */ - MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */ + MVS_QUEUE_SIZE = 32, /* Support Queue depth */ + MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */ + MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2, }; /* unchangeable hardware details */ enum hardware_details { MVS_MAX_PHYS = 8, /* max. possible phys */ MVS_MAX_PORTS = 8, /* max. possible ports */ - MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100), + MVS_SOC_PHYS = 4, /* soc phys */ + MVS_SOC_PORTS = 4, /* soc ports */ + MVS_MAX_DEVICES = 1024, /* max supported devices */ }; /* peripheral registers (BAR2) */ @@ -133,6 +146,8 @@ enum hw_register_bits { CINT_PORT = (1U << 8), /* port0 event */ CINT_PORT_MASK_OFFSET = 8, CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET), + CINT_PHY_MASK_OFFSET = 4, + CINT_PHY_MASK = (0x0F << CINT_PHY_MASK_OFFSET), /* TX (delivery) ring bits */ TXQ_CMD_SHIFT = 29, @@ -142,7 +157,11 @@ enum hw_register_bits { TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ + TXQ_MODE_TARGET = 0, + TXQ_MODE_INITIATOR = 1, TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */ + TXQ_PRI_NORMAL = 0, + TXQ_PRI_HIGH = 1, TXQ_SRS_SHIFT = 20, /* SATA register set */ TXQ_SRS_MASK = 0x7f, TXQ_PHY_SHIFT = 12, /* PHY bitmap */ @@ -175,6 +194,8 @@ enum hw_register_bits { MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */ MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */ + MCH_SSP_MODE_PASSTHRU = 1, + MCH_SSP_MODE_NORMAL = 0, MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */ MCH_FBURST = (1U << 11), /* first burst (SSP) */ MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */ @@ -199,15 +220,12 @@ enum hw_register_bits { PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */ PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */ PHY_RST = (1U << 0), /* phy reset */ - PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8), - PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12), - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), - PHY_NEG_SPP_PHYS_LINK_RATE_MASK = - (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), PHY_READY_MASK = (1U << 20), /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */ PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */ + PHYEV_DCDR_ERR = (1U << 23), /* STP Decoder Error */ + PHYEV_CRC_ERR = (1U << 22), /* STP CRC Error */ PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */ PHYEV_AN = (1U << 18), /* SATA async notification */ 
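/*
 * Illustration (not part of the patch): these PHYEV_* bits are what
 * mvs_94xx_init() composes into the per-phy interrupt mask, including the
 * two new STP error sources introduced here:
 *
 *	tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_ID_DONE |
 *	      PHYEV_DCDR_ERR | PHYEV_CRC_ERR;
 *	mvs_write_port_irq_mask(mvi, i, tmp);
 */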
PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */ @@ -229,9 +247,10 @@ enum hw_register_bits { /* MVS_PCS */ PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */ PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */ - PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */ + PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6485 */ PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ + PCS_SATA_RETRY_2 = (1U << 6), /* For 9180 */ PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ @@ -246,6 +265,8 @@ enum hw_register_bits { PORT_DEV_SMP_INIT = (1U << 10), PORT_DEV_STP_INIT = (1U << 9), PORT_PHY_ID_MASK = (0xFFU << 24), + PORT_SSP_TRGT_MASK = (0x1U << 19), + PORT_SSP_INIT_MASK = (0x1U << 11), PORT_DEV_TRGT_MASK = (0x7U << 17), PORT_DEV_INIT_MASK = (0x7U << 9), PORT_DEV_TYPE_MASK = (0x7U << 0), @@ -283,21 +304,30 @@ enum sas_sata_config_port_regs { PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */ PHYR_SATA_CTL = 0x18, /* SATA control */ PHYR_PHY_STAT = 0x1C, /* PHY status */ - PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */ - PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */ - PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */ - PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */ + PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */ + PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */ + PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */ + PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */ PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */ PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */ - PHYR_WIDE_PORT = 0x38, /* wide port participating */ + PHYR_WIDE_PORT = 0x38, /* wide port participating */ PHYR_CURRENT0 = 0x80, /* current connection info 0 */ PHYR_CURRENT1 = 0x84, /* current connection info 1 */ PHYR_CURRENT2 = 0x88, /* current connection info 2 */ -}; - -enum mvs_info_flags { - MVF_MSI = (1U << 0), /* MSI is enabled */ - MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ + CONFIG_ID_FRAME0 = 0x100, /* Port device ID frame register 0 */ + CONFIG_ID_FRAME1 = 0x104, /* Port device ID frame register 1 */ + CONFIG_ID_FRAME2 = 0x108, /* Port device ID frame register 2 */ + CONFIG_ID_FRAME3 = 0x10c, /* Port device ID frame register 3 */ + CONFIG_ID_FRAME4 = 0x110, /* Port device ID frame register 4 */ + CONFIG_ID_FRAME5 = 0x114, /* Port device ID frame register 5 */ + CONFIG_ID_FRAME6 = 0x118, /* Port device ID frame register 6 */ + CONFIG_ATT_ID_FRAME0 = 0x11c, /* attached ID frame register 0 */ + CONFIG_ATT_ID_FRAME1 = 0x120, /* attached ID frame register 1 */ + CONFIG_ATT_ID_FRAME2 = 0x124, /* attached ID frame register 2 */ + CONFIG_ATT_ID_FRAME3 = 0x128, /* attached ID frame register 3 */ + CONFIG_ATT_ID_FRAME4 = 0x12c, /* attached ID frame register 4 */ + CONFIG_ATT_ID_FRAME5 = 0x130, /* attached ID frame register 5 */ + CONFIG_ATT_ID_FRAME6 = 0x134, /* attached ID frame register 6 */ }; enum sas_cmd_port_registers { @@ -305,11 +335,11 @@ enum sas_cmd_port_registers { CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */ CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */ CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */ - CMD_OOB_SPACE = 0x110, /* OOB space control register */ - CMD_OOB_BURST = 0x114, /* OOB burst control register */ + CMD_OOB_SPACE = 0x110, /* OOB space control register 
*/ + CMD_OOB_BURST = 0x114, /* OOB burst control register */ CMD_PHY_TIMER = 0x118, /* PHY timer control register */ - CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */ - CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */ + CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */ + CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */ CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */ CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */ CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */ @@ -318,9 +348,9 @@ enum sas_cmd_port_registers { CMD_PL_TIMER = 0x138, /* PL timer register */ CMD_WD_TIMER = 0x13c, /* WD timer register */ CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */ - CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */ - CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */ - CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */ + CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */ + CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */ + CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */ CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */ CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */ CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */ @@ -353,27 +383,25 @@ enum sas_cmd_port_registers { CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */ }; -enum pci_cfg_register_bits { - PCTL_PWR_ON = (0xFU << 24), - PCTL_OFF = (0xFU << 12), - PRD_REQ_SIZE = (0x4000), - PRD_REQ_MASK = (0x00007000), +enum mvs_info_flags { + MVF_MSI = (1U << 0), /* MSI is enabled */ + MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ + MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */ }; -enum nvram_layout_offsets { - NVR_SIG = 0x00, /* 0xAA, 0x55 */ - NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */ +enum mvs_event_flags { + PHY_PLUG_EVENT = (3U), + PHY_PLUG_IN = (1U << 0), /* phy plug in */ + PHY_PLUG_OUT = (1U << 1), /* phy plug out */ }; -enum chip_flavors { - chip_6320, - chip_6440, - chip_6480, -}; - -enum port_type { - PORT_TYPE_SAS = (1L << 1), - PORT_TYPE_SATA = (1L << 0), +enum mvs_port_type { + PORT_TGT_MASK = (1U << 5), + PORT_INIT_PORT = (1U << 4), + PORT_TGT_PORT = (1U << 3), + PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT), + PORT_TYPE_SAS = (1U << 1), + PORT_TYPE_SATA = (1U << 0), }; /* Command Table Format */ @@ -438,4 +466,37 @@ enum error_info_rec_2 { USR_BLK_NM = (1U << 0), /* User Block Number */ }; +enum pci_cfg_register_bits { + PCTL_PWR_OFF = (0xFU << 24), + PCTL_COM_ON = (0xFU << 20), + PCTL_LINK_RST = (0xFU << 16), + PCTL_LINK_OFFS = (16), + PCTL_PHY_DSBL = (0xFU << 12), + PCTL_PHY_DSBL_OFFS = (12), + PRD_REQ_SIZE = (0x4000), + PRD_REQ_MASK = (0x00007000), + PLS_NEG_LINK_WD = (0x3FU << 4), + PLS_NEG_LINK_WD_OFFS = 4, + PLS_LINK_SPD = (0x0FU << 0), + PLS_LINK_SPD_OFFS = 0, +}; + +enum open_frame_protocol { + PROTOCOL_SMP = 0x0, + PROTOCOL_SSP = 0x1, + PROTOCOL_STP = 0x2, +}; + +/* define for response frame datapres field */ +enum datapres_field { + NO_DATA = 0, + RESPONSE_DATA = 1, + SENSE_DATA = 2, +}; + +/* define task management IU */ +struct mvs_tmf_task{ + u8 tmf; + u16 tag_of_task_to_be_managed; +}; #endif diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 258a1a923290..8646a19f999d 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -1,38 +1,41 @@ /* - mv_init.c - Marvell 88SE6440 SAS/SATA init support + * Marvell 88SE64xx/88SE94xx pci init + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * + * This file is licensed under GPLv2. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA +*/ - Copyright 2007 Red Hat, Inc. - Copyright 2008 Marvell. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2, - or (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public - License along with this program; see the file COPYING. If not, - write to the Free Software Foundation, 675 Mass Ave, Cambridge, - MA 02139, USA. - - */ #include "mv_sas.h" -#include "mv_64xx.h" -#include "mv_chips.h" static struct scsi_transport_template *mvs_stt; - static const struct mvs_chip_info mvs_chips[] = { - [chip_6320] = { 2, 16, 9 }, - [chip_6440] = { 4, 16, 9 }, - [chip_6480] = { 8, 32, 10 }, + [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, + [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, + [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, }, + [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, + [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, }; +#define SOC_SAS_NUM 2 + static struct scsi_host_template mvs_sht = { .module = THIS_MODULE, .name = DRV_NAME, @@ -53,17 +56,29 @@ static struct scsi_host_template mvs_sht = { .use_clustering = ENABLE_CLUSTERING, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_bus_reset_handler = sas_eh_bus_reset_handler, - .slave_alloc = sas_slave_alloc, + .slave_alloc = mvs_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, }; static struct sas_domain_function_template mvs_transport_ops = { - .lldd_execute_task = mvs_task_exec, + .lldd_dev_found = mvs_dev_found, + .lldd_dev_gone = mvs_dev_gone, + + .lldd_execute_task = mvs_queue_command, .lldd_control_phy = mvs_phy_control, - .lldd_abort_task = mvs_task_abort, - .lldd_port_formed = mvs_port_formed, + + .lldd_abort_task = mvs_abort_task, + .lldd_abort_task_set = mvs_abort_task_set, + .lldd_clear_aca = mvs_clear_aca, + .lldd_clear_task_set = mvs_clear_task_set, .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, + .lldd_lu_reset = mvs_lu_reset, + .lldd_query_task = mvs_query_task, + + .lldd_port_formed = mvs_port_formed, + .lldd_port_deformed = mvs_port_deformed, + }; static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) @@ -71,6 +86,8 @@ static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) struct mvs_phy *phy = &mvi->phy[phy_id]; struct asd_sas_phy *sas_phy = &phy->sas_phy; + phy->mvi = mvi; + init_timer(&phy->timer); sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 
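/*
 * Illustration (not part of the patch): each mvs_chips[] entry now ends
 * with a per-family dispatch table, so the generic code never calls a
 * 64xx or 94xx routine directly. Assuming MVS_CHIP_DISP resolves to
 * mvi->chip->dispatch (the macro itself is defined outside this hunk),
 * the interrupt path becomes:
 *
 *	stat = MVS_CHIP_DISP->isr_status(mvi, irq);
 *	if (stat)
 *		MVS_CHIP_DISP->isr(mvi, irq, stat);
 */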
1 : 0; sas_phy->class = SAS; sas_phy->iproto = SAS_PROTOCOL_ALL; @@ -83,248 +100,283 @@ static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) sas_phy->id = phy_id; sas_phy->sas_addr = &mvi->sas_addr[0]; sas_phy->frame_rcvd = &phy->frame_rcvd[0]; - sas_phy->ha = &mvi->sas; + sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata; sas_phy->lldd_phy = phy; } static void mvs_free(struct mvs_info *mvi) { int i; + struct mvs_wq *mwq; + int slot_nr; if (!mvi) return; - for (i = 0; i < MVS_SLOTS; i++) { - struct mvs_slot_info *slot = &mvi->slot_info[i]; + if (mvi->flags & MVF_FLAG_SOC) + slot_nr = MVS_SOC_SLOTS; + else + slot_nr = MVS_SLOTS; + for (i = 0; i < mvi->tags_num; i++) { + struct mvs_slot_info *slot = &mvi->slot_info[i]; if (slot->buf) - dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ, + dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ, slot->buf, slot->buf_dma); } if (mvi->tx) - dma_free_coherent(&mvi->pdev->dev, + dma_free_coherent(mvi->dev, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, mvi->tx, mvi->tx_dma); if (mvi->rx_fis) - dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ, + dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ, mvi->rx_fis, mvi->rx_fis_dma); if (mvi->rx) - dma_free_coherent(&mvi->pdev->dev, + dma_free_coherent(mvi->dev, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), mvi->rx, mvi->rx_dma); if (mvi->slot) - dma_free_coherent(&mvi->pdev->dev, - sizeof(*mvi->slot) * MVS_SLOTS, + dma_free_coherent(mvi->dev, + sizeof(*mvi->slot) * slot_nr, mvi->slot, mvi->slot_dma); -#ifdef MVS_ENABLE_PERI - if (mvi->peri_regs) - iounmap(mvi->peri_regs); +#ifndef DISABLE_HOTPLUG_DMA_FIX + if (mvi->bulk_buffer) + dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE, + mvi->bulk_buffer, mvi->bulk_buffer_dma); #endif - if (mvi->regs) - iounmap(mvi->regs); + + MVS_CHIP_DISP->chip_iounmap(mvi); if (mvi->shost) scsi_host_put(mvi->shost); - kfree(mvi->sas.sas_port); - kfree(mvi->sas.sas_phy); + list_for_each_entry(mwq, &mvi->wq_list, entry) + cancel_delayed_work(&mwq->work_q); kfree(mvi); } #ifdef MVS_USE_TASKLET -static void mvs_tasklet(unsigned long data) +struct tasklet_struct mv_tasklet; +static void mvs_tasklet(unsigned long opaque) { - struct mvs_info *mvi = (struct mvs_info *) data; unsigned long flags; + u32 stat; + u16 core_nr, i = 0; - spin_lock_irqsave(&mvi->lock, flags); + struct mvs_info *mvi; + struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque; + + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; + + if (unlikely(!mvi)) + BUG_ON(1); + + for (i = 0; i < core_nr; i++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; + stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq); + if (stat) + MVS_CHIP_DISP->isr(mvi, mvi->irq, stat); + } -#ifdef MVS_DISABLE_MSI - mvs_int_full(mvi); -#else - mvs_int_rx(mvi, true); -#endif - spin_unlock_irqrestore(&mvi->lock, flags); } #endif static irqreturn_t mvs_interrupt(int irq, void *opaque) { - struct mvs_info *mvi = opaque; - void __iomem *regs = mvi->regs; + u32 core_nr, i = 0; u32 stat; + struct mvs_info *mvi; + struct sas_ha_struct *sha = opaque; - stat = mr32(GBL_INT_STAT); + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; - if (stat == 0 || stat == 0xffffffff) + if (unlikely(!mvi)) return IRQ_NONE; - /* clear CMD_CMPLT ASAP */ - mw32_f(INT_STAT, CINT_DONE); - -#ifndef MVS_USE_TASKLET - spin_lock(&mvi->lock); - - mvs_int_full(mvi); + stat = MVS_CHIP_DISP->isr_status(mvi, irq); + if (!stat) + return IRQ_NONE; - 
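/*
 * Illustration (not part of the patch): a 9480-class board exposes
 * n_host = 2 controller cores behind one sas_ha_struct, so both the
 * tasklet and mvs_interrupt() fan a single shared IRQ out over every core
 * recorded in the private info:
 *
 *	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
 *	for (i = 0; i < core_nr; i++) {
 *		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
 *		MVS_CHIP_DISP->isr(mvi, irq, stat);
 *	}
 */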
spin_unlock(&mvi->lock); +#ifdef MVS_USE_TASKLET + tasklet_schedule(&mv_tasklet); #else - tasklet_schedule(&mvi->tasklet); + for (i = 0; i < core_nr; i++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; + MVS_CHIP_DISP->isr(mvi, irq, stat); + } #endif return IRQ_HANDLED; } -static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) { - struct mvs_info *mvi; - unsigned long res_start, res_len, res_flag; - struct asd_sas_phy **arr_phy; - struct asd_sas_port **arr_port; - const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data]; - int i; + int i, slot_nr; - /* - * alloc and init our per-HBA mvs_info struct - */ - - mvi = kzalloc(sizeof(*mvi), GFP_KERNEL); - if (!mvi) - return NULL; + if (mvi->flags & MVF_FLAG_SOC) + slot_nr = MVS_SOC_SLOTS; + else + slot_nr = MVS_SLOTS; spin_lock_init(&mvi->lock); -#ifdef MVS_USE_TASKLET - tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi); -#endif - mvi->pdev = pdev; - mvi->chip = chip; - - if (pdev->device == 0x6440 && pdev->revision == 0) - mvi->flags |= MVF_PHY_PWR_FIX; - - /* - * alloc and init SCSI, SAS glue - */ - - mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); - if (!mvi->shost) - goto err_out; - - arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); - arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); - if (!arr_phy || !arr_port) - goto err_out; - - for (i = 0; i < MVS_MAX_PHYS; i++) { + for (i = 0; i < mvi->chip->n_phy; i++) { mvs_phy_init(mvi, i); - arr_phy[i] = &mvi->phy[i].sas_phy; - arr_port[i] = &mvi->port[i].sas_port; - mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED; mvi->port[i].wide_port_phymap = 0; mvi->port[i].port_attached = 0; INIT_LIST_HEAD(&mvi->port[i].list); } - - SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas; - mvi->shost->transportt = mvs_stt; - mvi->shost->max_id = 21; - mvi->shost->max_lun = ~0; - mvi->shost->max_channel = 0; - mvi->shost->max_cmd_len = 16; - - mvi->sas.sas_ha_name = DRV_NAME; - mvi->sas.dev = &pdev->dev; - mvi->sas.lldd_module = THIS_MODULE; - mvi->sas.sas_addr = &mvi->sas_addr[0]; - mvi->sas.sas_phy = arr_phy; - mvi->sas.sas_port = arr_port; - mvi->sas.num_phys = chip->n_phy; - mvi->sas.lldd_max_execute_num = 1; - mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE; - mvi->shost->can_queue = MVS_CAN_QUEUE; - mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys; - mvi->sas.lldd_ha = mvi; - mvi->sas.core.shost = mvi->shost; - - mvs_tag_init(mvi); - - /* - * ioremap main and peripheral registers - */ - -#ifdef MVS_ENABLE_PERI - res_start = pci_resource_start(pdev, 2); - res_len = pci_resource_len(pdev, 2); - if (!res_start || !res_len) - goto err_out; - - mvi->peri_regs = ioremap_nocache(res_start, res_len); - if (!mvi->peri_regs) - goto err_out; -#endif - - res_start = pci_resource_start(pdev, 4); - res_len = pci_resource_len(pdev, 4); - if (!res_start || !res_len) - goto err_out; - - res_flag = pci_resource_flags(pdev, 4); - if (res_flag & IORESOURCE_CACHEABLE) - mvi->regs = ioremap(res_start, res_len); - else - mvi->regs = ioremap_nocache(res_start, res_len); - - if (!mvi->regs) - goto err_out; + for (i = 0; i < MVS_MAX_DEVICES; i++) { + mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED; + mvi->devices[i].dev_type = NO_DEVICE; + mvi->devices[i].device_id = i; + mvi->devices[i].dev_status = MVS_DEV_NORMAL; + } /* * alloc and init our DMA areas */ - - mvi->tx = dma_alloc_coherent(&pdev->dev, + mvi->tx = dma_alloc_coherent(mvi->dev, sizeof(*mvi->tx) * 
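/*
 * Illustration (not part of the patch): the bulk_buffer allocated a few
 * lines below (when DISABLE_HOTPLUG_DMA_FIX is not set) is the scratch
 * target that mvs_94xx_fix_dma() points unused trailing PRD entries at,
 * so the engine never DMAs through a stale address after a short s/g
 * list. Presumably wired up along these lines (the call site and the
 * dispatch member name are outside this hunk):
 *
 *	MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma, TRASH_BUCKET_SIZE,
 *			       from, prd);
 */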
MVS_CHIP_SLOT_SZ, &mvi->tx_dma, GFP_KERNEL); if (!mvi->tx) goto err_out; memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ); - - mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ, + mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ, &mvi->rx_fis_dma, GFP_KERNEL); if (!mvi->rx_fis) goto err_out; memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); - mvi->rx = dma_alloc_coherent(&pdev->dev, + mvi->rx = dma_alloc_coherent(mvi->dev, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), &mvi->rx_dma, GFP_KERNEL); if (!mvi->rx) goto err_out; memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1)); - mvi->rx[0] = cpu_to_le32(0xfff); mvi->rx_cons = 0xfff; - mvi->slot = dma_alloc_coherent(&pdev->dev, - sizeof(*mvi->slot) * MVS_SLOTS, + mvi->slot = dma_alloc_coherent(mvi->dev, + sizeof(*mvi->slot) * slot_nr, &mvi->slot_dma, GFP_KERNEL); if (!mvi->slot) goto err_out; - memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS); + memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr); - for (i = 0; i < MVS_SLOTS; i++) { +#ifndef DISABLE_HOTPLUG_DMA_FIX + mvi->bulk_buffer = dma_alloc_coherent(mvi->dev, + TRASH_BUCKET_SIZE, + &mvi->bulk_buffer_dma, GFP_KERNEL); + if (!mvi->bulk_buffer) + goto err_out; +#endif + for (i = 0; i < slot_nr; i++) { struct mvs_slot_info *slot = &mvi->slot_info[i]; - slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ, + slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ, &slot->buf_dma, GFP_KERNEL); - if (!slot->buf) + if (!slot->buf) { + printk(KERN_DEBUG"failed to allocate slot->buf.\n"); goto err_out; + } memset(slot->buf, 0, MVS_SLOT_BUF_SZ); + ++mvi->tags_num; } + /* Initialize tags */ + mvs_tag_init(mvi); + return 0; +err_out: + return 1; +} + - /* finally, read NVRAM to get our SAS address */ - if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8)) +int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex) +{ + unsigned long res_start, res_len, res_flag, res_flag_ex = 0; + struct pci_dev *pdev = mvi->pdev; + if (bar_ex != -1) { + /* + * ioremap main and peripheral registers + */ + res_start = pci_resource_start(pdev, bar_ex); + res_len = pci_resource_len(pdev, bar_ex); + if (!res_start || !res_len) + goto err_out; + + res_flag_ex = pci_resource_flags(pdev, bar_ex); + if (res_flag_ex & IORESOURCE_MEM) { + if (res_flag_ex & IORESOURCE_CACHEABLE) + mvi->regs_ex = ioremap(res_start, res_len); + else + mvi->regs_ex = ioremap_nocache(res_start, + res_len); + } else + mvi->regs_ex = (void *)res_start; + if (!mvi->regs_ex) + goto err_out; + } + + res_start = pci_resource_start(pdev, bar); + res_len = pci_resource_len(pdev, bar); + if (!res_start || !res_len) + goto err_out; + + res_flag = pci_resource_flags(pdev, bar); + if (res_flag & IORESOURCE_CACHEABLE) + mvi->regs = ioremap(res_start, res_len); + else + mvi->regs = ioremap_nocache(res_start, res_len); + + if (!mvi->regs) { + if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM)) + iounmap(mvi->regs_ex); + mvi->regs_ex = NULL; goto err_out; - return mvi; + } + + return 0; +err_out: + return -1; +} + +void mvs_iounmap(void __iomem *regs) +{ + iounmap(regs); +} + +static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev, + const struct pci_device_id *ent, + struct Scsi_Host *shost, unsigned int id) +{ + struct mvs_info *mvi; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info), + GFP_KERNEL); + if (!mvi) + return NULL; + mvi->pdev = pdev; + mvi->dev = &pdev->dev; + mvi->chip_id = ent->driver_data; + mvi->chip = 
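/*
 * Illustration (not part of the patch): mvs_ioremap() below maps a whole
 * BAR once, and mvs_94xx_ioremap() then carves it up by fixed offsets, so
 * the BAR2 layout assumed for the 94xx is:
 *
 *	base + 0x10200	PCI-E interrupt block	(mvi->regs_ex)
 *	base + 0x20000	SAS/SATA core 0		(mvi->regs)
 *	base + 0x24000	SAS/SATA core 1		(mvi->regs when mvi->id == 1)
 *
 * which matches the "MAIN_IRQ_CAUSE (R10200)" note in mv_94xx.h and the
 * regs_ex - 0x10200 rebasing done by the SPI helpers.
 */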
&mvs_chips[mvi->chip_id]; + INIT_LIST_HEAD(&mvi->wq_list); + mvi->irq = pdev->irq; + + ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi; + ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy; + + mvi->id = id; + mvi->sas = sha; + mvi->shost = shost; +#ifdef MVS_USE_TASKLET + tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha); +#endif + + if (MVS_CHIP_DISP->chip_ioremap(mvi)) + goto err_out; + if (!mvs_alloc(mvi, shost)) + return mvi; err_out: mvs_free(mvi); return NULL; @@ -363,16 +415,111 @@ static int pci_go_64(struct pci_dev *pdev) return rc; } +static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost, + const struct mvs_chip_info *chip_info) +{ + int phy_nr, port_nr; unsigned short core_nr; + struct asd_sas_phy **arr_phy; + struct asd_sas_port **arr_port; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + core_nr = chip_info->n_host; + phy_nr = core_nr * chip_info->n_phy; + port_nr = phy_nr; + + memset(sha, 0x00, sizeof(struct sas_ha_struct)); + arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL); + arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL); + if (!arr_phy || !arr_port) + goto exit_free; + + sha->sas_phy = arr_phy; + sha->sas_port = arr_port; + + sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL); + if (!sha->lldd_ha) + goto exit_free; + + ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr; + + shost->transportt = mvs_stt; + shost->max_id = 128; + shost->max_lun = ~0; + shost->max_channel = 1; + shost->max_cmd_len = 16; + + return 0; +exit_free: + kfree(arr_phy); + kfree(arr_port); + return -1; + +} + +static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost, + const struct mvs_chip_info *chip_info) +{ + int can_queue, i = 0, j = 0; + struct mvs_info *mvi = NULL; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + + for (j = 0; j < nr_core; j++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; + for (i = 0; i < chip_info->n_phy; i++) { + sha->sas_phy[j * chip_info->n_phy + i] = + &mvi->phy[i].sas_phy; + sha->sas_port[j * chip_info->n_phy + i] = + &mvi->port[i].sas_port; + } + } + + sha->sas_ha_name = DRV_NAME; + sha->dev = mvi->dev; + sha->lldd_module = THIS_MODULE; + sha->sas_addr = &mvi->sas_addr[0]; + + sha->num_phys = nr_core * chip_info->n_phy; + + sha->lldd_max_execute_num = 1; + + if (mvi->flags & MVF_FLAG_SOC) + can_queue = MVS_SOC_CAN_QUEUE; + else + can_queue = MVS_CAN_QUEUE; + + sha->lldd_queue_size = can_queue; + shost->can_queue = can_queue; + mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys; + sha->core.shost = mvi->shost; +} + +static void mvs_init_sas_add(struct mvs_info *mvi) +{ + u8 i; + for (i = 0; i < mvi->chip->n_phy; i++) { + mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL; + mvi->phy[i].dev_sas_addr = + cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr)); + } + + memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE); +} + static int __devinit mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { - int rc; + unsigned int rc, nhost = 0; struct mvs_info *mvi; irq_handler_t irq_handler = mvs_interrupt; + struct Scsi_Host *shost = NULL; + const struct mvs_chip_info *chip; + dev_printk(KERN_INFO, &pdev->dev, + "mvsas: driver version %s\n", DRV_VERSION); rc = pci_enable_device(pdev); if (rc) - return rc; + goto err_out_enable; pci_set_master(pdev); @@ -384,84 +531,110 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev, if (rc) goto err_out_regions; - mvi = 
mvs_alloc(pdev, ent); - if (!mvi) { + shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); + if (!shost) { rc = -ENOMEM; goto err_out_regions; } - rc = mvs_hw_init(mvi); - if (rc) - goto err_out_mvi; - -#ifndef MVS_DISABLE_MSI - if (!pci_enable_msi(pdev)) { - u32 tmp; - void __iomem *regs = mvi->regs; - mvi->flags |= MVF_MSI; - irq_handler = mvs_msi_interrupt; - tmp = mr32(PCS); - mw32(PCS, tmp | PCS_SELF_CLEAR); + chip = &mvs_chips[ent->driver_data]; + SHOST_TO_SAS_HA(shost) = + kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL); + if (!SHOST_TO_SAS_HA(shost)) { + kfree(shost); + rc = -ENOMEM; + goto err_out_regions; } -#endif - rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi); - if (rc) - goto err_out_msi; + rc = mvs_prep_sas_ha_init(shost, chip); + if (rc) { + kfree(shost); + rc = -ENOMEM; + goto err_out_regions; + } - rc = scsi_add_host(mvi->shost, &pdev->dev); - if (rc) - goto err_out_irq; + pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); - rc = sas_register_ha(&mvi->sas); + do { + mvi = mvs_pci_alloc(pdev, ent, shost, nhost); + if (!mvi) { + rc = -ENOMEM; + goto err_out_regions; + } + + mvs_init_sas_add(mvi); + + mvi->instance = nhost; + rc = MVS_CHIP_DISP->chip_init(mvi); + if (rc) { + mvs_free(mvi); + goto err_out_regions; + } + nhost++; + } while (nhost < chip->n_host); + + mvs_post_sas_ha_init(shost, chip); + + rc = scsi_add_host(shost, &pdev->dev); if (rc) goto err_out_shost; - pci_set_drvdata(pdev, mvi); - - mvs_print_info(mvi); + rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); + if (rc) + goto err_out_shost; + rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, + DRV_NAME, SHOST_TO_SAS_HA(shost)); + if (rc) + goto err_not_sas; - mvs_hba_interrupt_enable(mvi); + MVS_CHIP_DISP->interrupt_enable(mvi); scsi_scan_host(mvi->shost); return 0; +err_not_sas: + sas_unregister_ha(SHOST_TO_SAS_HA(shost)); err_out_shost: scsi_remove_host(mvi->shost); -err_out_irq: - free_irq(pdev->irq, mvi); -err_out_msi: - if (mvi->flags |= MVF_MSI) - pci_disable_msi(pdev); -err_out_mvi: - mvs_free(mvi); err_out_regions: pci_release_regions(pdev); err_out_disable: pci_disable_device(pdev); +err_out_enable: return rc; } static void __devexit mvs_pci_remove(struct pci_dev *pdev) { - struct mvs_info *mvi = pci_get_drvdata(pdev); + unsigned short core_nr, i = 0; + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct mvs_info *mvi = NULL; - pci_set_drvdata(pdev, NULL); + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; - if (mvi) { - sas_unregister_ha(&mvi->sas); - mvs_hba_interrupt_disable(mvi); - sas_remove_host(mvi->shost); - scsi_remove_host(mvi->shost); +#ifdef MVS_USE_TASKLET + tasklet_kill(&mv_tasklet); +#endif - free_irq(pdev->irq, mvi); - if (mvi->flags & MVF_MSI) - pci_disable_msi(pdev); + pci_set_drvdata(pdev, NULL); + sas_unregister_ha(sha); + sas_remove_host(mvi->shost); + scsi_remove_host(mvi->shost); + + MVS_CHIP_DISP->interrupt_disable(mvi); + free_irq(mvi->irq, sha); + for (i = 0; i < core_nr; i++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; mvs_free(mvi); - pci_release_regions(pdev); } + kfree(sha->sas_phy); + kfree(sha->sas_port); + kfree(sha); + pci_release_regions(pdev); pci_disable_device(pdev); + return; } static struct pci_device_id __devinitdata mvs_pci_table[] = { @@ -474,10 +647,12 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = { .subdevice = 0x6480, .class = 0, .class_mask = 0, - .driver_data = chip_6480, + .driver_data = chip_6485, }, { PCI_VDEVICE(MARVELL, 0x6440), 
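/*
 * Illustration (not part of the patch): with one sas_ha_struct now
 * fronting several cores, PCI drvdata stores the HA rather than a bare
 * mvs_info, and the remove path above walks back to the per-core
 * structures through the private info:
 *
 *	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
 *	struct mvs_info *mvi =
 *		((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
 */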
chip_6440 }, - { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 }, + { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 }, + { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 }, + { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 }, { } /* terminate list */ }; @@ -489,15 +664,17 @@ static struct pci_driver mvs_pci_driver = { .remove = __devexit_p(mvs_pci_remove), }; +/* task handler */ +struct task_struct *mvs_th; static int __init mvs_init(void) { int rc; - mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); if (!mvs_stt) return -ENOMEM; rc = pci_register_driver(&mvs_pci_driver); + if (rc) goto err_out; @@ -521,4 +698,6 @@ MODULE_AUTHOR("Jeff Garzik "); MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); +#ifdef CONFIG_PCI MODULE_DEVICE_TABLE(pci, mvs_pci_table); +#endif diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 6a583c19c6e5..d79ac179eaff 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -1,97 +1,65 @@ /* - mv_sas.c - Marvell 88SE6440 SAS/SATA support - - Copyright 2007 Red Hat, Inc. - Copyright 2008 Marvell. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2, - or (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public - License along with this program; see the file COPYING. If not, - write to the Free Software Foundation, 675 Mass Ave, Cambridge, - MA 02139, USA. - - --------------------------------------------------------------- - - Random notes: - * hardware supports controlling the endian-ness of data - structures. this permits elimination of all the le32_to_cpu() - and cpu_to_le32() conversions. - - */ + * Marvell 88SE64xx/88SE94xx main function + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA +*/ #include "mv_sas.h" -#include "mv_64xx.h" -#include "mv_chips.h" - -/* offset for D2H FIS in the Received FIS List Structure */ -#define SATA_RECEIVED_D2H_FIS(reg_set) \ - ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40) -#define SATA_RECEIVED_PIO_FIS(reg_set) \ - ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20) -#define UNASSOC_D2H_FIS(id) \ - ((void *) mvi->rx_fis + 0x100 * id) - -struct mvs_task_exec_info { - struct sas_task *task; - struct mvs_cmd_hdr *hdr; - struct mvs_port *port; - u32 tag; - int n_elem; -}; - -static void mvs_release_task(struct mvs_info *mvi, int phy_no); -static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i); -static void mvs_update_phyinfo(struct mvs_info *mvi, int i, - int get_st); -static int mvs_int_rx(struct mvs_info *mvi, bool self_clear); -static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, - u32 slot_idx); static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) { if (task->lldd_task) { struct mvs_slot_info *slot; slot = (struct mvs_slot_info *) task->lldd_task; - *tag = slot - mvi->slot_info; + *tag = slot->slot_tag; return 1; } return 0; } -static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) +void mvs_tag_clear(struct mvs_info *mvi, u32 tag) { void *bitmap = (void *) &mvi->tags; clear_bit(tag, bitmap); } -static void mvs_tag_free(struct mvs_info *mvi, u32 tag) +void mvs_tag_free(struct mvs_info *mvi, u32 tag) { mvs_tag_clear(mvi, tag); } -static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) +void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) { void *bitmap = (void *) &mvi->tags; set_bit(tag, bitmap); } -static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) +inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) { unsigned int index, tag; void *bitmap = (void *) &mvi->tags; - index = find_first_zero_bit(bitmap, MVS_SLOTS); + index = find_first_zero_bit(bitmap, mvi->tags_num); tag = index; - if (tag >= MVS_SLOTS) + if (tag >= mvi->tags_num) return -SAS_QUEUE_FULL; mvs_tag_set(mvi, tag); *tag_out = tag; @@ -101,11 +69,11 @@ static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) void mvs_tag_init(struct mvs_info *mvi) { int i; - for (i = 0; i < MVS_SLOTS; ++i) + for (i = 0; i < mvi->tags_num; ++i) mvs_tag_clear(mvi, i); } -static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) +void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) { u32 i; u32 run; @@ -113,7 +81,7 @@ static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) offset = 0; while (size) { - printk("%08X : ", baseaddr + offset); + printk(KERN_DEBUG"%08X : ", baseaddr + offset); if (size >= 16) run = 16; else @@ -121,31 +89,31 @@ static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) size -= run; for (i = 0; i < 16; i++) { if (i < run) - printk("%02X ", (u32)data[i]); + printk(KERN_DEBUG"%02X ", (u32)data[i]); else - printk(" "); + printk(KERN_DEBUG" "); } - printk(": "); + printk(KERN_DEBUG": "); for (i = 0; i < run; i++) - printk("%c", isalnum(data[i]) ? data[i] : '.'); - printk("\n"); + printk(KERN_DEBUG"%c", + isalnum(data[i]) ? 
data[i] : '.'); + printk(KERN_DEBUG"\n"); data = &data[16]; offset += run; } - printk("\n"); + printk(KERN_DEBUG"\n"); } -#if _MV_DUMP +#if (_MV_DUMP > 1) static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, enum sas_protocol proto) { u32 offset; - struct pci_dev *pdev = mvi->pdev; struct mvs_slot_info *slot = &mvi->slot_info[tag]; offset = slot->cmd_size + MVS_OAF_SZ + - sizeof(struct mvs_prd) * slot->n_elem; - dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n", + MVS_CHIP_DISP->prd_size() * slot->n_elem; + dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n", tag); mvs_hexdump(32, (u8 *) slot->response, (u32) slot->buf_dma + offset); @@ -155,47 +123,45 @@ static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag, enum sas_protocol proto) { -#if _MV_DUMP +#if (_MV_DUMP > 1) u32 sz, w_ptr; u64 addr; - void __iomem *regs = mvi->regs; - struct pci_dev *pdev = mvi->pdev; struct mvs_slot_info *slot = &mvi->slot_info[tag]; /*Delivery Queue */ - sz = mr32(TX_CFG) & TX_RING_SZ_MASK; + sz = MVS_CHIP_SLOT_SZ; w_ptr = slot->tx; - addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO); - dev_printk(KERN_DEBUG, &pdev->dev, + addr = mvi->tx_dma; + dev_printk(KERN_DEBUG, mvi->dev, "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr); - dev_printk(KERN_DEBUG, &pdev->dev, + dev_printk(KERN_DEBUG, mvi->dev, "Delivery Queue Base Address=0x%llX (PA)" "(tx_dma=0x%llX), Entry=%04d\n", - addr, mvi->tx_dma, w_ptr); + addr, (unsigned long long)mvi->tx_dma, w_ptr); mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]), (u32) mvi->tx_dma + sizeof(u32) * w_ptr); /*Command List */ addr = mvi->slot_dma; - dev_printk(KERN_DEBUG, &pdev->dev, + dev_printk(KERN_DEBUG, mvi->dev, "Command List Base Address=0x%llX (PA)" "(slot_dma=0x%llX), Header=%03d\n", - addr, slot->buf_dma, tag); - dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag); + addr, (unsigned long long)slot->buf_dma, tag); + dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag); /*mvs_cmd_hdr */ mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]), (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr)); /*1.command table area */ - dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n"); + dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n"); mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma); /*2.open address frame area */ - dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n"); + dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n"); mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size, (u32) slot->buf_dma + slot->cmd_size); /*3.status buffer */ mvs_hba_sb_dump(mvi, tag, proto); /*4.PRD table */ - dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n"); - mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem, + dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n"); + mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem, (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ, (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ); #endif @@ -206,15 +172,14 @@ static void mvs_hba_cq_dump(struct mvs_info *mvi) #if (_MV_DUMP > 2) u64 addr; void __iomem *regs = mvi->regs; - struct pci_dev *pdev = mvi->pdev; u32 entry = mvi->rx_cons + 1; u32 rx_desc = le32_to_cpu(mvi->rx[entry]); /*Completion Queue */ addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO); - dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n", + dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n", mvi->slot_info[rx_desc & 
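The dump code above walks the per-slot DMA buffer in the order the prep routines lay it out: command table, open address frame, PRD table, then status buffer. A small standalone model of that offset arithmetic, with OAF_SZ and PRD_SZ as illustrative placeholders for MVS_OAF_SZ and MVS_CHIP_DISP->prd_size():

#include <stdio.h>
#include <stddef.h>

#define OAF_SZ 64	/* stands in for MVS_OAF_SZ                  */
#define PRD_SZ 16	/* stands in for MVS_CHIP_DISP->prd_size()   */

struct slot_layout {
	size_t cmd_off, oaf_off, prd_off, status_off;
};

static struct slot_layout slot_layout(size_t cmd_size, size_t n_elem)
{
	struct slot_layout l;

	l.cmd_off = 0;				/* region 1: command table   */
	l.oaf_off = l.cmd_off + cmd_size;	/* region 2: open addr frame */
	l.prd_off = l.oaf_off + OAF_SZ;		/* region 3: PRD table       */
	l.status_off = l.prd_off + PRD_SZ * n_elem;	/* region 4: status  */
	return l;
}

int main(void)
{
	struct slot_layout l = slot_layout(128, 4);

	printf("oaf@%zu prd@%zu status@%zu\n",
	       l.oaf_off, l.prd_off, l.status_off);
	return 0;
}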
RXQ_SLOT_MASK].task);
- dev_printk(KERN_DEBUG, &pdev->dev,
+ dev_printk(KERN_DEBUG, mvi->dev,
"Completion List Base Address=0x%llX (PA), " "CQ_Entry=%04d, CQ_WP=0x%08X\n", addr, entry - 1, mvi->rx[0]);
@@ -223,62 +188,174 @@ static void mvs_hba_cq_dump(struct mvs_info *mvi)
#endif }
-/* FIXME: locking? */
-int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata)
+void mvs_get_sas_addr(void *buf, u32 buflen)
{
- struct mvs_info *mvi = sas_phy->ha->lldd_ha;
- int rc = 0, phy_id = sas_phy->id;
- u32 tmp;
+ /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
+}

- tmp = mvs_read_phy_ctl(mvi, phy_id);
+struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
+{
+ unsigned long i = 0, j = 0, hi = 0;
+ struct sas_ha_struct *sha = dev->port->ha;
+ struct mvs_info *mvi = NULL;
+ struct asd_sas_phy *phy;
+
+ while (sha->sas_port[i]) {
+ if (sha->sas_port[i] == dev->port) {
+ phy = container_of(sha->sas_port[i]->phy_list.next,
+ struct asd_sas_phy, port_phy_el);
+ j = 0;
+ while (sha->sas_phy[j]) {
+ if (sha->sas_phy[j] == phy)
+ break;
+ j++;
+ }
+ break;
+ }
+ i++;
+ }
+ hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

- switch (func) {
- case PHY_FUNC_SET_LINK_RATE:{
- struct sas_phy_linkrates *rates = funcdata;
- u32 lrmin = 0, lrmax = 0;
+ return mvi;

- lrmin = (rates->minimum_linkrate << 8);
- lrmax = (rates->maximum_linkrate << 12);
+}

- if (lrmin) {
- tmp &= ~(0xf << 8);
- tmp |= lrmin;
- }
- if (lrmax) {
- tmp &= ~(0xf << 12);
- tmp |= lrmax;
+/* FIXME */
+int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
+{
+ unsigned long i = 0, j = 0, n = 0, num = 0;
+ struct mvs_info *mvi = mvs_find_dev_mvi(dev);
+ struct sas_ha_struct *sha = dev->port->ha;
+
+ while (sha->sas_port[i]) {
+ if (sha->sas_port[i] == dev->port) {
+ struct asd_sas_phy *phy;
+ list_for_each_entry(phy,
+ &sha->sas_port[i]->phy_list, port_phy_el) {
+ j = 0;
+ while (sha->sas_phy[j]) {
+ if (sha->sas_phy[j] == phy)
+ break;
+ j++;
+ }
+ phyno[n] = (j >= mvi->chip->n_phy) ?
+ (j - mvi->chip->n_phy) : j;
+ num++;
+ n++;
}
- mvs_write_phy_ctl(mvi, phy_id, tmp);
break;
}
+ i++;
+ }
+ return num;
+}
+
+static inline void mvs_free_reg_set(struct mvs_info *mvi,
+ struct mvs_device *dev)
+{
+ if (!dev) {
+ mv_printk("device has already been freed.\n");
+ return;
+ }
+ if (dev->runing_req != 0)
+ return;
+ if (dev->taskfileset == MVS_ID_NOT_MAPPED)
+ return;
+ MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
+}
+
+static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
+ struct mvs_device *dev)
+{
+ if (dev->taskfileset != MVS_ID_NOT_MAPPED)
+ return 0;
+ return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
+}
+
+void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
+{
+ u32 no;
+ for_each_phy(phy_mask, phy_mask, no) {
+ if (!(phy_mask & 1))
+ continue;
+ MVS_CHIP_DISP->phy_reset(mvi, no, hard);
+ }
+}
+
+/* FIXME: locking? 
*/ +int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata) +{ + int rc = 0, phy_id = sas_phy->id; + u32 tmp, i = 0, hi; + struct sas_ha_struct *sha = sas_phy->ha; + struct mvs_info *mvi = NULL; + + while (sha->sas_phy[i]) { + if (sha->sas_phy[i] == sas_phy) + break; + i++; + } + hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; + + switch (func) { + case PHY_FUNC_SET_LINK_RATE: + MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata); + break; case PHY_FUNC_HARD_RESET: + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id); if (tmp & PHY_RST_HARD) break; - mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD); + MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1); break; case PHY_FUNC_LINK_RESET: - mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST); + MVS_CHIP_DISP->phy_enable(mvi, phy_id); + MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0); break; case PHY_FUNC_DISABLE: + MVS_CHIP_DISP->phy_disable(mvi, phy_id); + break; case PHY_FUNC_RELEASE_SPINUP_HOLD: default: rc = -EOPNOTSUPP; } - + msleep(200); return rc; } +void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id, + u32 off_lo, u32 off_hi, u64 sas_addr) +{ + u32 lo = (u32)sas_addr; + u32 hi = (u32)(sas_addr>>32); + + MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo); + MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo); + MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi); + MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi); +} + static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) { struct mvs_phy *phy = &mvi->phy[i]; - struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; - + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_ha_struct *sas_ha; if (!phy->phy_attached) return; + if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK) + && phy->phy_type & PORT_TYPE_SAS) { + return; + } + + sas_ha = mvi->sas; + sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE); + if (sas_phy->phy) { struct sas_phy *sphy = sas_phy->phy; @@ -286,7 +363,7 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) sphy->minimum_linkrate = phy->minimum_linkrate; sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; sphy->maximum_linkrate = phy->maximum_linkrate; - sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; + sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate(); } if (phy->phy_type & PORT_TYPE_SAS) { @@ -297,13 +374,31 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) id->initiator_bits = SAS_PROTOCOL_ALL; id->target_bits = phy->identify.target_port_protocols; } else if (phy->phy_type & PORT_TYPE_SATA) { - /* TODO */ + /*Nothing*/ } - mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size; - mvi->sas.notify_port_event(mvi->sas.sas_phy[i], + mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy); + + sas_phy->frame_rcvd_size = phy->frame_rcvd_size; + + mvi->sas->notify_port_event(sas_phy, PORTE_BYTES_DMAED); } +int mvs_slave_alloc(struct scsi_device *scsi_dev) +{ + struct domain_device *dev = sdev_to_domain_dev(scsi_dev); + if (dev_is_sata(dev)) { + /* We don't need to rescan targets + * if REPORT_LUNS request is failed + */ + if (scsi_dev->lun > 0) + return -ENXIO; + scsi_dev->tagged_supported = 1; + } + + return sas_slave_alloc(scsi_dev); +} + int mvs_slave_configure(struct scsi_device *sdev) { struct domain_device *dev = sdev_to_domain_dev(sdev); @@ -311,25 +406,31 @@ int mvs_slave_configure(struct scsi_device *sdev) if (ret) return ret; - if (dev_is_sata(dev)) { - /* struct ata_port *ap = dev->sata_dev.ap; */ - /* struct 
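Both mvs_find_dev_mvi() and mvs_phy_control() above recover the owning controller core from a libsas-global phy index by dividing by the per-core phy count (hi = i/n_phy). A standalone model of that mapping; struct host and host_for_phy() are illustrative stand-ins for the driver's types:

#include <assert.h>

struct host { int id; };	/* illustrative stand-in for mvs_info */

static struct host *host_for_phy(struct host **hosts, int n_phy, int phy_idx)
{
	return hosts[phy_idx / n_phy];	/* hi = i / n_phy, as above */
}

int main(void)
{
	struct host h0 = { 0 }, h1 = { 1 };
	struct host *hosts[] = { &h0, &h1 };

	/* with 8 phys per core, phys 0-7 map to core 0, 8-15 to core 1 */
	assert(host_for_phy(hosts, 8, 3)->id == 0);
	assert(host_for_phy(hosts, 8, 11)->id == 1);
	return 0;
}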
ata_device *adev = ap->link.device; */ - - /* clamp at no NCQ for the time being */ - /* adev->flags |= ATA_DFLAG_NCQ_OFF; */ + /* may set PIO mode */ + #if MV_DISABLE_NCQ + struct ata_port *ap = dev->sata_dev.ap; + struct ata_device *adev = ap->link.device; + adev->flags |= ATA_DFLAG_NCQ_OFF; scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); + #endif } return 0; } void mvs_scan_start(struct Scsi_Host *shost) { - int i; - struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha; + int i, j; + unsigned short core_nr; + struct mvs_info *mvi; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; - for (i = 0; i < mvi->chip->n_phy; ++i) { - mvs_bytes_dmaed(mvi, i); + for (j = 0; j < core_nr; j++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; + for (i = 0; i < mvi->chip->n_phy; ++i) + mvs_bytes_dmaed(mvi, i); } } @@ -350,15 +451,15 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, int elem, rc, i; struct sas_task *task = tei->task; struct mvs_cmd_hdr *hdr = tei->hdr; + struct domain_device *dev = task->dev; + struct asd_sas_port *sas_port = dev->port; struct scatterlist *sg_req, *sg_resp; u32 req_len, resp_len, tag = tei->tag; void *buf_tmp; u8 *buf_oaf; dma_addr_t buf_tmp_dma; - struct mvs_prd *buf_prd; - struct scatterlist *sg; + void *buf_prd; struct mvs_slot_info *slot = &mvi->slot_info[tag]; - struct asd_sas_port *sas_port = task->dev->port; u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); #if _MV_DUMP u8 *buf_cmd; @@ -368,18 +469,18 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, * DMA-map SMP request, response buffers */ sg_req = &task->smp_task.smp_req; - elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE); + elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE); if (!elem) return -ENOMEM; req_len = sg_dma_len(sg_req); sg_resp = &task->smp_task.smp_resp; - elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE); + elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE); if (!elem) { rc = -ENOMEM; goto err_out; } - resp_len = sg_dma_len(sg_resp); + resp_len = SB_RFB_MAX; /* must be in dwords */ if ((req_len & 0x3) || (resp_len & 0x3)) { @@ -391,7 +492,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs */ - /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ + /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */ buf_tmp = slot->buf; buf_tmp_dma = slot->buf_dma; @@ -412,20 +513,22 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, buf_tmp += MVS_OAF_SZ; buf_tmp_dma += MVS_OAF_SZ; - /* region 3: PRD table ********************************************* */ + /* region 3: PRD table *********************************** */ buf_prd = buf_tmp; if (tei->n_elem) hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); else hdr->prd_tbl = 0; - i = sizeof(struct mvs_prd) * tei->n_elem; + i = MVS_CHIP_DISP->prd_size() * tei->n_elem; buf_tmp += i; buf_tmp_dma += i; /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ slot->response = buf_tmp; hdr->status_buf = cpu_to_le64(buf_tmp_dma); + if (mvi->flags & MVF_FLAG_SOC) + hdr->reserved[0] = 0; /* * Fill in TX ring and command slot header @@ -441,17 +544,14 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, hdr->data_len = 0; /* generate open address frame hdr (first 12 bytes) */ - buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */ - buf_oaf[1] = task->dev->linkrate & 0xf; + /* initiator, SMP, ftype 1h */ + buf_oaf[0] = (1 << 7) | 
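mvs_task_prep_smp() above rejects SMP request and response buffers whose lengths are not whole dwords. A standalone model of that check:

#include <stdio.h>
#include <stdint.h>

/* both lengths must be in dwords, as checked in mvs_task_prep_smp() */
static int smp_lens_ok(uint32_t req_len, uint32_t resp_len)
{
	return !((req_len & 0x3) || (resp_len & 0x3));
}

int main(void)
{
	printf("%d\n", smp_lens_ok(24, 1024));	/* 1: both dword multiples */
	printf("%d\n", smp_lens_ok(25, 1024));	/* 0: request misaligned   */
	return 0;
}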
(PROTOCOL_SMP << 4) | 0x01; + buf_oaf[1] = dev->linkrate & 0xf; *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); + memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); /* fill in PRD (scatter/gather) table, if any */ - for_each_sg(task->scatter, sg, tei->n_elem, i) { - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); - buf_prd++; - } + MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); #if _MV_DUMP /* copy cmd table */ @@ -462,10 +562,10 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, return 0; err_out_2: - pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1, + dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1, PCI_DMA_FROMDEVICE); err_out: - pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1, + dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1, PCI_DMA_TODEVICE); return rc; } @@ -490,30 +590,41 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, { struct sas_task *task = tei->task; struct domain_device *dev = task->dev; + struct mvs_device *mvi_dev = + (struct mvs_device *)dev->lldd_dev; struct mvs_cmd_hdr *hdr = tei->hdr; struct asd_sas_port *sas_port = dev->port; struct mvs_slot_info *slot; - struct scatterlist *sg; - struct mvs_prd *buf_prd; - struct mvs_port *port = tei->port; - u32 tag = tei->tag; - u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); + void *buf_prd; + u32 tag = tei->tag, hdr_tag; + u32 flags, del_q; void *buf_tmp; u8 *buf_cmd, *buf_oaf; dma_addr_t buf_tmp_dma; u32 i, req_len, resp_len; const u32 max_resp_len = SB_RFB_MAX; - if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED) + if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) { + mv_dprintk("Have not enough regiset for dev %d.\n", + mvi_dev->device_id); return -EBUSY; - + } slot = &mvi->slot_info[tag]; slot->tx = mvi->tx_prod; - mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | - (TXQ_CMD_STP << TXQ_CMD_SHIFT) | - (sas_port->phy_mask << TXQ_PHY_SHIFT) | - (port->taskfileset << TXQ_SRS_SHIFT)); - + del_q = TXQ_MODE_I | tag | + (TXQ_CMD_STP << TXQ_CMD_SHIFT) | + (sas_port->phy_mask << TXQ_PHY_SHIFT) | + (mvi_dev->taskfileset << TXQ_SRS_SHIFT); + mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); + +#ifndef DISABLE_HOTPLUG_DMA_FIX + if (task->data_dir == DMA_FROM_DEVICE) + flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT); + else + flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); +#else + flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); +#endif if (task->ata_task.use_ncq) flags |= MCH_FPDMA; if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { @@ -526,10 +637,13 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, hdr->flags = cpu_to_le32(flags); /* FIXME: the low order order 5 bits for the TAG if enable NCQ */ - if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags)) - task->ata_task.fis.sector_count |= hdr->tags << 3; + if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); else - hdr->tags = cpu_to_le32(tag); + hdr_tag = tag; + + hdr->tags = cpu_to_le32(hdr_tag); + hdr->data_len = cpu_to_le32(task->total_xfer_len); /* @@ -558,12 +672,13 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, /* region 3: PRD table ********************************************* */ buf_prd = buf_tmp; + if (tei->n_elem) hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); else hdr->prd_tbl = 0; + i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count(); - i = sizeof(struct mvs_prd) * tei->n_elem; buf_tmp += i; buf_tmp_dma += i; 
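For NCQ commands, mvs_task_prep_ata() above folds the allocated queue tag into bits 7:3 of the FIS sector count field via hdr_tag << 3. A standalone model of that encoding; set_ncq_tag() is an illustrative helper, not a driver function:

#include <stdio.h>
#include <stdint.h>

/* place the 5-bit NCQ tag in bits 7:3 of the FIS sector count */
static uint8_t set_ncq_tag(uint8_t sector_count, uint32_t tag)
{
	return sector_count | (uint8_t)(tag << 3);
}

int main(void)
{
	printf("0x%02x\n", set_ncq_tag(0, 5));	/* prints 0x28 */
	return 0;
}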
@@ -573,6 +688,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, */ slot->response = buf_tmp; hdr->status_buf = cpu_to_le64(buf_tmp_dma); + if (mvi->flags & MVF_FLAG_SOC) + hdr->reserved[0] = 0; req_len = sizeof(struct host_to_dev_fis); resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - @@ -582,7 +699,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, resp_len = min(resp_len, max_resp_len); hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); - task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ + if (likely(!task->ata_task.device_control_reg_update)) + task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ /* fill in command FIS and ATAPI CDB */ memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) @@ -590,30 +708,35 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, task->ata_task.atapi_packet, 16); /* generate open address frame hdr (first 12 bytes) */ - buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */ - buf_oaf[1] = task->dev->linkrate & 0xf; - *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); + /* initiator, STP, ftype 1h */ + buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1; + buf_oaf[1] = dev->linkrate & 0xf; + *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); + memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); /* fill in PRD (scatter/gather) table, if any */ - for_each_sg(task->scatter, sg, tei->n_elem, i) { - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); - buf_prd++; - } - + MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); +#ifndef DISABLE_HOTPLUG_DMA_FIX + if (task->data_dir == DMA_FROM_DEVICE) + MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma, + TRASH_BUCKET_SIZE, tei->n_elem, buf_prd); +#endif return 0; } static int mvs_task_prep_ssp(struct mvs_info *mvi, - struct mvs_task_exec_info *tei) + struct mvs_task_exec_info *tei, int is_tmf, + struct mvs_tmf_task *tmf) { struct sas_task *task = tei->task; struct mvs_cmd_hdr *hdr = tei->hdr; struct mvs_port *port = tei->port; + struct domain_device *dev = task->dev; + struct mvs_device *mvi_dev = + (struct mvs_device *)dev->lldd_dev; + struct asd_sas_port *sas_port = dev->port; struct mvs_slot_info *slot; - struct scatterlist *sg; - struct mvs_prd *buf_prd; + void *buf_prd; struct ssp_frame_hdr *ssp_hdr; void *buf_tmp; u8 *buf_cmd, *buf_oaf, fburst = 0; @@ -621,12 +744,13 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, u32 flags; u32 resp_len, req_len, i, tag = tei->tag; const u32 max_resp_len = SB_RFB_MAX; - u8 phy_mask; + u32 phy_mask; slot = &mvi->slot_info[tag]; - phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap : - task->dev->port->phy_mask; + phy_mask = ((port->wide_port_phymap) ? 
port->wide_port_phymap : + sas_port->phy_mask) & TXQ_PHY_MASK; + slot->tx = mvi->tx_prod; mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | @@ -640,7 +764,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT) | (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT)); - hdr->tags = cpu_to_le32(tag); hdr->data_len = cpu_to_le32(task->total_xfer_len); @@ -674,13 +797,15 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, else hdr->prd_tbl = 0; - i = sizeof(struct mvs_prd) * tei->n_elem; + i = MVS_CHIP_DISP->prd_size() * tei->n_elem; buf_tmp += i; buf_tmp_dma += i; /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ slot->response = buf_tmp; hdr->status_buf = cpu_to_le64(buf_tmp_dma); + if (mvi->flags & MVF_FLAG_SOC) + hdr->reserved[0] = 0; resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - sizeof(struct mvs_err_info) - i; @@ -692,57 +817,105 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); /* generate open address frame hdr (first 12 bytes) */ - buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */ - buf_oaf[1] = task->dev->linkrate & 0xf; - *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); + /* initiator, SSP, ftype 1h */ + buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1; + buf_oaf[1] = dev->linkrate & 0xf; + *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); + memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); /* fill in SSP frame header (Command Table.SSP frame header) */ ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; - ssp_hdr->frame_type = SSP_COMMAND; - memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr, + + if (is_tmf) + ssp_hdr->frame_type = SSP_TASK; + else + ssp_hdr->frame_type = SSP_COMMAND; + + memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); memcpy(ssp_hdr->hashed_src_addr, - task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); ssp_hdr->tag = cpu_to_be16(tag); - /* fill in command frame IU */ + /* fill in IU for TASK and Command Frame */ buf_cmd += sizeof(*ssp_hdr); memcpy(buf_cmd, &task->ssp_task.LUN, 8); - buf_cmd[9] = fburst | task->ssp_task.task_attr | - (task->ssp_task.task_prio << 3); - memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); - /* fill in PRD (scatter/gather) table, if any */ - for_each_sg(task->scatter, sg, tei->n_elem, i) { - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); - buf_prd++; + if (ssp_hdr->frame_type != SSP_TASK) { + buf_cmd[9] = fburst | task->ssp_task.task_attr | + (task->ssp_task.task_prio << 3); + memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); + } else{ + buf_cmd[10] = tmf->tmf; + switch (tmf->tmf) { + case TMF_ABORT_TASK: + case TMF_QUERY_TASK: + buf_cmd[12] = + (tmf->tag_of_task_to_be_managed >> 8) & 0xff; + buf_cmd[13] = + tmf->tag_of_task_to_be_managed & 0xff; + break; + default: + break; + } } - + /* fill in PRD (scatter/gather) table, if any */ + MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); return 0; } -int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) +#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) +static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, + struct completion *completion, int lock, + int is_tmf, struct mvs_tmf_task *tmf) { struct 
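When the SSP frame above carries a task management function, the tag of the task to be managed is stored high byte first in IU bytes 12 and 13. A standalone model; fill_tmf_tag() is illustrative:

#include <stdio.h>
#include <stdint.h>

/* tag of the task to be managed, high byte in IU byte 12 */
static void fill_tmf_tag(uint8_t *iu, uint16_t tag)
{
	iu[12] = (tag >> 8) & 0xff;
	iu[13] = tag & 0xff;
}

int main(void)
{
	uint8_t iu[16] = { 0 };

	fill_tmf_tag(iu, 0x0102);
	printf("%02x %02x\n", iu[12], iu[13]);	/* 01 02 */
	return 0;
}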
domain_device *dev = task->dev;
- struct mvs_info *mvi = dev->port->ha->lldd_ha;
- struct pci_dev *pdev = mvi->pdev;
- void __iomem *regs = mvi->regs;
+ struct mvs_info *mvi;
+ struct mvs_device *mvi_dev;
struct mvs_task_exec_info tei;
struct sas_task *t = task;
struct mvs_slot_info *slot;
u32 tag = 0xdeadbeef, rc, n_elem = 0;
- unsigned long flags;
u32 n = num, pass = 0;
+ unsigned long flags = 0;

- spin_lock_irqsave(&mvi->lock, flags);
+ if (!dev->port) {
+ struct task_status_struct *tsm = &t->task_status;
+
+ tsm->resp = SAS_TASK_UNDELIVERED;
+ tsm->stat = SAS_PHY_DOWN;
+ t->task_done(t);
+ return 0;
+ }
+
+ mvi = mvs_find_dev_mvi(task->dev);
+
+ if (lock)
+ spin_lock_irqsave(&mvi->lock, flags);
do {
dev = t->dev;
- tei.port = &mvi->port[dev->port->id];
+ mvi_dev = (struct mvs_device *)dev->lldd_dev;
+ if (DEV_IS_GONE(mvi_dev)) {
+ if (mvi_dev)
+ mv_dprintk("device %d not ready.\n",
+ mvi_dev->device_id);
+ else
+ mv_dprintk("device %016llx not ready.\n",
+ SAS_ADDR(dev->sas_addr));
+
+ rc = SAS_PHY_DOWN;
+ goto out_done;
+ }
+
+ if (dev->port->id >= mvi->chip->n_phy)
+ tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy];
+ else
+ tei.port = &mvi->port[dev->port->id];
if (!tei.port->port_attached) {
if (sas_protocol_ata(t->task_proto)) {
+ mv_dprintk("port %d has no"
+ " attached device.\n", dev->port->id);
rc = SAS_PHY_DOWN;
goto out_done;
} else {
@@ -759,7 +932,8 @@ int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
if (!sas_protocol_ata(t->task_proto)) {
if (t->num_scatter) {
- n_elem = pci_map_sg(mvi->pdev, t->scatter,
+ n_elem = dma_map_sg(mvi->dev,
+ t->scatter,
t->num_scatter,
t->data_dir);
if (!n_elem) {
@@ -776,20 +950,23 @@ int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
goto err_out;

slot = &mvi->slot_info[tag];
+
+ t->lldd_task = NULL;
slot->n_elem = n_elem;
+ slot->slot_tag = tag;
memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
+
tei.task = t;
tei.hdr = &mvi->slot[tag];
tei.tag = tag;
tei.n_elem = n_elem;
-
switch (t->task_proto) {
case SAS_PROTOCOL_SMP:
rc = mvs_task_prep_smp(mvi, &tei);
break;
case SAS_PROTOCOL_SSP:
- rc = mvs_task_prep_ssp(mvi, &tei);
+ rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
break;
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
@@ -797,52 +974,61 @@ int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
rc = mvs_task_prep_ata(mvi, &tei);
break;
default:
- dev_printk(KERN_ERR, &pdev->dev,
+ dev_printk(KERN_ERR, mvi->dev,
"unknown sas_task proto: 0x%x\n",
t->task_proto);
rc = -EINVAL;
break;
}

- if (rc)
+ if (rc) {
+ mv_dprintk("rc is %x\n", rc);
goto err_out_tag;
-
+ }
slot->task = t;
slot->port = tei.port;
t->lldd_task = (void *) slot;
- list_add_tail(&slot->list, &slot->port->list);
+ list_add_tail(&slot->entry, &tei.port->list);
/* TODO: select normal or high priority */
-
spin_lock(&t->task_state_lock);
t->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock(&t->task_state_lock);

mvs_hba_memory_dump(mvi, tag, t->task_proto);
-
+ mvi_dev->runing_req++;
++pass;
mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
if (n > 1)
t = list_entry(t->list.next, struct sas_task, list);
} while (--n);
-
rc = 0;
goto out_done;

err_out_tag:
mvs_tag_free(mvi, tag);
err_out:
-
- dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
+
+ dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
if (!sas_protocol_ata(t->task_proto))
if (n_elem)
- pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
+ dma_unmap_sg(mvi->dev, t->scatter, n_elem,
t->data_dir);
out_done:
- if (pass)
- 
mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); - spin_unlock_irqrestore(&mvi->lock, flags); + if (likely(pass)) { + MVS_CHIP_DISP->start_delivery(mvi, + (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); + } + if (lock) + spin_unlock_irqrestore(&mvi->lock, flags); return rc; } +int mvs_queue_command(struct sas_task *task, const int num, + gfp_t gfp_flags) +{ + return mvs_task_exec(task, num, gfp_flags, NULL, 1, 0, NULL); +} + static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) { u32 slot_idx = rx_desc & RXQ_SLOT_MASK; @@ -852,16 +1038,18 @@ static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, struct mvs_slot_info *slot, u32 slot_idx) { + if (!slot->task) + return; if (!sas_protocol_ata(task->task_proto)) if (slot->n_elem) - pci_unmap_sg(mvi->pdev, task->scatter, + dma_unmap_sg(mvi->dev, task->scatter, slot->n_elem, task->data_dir); switch (task->task_proto) { case SAS_PROTOCOL_SMP: - pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1, + dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1, PCI_DMA_FROMDEVICE); - pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1, + dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1, PCI_DMA_TODEVICE); break; @@ -872,10 +1060,12 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, /* do nothing */ break; } - list_del(&slot->list); + list_del_init(&slot->entry); task->lldd_task = NULL; slot->task = NULL; slot->port = NULL; + slot->slot_tag = 0xFFFFFFFF; + mvs_slot_free(mvi, slot_idx); } static void mvs_update_wideport(struct mvs_info *mvi, int i) @@ -884,25 +1074,28 @@ static void mvs_update_wideport(struct mvs_info *mvi, int i) struct mvs_port *port = phy->port; int j, no; - for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy) - if (no & 1) { - mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); - mvs_write_port_cfg_data(mvi, no, + for_each_phy(port->wide_port_phymap, j, no) { + if (j & 1) { + MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, + PHYR_WIDE_PORT); + MVS_CHIP_DISP->write_port_cfg_data(mvi, no, port->wide_port_phymap); } else { - mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); - mvs_write_port_cfg_data(mvi, no, 0); + MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, + PHYR_WIDE_PORT); + MVS_CHIP_DISP->write_port_cfg_data(mvi, no, + 0); } + } } static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) { u32 tmp; struct mvs_phy *phy = &mvi->phy[i]; - struct mvs_port *port = phy->port;; - - tmp = mvs_read_phy_ctl(mvi, i); + struct mvs_port *port = phy->port; + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i); if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { if (!port) phy->phy_attached = 1; @@ -917,7 +1110,6 @@ static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) mvs_update_wideport(mvi, i); } else if (phy->phy_type & PORT_TYPE_SATA) port->port_attached = 0; - mvs_free_reg_set(mvi, phy->port); phy->port = NULL; phy->phy_attached = 0; phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); @@ -932,17 +1124,21 @@ static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) if (!s) return NULL; - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); - s[3] = mvs_read_port_cfg_data(mvi, i); + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); + s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); - s[2] = mvs_read_port_cfg_data(mvi, i); + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); + s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); - 
mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); - s[1] = mvs_read_port_cfg_data(mvi, i); + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); + s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); - s[0] = mvs_read_port_cfg_data(mvi, i); + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); + s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); + + /* Workaround: take some ATAPI devices for ATA */ + if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) + s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); return (void *)s; } @@ -952,56 +1148,53 @@ static u32 mvs_is_sig_fis_received(u32 irq_status) return irq_status & PHYEV_SIG_FIS; } -static void mvs_update_phyinfo(struct mvs_info *mvi, int i, - int get_st) +void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) { struct mvs_phy *phy = &mvi->phy[i]; - struct pci_dev *pdev = mvi->pdev; - u32 tmp; - u64 tmp64; - - mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); - phy->dev_info = mvs_read_port_cfg_data(mvi, i); - - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); - phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32; + struct sas_identify_frame *id; - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); - phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); + id = (struct sas_identify_frame *)phy->frame_rcvd; if (get_st) { - phy->irq_status = mvs_read_port_irq_stat(mvi, i); + phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i); phy->phy_status = mvs_is_phy_ready(mvi, i); } if (phy->phy_status) { - u32 phy_st; - struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; - - mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); - phy_st = mvs_read_port_cfg_data(mvi, i); - - sas_phy->linkrate = - (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; - phy->minimum_linkrate = - (phy->phy_status & - PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; - phy->maximum_linkrate = - (phy->phy_status & - PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; + int oob_done = 0; + struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy; - if (phy->phy_type & PORT_TYPE_SAS) { - /* Updated attached_sas_addr */ - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); - phy->att_dev_sas_addr = - (u64) mvs_read_port_cfg_data(mvi, i) << 32; - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); - phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); - phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); + oob_done = MVS_CHIP_DISP->oob_done(mvi, i); + + MVS_CHIP_DISP->fix_phy_info(mvi, i, id); + if (phy->phy_type & PORT_TYPE_SATA) { + phy->identify.target_port_protocols = SAS_PROTOCOL_STP; + if (mvs_is_sig_fis_received(phy->irq_status)) { + phy->phy_attached = 1; + phy->att_dev_sas_addr = + i + mvi->id * mvi->chip->n_phy; + if (oob_done) + sas_phy->oob_mode = SATA_OOB_MODE; + phy->frame_rcvd_size = + sizeof(struct dev_to_host_fis); + mvs_get_d2h_reg(mvi, i, (void *)id); + } else { + u32 tmp; + dev_printk(KERN_DEBUG, mvi->dev, + "Phy%d : No sig fis\n", i); + tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i); + MVS_CHIP_DISP->write_port_irq_mask(mvi, i, + tmp | PHYEV_SIG_FIS); + phy->phy_attached = 0; + phy->phy_type &= ~PORT_TYPE_SATA; + MVS_CHIP_DISP->phy_reset(mvi, i, 0); + goto out_done; + } + } else if (phy->phy_type & PORT_TYPE_SAS + || phy->att_dev_info & PORT_SSP_INIT_MASK) { + phy->phy_attached = 1; phy->identify.device_type = - phy->att_dev_info & PORT_DEV_TYPE_MASK; + phy->att_dev_info & PORT_DEV_TYPE_MASK; if 
(phy->identify.device_type == SAS_END_DEV) phy->identify.target_port_protocols = @@ -1009,810 +1202,956 @@ static void mvs_update_phyinfo(struct mvs_info *mvi, int i, else if (phy->identify.device_type != NO_DEVICE) phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; - if (phy_st & PHY_OOB_DTCTD) + if (oob_done) sas_phy->oob_mode = SAS_OOB_MODE; phy->frame_rcvd_size = sizeof(struct sas_identify_frame); - } else if (phy->phy_type & PORT_TYPE_SATA) { - phy->identify.target_port_protocols = SAS_PROTOCOL_STP; - if (mvs_is_sig_fis_received(phy->irq_status)) { - phy->att_dev_sas_addr = i; /* temp */ - if (phy_st & PHY_OOB_DTCTD) - sas_phy->oob_mode = SATA_OOB_MODE; - phy->frame_rcvd_size = - sizeof(struct dev_to_host_fis); - mvs_get_d2h_reg(mvi, i, - (void *)sas_phy->frame_rcvd); - } else { - dev_printk(KERN_DEBUG, &pdev->dev, - "No sig fis\n"); - phy->phy_type &= ~(PORT_TYPE_SATA); - goto out_done; - } } - tmp64 = cpu_to_be64(phy->att_dev_sas_addr); - memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE); - - dev_printk(KERN_DEBUG, &pdev->dev, - "phy[%d] Get Attached Address 0x%llX ," - " SAS Address 0x%llX\n", - i, - (unsigned long long)phy->att_dev_sas_addr, - (unsigned long long)phy->dev_sas_addr); - dev_printk(KERN_DEBUG, &pdev->dev, - "Rate = %x , type = %d\n", - sas_phy->linkrate, phy->phy_type); - - /* workaround for HW phy decoding error on 1.5g disk drive */ - mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); - tmp = mvs_read_port_vsr_data(mvi, i); - if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == - SAS_LINK_RATE_1_5_GBPS) - tmp &= ~PHY_MODE6_LATECLK; - else - tmp |= PHY_MODE6_LATECLK; - mvs_write_port_vsr_data(mvi, i, tmp); + memcpy(sas_phy->attached_sas_addr, + &phy->att_dev_sas_addr, SAS_ADDR_SIZE); + if (MVS_CHIP_DISP->phy_work_around) + MVS_CHIP_DISP->phy_work_around(mvi, i); } + mv_dprintk("port %d attach dev info is %x\n", + i + mvi->id * mvi->chip->n_phy, phy->att_dev_info); + mv_dprintk("port %d attach sas addr is %llx\n", + i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr); out_done: if (get_st) - mvs_write_port_irq_stat(mvi, i, phy->irq_status); + MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status); } -void mvs_port_formed(struct asd_sas_phy *sas_phy) +static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock) { struct sas_ha_struct *sas_ha = sas_phy->ha; - struct mvs_info *mvi = sas_ha->lldd_ha; - struct asd_sas_port *sas_port = sas_phy->port; + struct mvs_info *mvi = NULL; int i = 0, hi; struct mvs_phy *phy = sas_phy->lldd_phy; - struct mvs_port *port = &mvi->port[sas_port->id]; - unsigned long flags; + struct asd_sas_port *sas_port = sas_phy->port; + struct mvs_port *port; + unsigned long flags = 0; + if (!sas_port) + return; - spin_lock_irqsave(&mvi->lock, flags); + while (sas_ha->sas_phy[i]) { + if (sas_ha->sas_phy[i] == sas_phy) + break; + i++; + } + hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy; + mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi]; + if (sas_port->id >= mvi->chip->n_phy) + port = &mvi->port[sas_port->id - mvi->chip->n_phy]; + else + port = &mvi->port[sas_port->id]; + if (lock) + spin_lock_irqsave(&mvi->lock, flags); port->port_attached = 1; phy->port = port; - port->taskfileset = MVS_ID_NOT_MAPPED; if (phy->phy_type & PORT_TYPE_SAS) { port->wide_port_phymap = sas_port->phy_mask; + mv_printk("set wide port phy map %x\n", sas_port->phy_mask); mvs_update_wideport(mvi, sas_phy->id); } - spin_unlock_irqrestore(&mvi->lock, flags); + if (lock) + 
spin_unlock_irqrestore(&mvi->lock, flags); }

-int mvs_I_T_nexus_reset(struct domain_device *dev)
+static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
{
- return TMF_RESP_FUNC_FAILED;
+ /*Nothing*/
}

-static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
- u32 slot_idx, int err)
-{
- struct mvs_port *port = mvi->slot_info[slot_idx].port;
- struct task_status_struct *tstat = &task->task_status;
- struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
- int stat = SAM_GOOD;
- resp->frame_len = sizeof(struct dev_to_host_fis);
- memcpy(&resp->ending_fis[0],
- SATA_RECEIVED_D2H_FIS(port->taskfileset),
- sizeof(struct dev_to_host_fis));
- tstat->buf_valid_size = sizeof(*resp);
- if (unlikely(err))
- stat = SAS_PROTO_RESPONSE;
- return stat;
+void mvs_port_formed(struct asd_sas_phy *sas_phy)
+{
+ mvs_port_notify_formed(sas_phy, 1);
}

-static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
- u32 slot_idx)
+void mvs_port_deformed(struct asd_sas_phy *sas_phy)
{
- struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
- u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
- u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
- int stat = SAM_CHECK_COND;
+ mvs_port_notify_deformed(sas_phy, 1);
+}

- if (err_dw1 & SLOT_BSY_ERR) {
- stat = SAS_QUEUE_FULL;
- mvs_slot_reset(mvi, task, slot_idx);
- }
- switch (task->task_proto) {
- case SAS_PROTOCOL_SSP:
- break;
- case SAS_PROTOCOL_SMP:
- break;
- case SAS_PROTOCOL_SATA:
- case SAS_PROTOCOL_STP:
- case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- if (err_dw0 & TFILE_ERR)
- stat = mvs_sata_done(mvi, task, slot_idx, 1);
- break;
- default:
- break;
+struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
+{
+ u32 dev;
+ for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
+ if (mvi->devices[dev].dev_type == NO_DEVICE) {
+ mvi->devices[dev].device_id = dev;
+ return &mvi->devices[dev];
+ }
}
- mvs_hexdump(16, (u8 *) slot->response, 0);
- return stat;
+ if (dev == MVS_MAX_DEVICES)
+ mv_printk("no free device slot; at most %d devices are supported.\n",
+ MVS_MAX_DEVICES);
+
+ return NULL;
}

-static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
+void mvs_free_dev(struct mvs_device *mvi_dev)
{
- u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
- struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
- struct sas_task *task = slot->task;
- struct task_status_struct *tstat;
- struct mvs_port *port;
- bool aborted;
- void *to;
+ u32 id = mvi_dev->device_id;
+ memset(mvi_dev, 0, sizeof(*mvi_dev));
+ mvi_dev->device_id = id;
+ mvi_dev->dev_type = NO_DEVICE;
+ mvi_dev->dev_status = MVS_DEV_NORMAL;
+ mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
+}

- if (unlikely(!task || !task->lldd_task))
- return -1;
+int mvs_dev_found_notify(struct domain_device *dev, int lock)
+{
+ unsigned long flags = 0;
+ int res = 0;
+ struct mvs_info *mvi = NULL;
+ struct domain_device *parent_dev = dev->parent;
+ struct mvs_device *mvi_device;

- mvs_hba_cq_dump(mvi);
+ mvi = mvs_find_dev_mvi(dev);

- spin_lock(&task->task_state_lock);
- aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
- if (!aborted) {
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
- task->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (lock)
+ spin_lock_irqsave(&mvi->lock, flags);
+
+ mvi_device = mvs_alloc_dev(mvi);
+ if (!mvi_device) {
+ res = -1;
+ goto found_out;
}
- spin_unlock(&task->task_state_lock);
+ dev->lldd_dev = (void *)mvi_device;
+ mvi_device->dev_type = dev->dev_type;
+
+ if (parent_dev && 
DEV_IS_EXPANDER(parent_dev->dev_type)) { + int phy_id; + u8 phy_num = parent_dev->ex_dev.num_phys; + struct ex_phy *phy; + for (phy_id = 0; phy_id < phy_num; phy_id++) { + phy = &parent_dev->ex_dev.ex_phy[phy_id]; + if (SAS_ADDR(phy->attached_sas_addr) == + SAS_ADDR(dev->sas_addr)) { + mvi_device->attached_phy = phy_id; + break; + } + } - if (aborted) { - mvs_slot_task_free(mvi, task, slot, slot_idx); - mvs_slot_free(mvi, rx_desc); - return -1; + if (phy_id == phy_num) { + mv_printk("Error: no attached dev:%016llx" + "at ex:%016llx.\n", + SAS_ADDR(dev->sas_addr), + SAS_ADDR(parent_dev->sas_addr)); + res = -1; + } } - port = slot->port; - tstat = &task->task_status; - memset(tstat, 0, sizeof(*tstat)); - tstat->resp = SAS_TASK_COMPLETE; +found_out: + if (lock) + spin_unlock_irqrestore(&mvi->lock, flags); + return res; +} - if (unlikely(!port->port_attached || flags)) { - mvs_slot_err(mvi, task, slot_idx); - if (!sas_protocol_ata(task->task_proto)) - tstat->stat = SAS_PHY_DOWN; - goto out; - } +int mvs_dev_found(struct domain_device *dev) +{ + return mvs_dev_found_notify(dev, 1); +} - /* error info record present */ - if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { - tstat->stat = mvs_slot_err(mvi, task, slot_idx); - goto out; - } - - switch (task->task_proto) { - case SAS_PROTOCOL_SSP: - /* hw says status == 0, datapres == 0 */ - if (rx_desc & RXQ_GOOD) { - tstat->stat = SAM_GOOD; - tstat->resp = SAS_TASK_COMPLETE; - } - /* response frame present */ - else if (rx_desc & RXQ_RSP) { - struct ssp_response_iu *iu = - slot->response + sizeof(struct mvs_err_info); - sas_ssp_task_response(&mvi->pdev->dev, task, iu); - } - - /* should never happen? */ - else - tstat->stat = SAM_CHECK_COND; - break; +void mvs_dev_gone_notify(struct domain_device *dev, int lock) +{ + unsigned long flags = 0; + struct mvs_info *mvi; + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; - case SAS_PROTOCOL_SMP: { - struct scatterlist *sg_resp = &task->smp_task.smp_resp; - tstat->stat = SAM_GOOD; - to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); - memcpy(to + sg_resp->offset, - slot->response + sizeof(struct mvs_err_info), - sg_dma_len(sg_resp)); - kunmap_atomic(to, KM_IRQ0); - break; - } + mvi = mvs_find_dev_mvi(dev); - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { - tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); - break; - } + if (lock) + spin_lock_irqsave(&mvi->lock, flags); - default: - tstat->stat = SAM_CHECK_COND; - break; + if (mvi_dev) { + mv_dprintk("found dev[%d:%x] is gone.\n", + mvi_dev->device_id, mvi_dev->dev_type); + mvs_free_reg_set(mvi, mvi_dev); + mvs_free_dev(mvi_dev); + } else { + mv_dprintk("found dev has gone.\n"); } + dev->lldd_dev = NULL; -out: - mvs_slot_task_free(mvi, task, slot, slot_idx); - if (unlikely(tstat->stat != SAS_QUEUE_FULL)) - mvs_slot_free(mvi, rx_desc); - - spin_unlock(&mvi->lock); - task->task_done(task); - spin_lock(&mvi->lock); - return tstat->stat; + if (lock) + spin_unlock_irqrestore(&mvi->lock, flags); } -static void mvs_release_task(struct mvs_info *mvi, int phy_no) -{ - struct list_head *pos, *n; - struct mvs_slot_info *slot; - struct mvs_phy *phy = &mvi->phy[phy_no]; - struct mvs_port *port = phy->port; - u32 rx_desc; - if (!port) - return; +void mvs_dev_gone(struct domain_device *dev) +{ + mvs_dev_gone_notify(dev, 1); +} - list_for_each_safe(pos, n, &port->list) { - slot = container_of(pos, struct mvs_slot_info, list); - rx_desc = (u32) (slot - mvi->slot_info); - mvs_slot_complete(mvi, 
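mvs_dev_found_notify() above finds which expander phy a new device sits behind by matching attached SAS addresses, and fails if no phy matches. A standalone model of that search; the struct here is an illustrative stand-in for libsas's ex_phy:

#include <stdio.h>
#include <stdint.h>

struct ex_phy { uint64_t attached_sas_addr; };	/* illustrative */

static int find_attached_phy(const struct ex_phy *phys, int num,
			     uint64_t dev_addr)
{
	int i;

	for (i = 0; i < num; i++)
		if (phys[i].attached_sas_addr == dev_addr)
			return i;
	return -1;	/* caller reports "no attached dev" and fails */
}

int main(void)
{
	struct ex_phy phys[] = {
		{ 0x5000c50000000001ULL },
		{ 0x5000c50000000002ULL },
	};

	printf("%d\n", find_attached_phy(phys, 2, 0x5000c50000000002ULL));
	return 0;
}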
rx_desc, 1); +static struct sas_task *mvs_alloc_task(void) +{ + struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL); + + if (task) { + INIT_LIST_HEAD(&task->list); + spin_lock_init(&task->task_state_lock); + task->task_state_flags = SAS_TASK_STATE_PENDING; + init_timer(&task->timer); + init_completion(&task->completion); } + return task; } -static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) +static void mvs_free_task(struct sas_task *task) { - struct pci_dev *pdev = mvi->pdev; - struct sas_ha_struct *sas_ha = &mvi->sas; - struct mvs_phy *phy = &mvi->phy[phy_no]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - - phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no); - /* - * events is port event now , - * we need check the interrupt status which belongs to per port. - */ - dev_printk(KERN_DEBUG, &pdev->dev, - "Port %d Event = %X\n", - phy_no, phy->irq_status); - - if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) { - mvs_release_task(mvi, phy_no); - if (!mvs_is_phy_ready(mvi, phy_no)) { - sas_phy_disconnected(sas_phy); - sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); - dev_printk(KERN_INFO, &pdev->dev, - "Port %d Unplug Notice\n", phy_no); - - } else - mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL); + if (task) { + BUG_ON(!list_empty(&task->list)); + kfree(task); } - if (!(phy->irq_status & PHYEV_DEC_ERR)) { - if (phy->irq_status & PHYEV_COMWAKE) { - u32 tmp = mvs_read_port_irq_mask(mvi, phy_no); - mvs_write_port_irq_mask(mvi, phy_no, - tmp | PHYEV_SIG_FIS); - } - if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { - phy->phy_status = mvs_is_phy_ready(mvi, phy_no); - if (phy->phy_status) { - mvs_detect_porttype(mvi, phy_no); - - if (phy->phy_type & PORT_TYPE_SATA) { - u32 tmp = mvs_read_port_irq_mask(mvi, - phy_no); - tmp &= ~PHYEV_SIG_FIS; - mvs_write_port_irq_mask(mvi, - phy_no, tmp); - } +} - mvs_update_phyinfo(mvi, phy_no, 0); - sas_ha->notify_phy_event(sas_phy, - PHYE_OOB_DONE); - mvs_bytes_dmaed(mvi, phy_no); - } else { - dev_printk(KERN_DEBUG, &pdev->dev, - "plugin interrupt but phy is gone\n"); - mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, - NULL); - } - } else if (phy->irq_status & PHYEV_BROAD_CH) { - mvs_release_task(mvi, phy_no); - sas_ha->notify_port_event(sas_phy, - PORTE_BROADCAST_RCVD); - } - } - mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status); +static void mvs_task_done(struct sas_task *task) +{ + if (!del_timer(&task->timer)) + return; + complete(&task->completion); } -static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) +static void mvs_tmf_timedout(unsigned long data) { - void __iomem *regs = mvi->regs; - u32 rx_prod_idx, rx_desc; - bool attn = false; - struct pci_dev *pdev = mvi->pdev; + struct sas_task *task = (struct sas_task *)data; - /* the first dword in the RX ring is special: it contains - * a mirror of the hardware's RX producer index, so that - * we don't have to stall the CPU reading that register. - * The actual RX ring is offset by one dword, due to this. 
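The internal TMF machinery above races a timer against normal completion: mvs_task_done() completes only if it can still cancel the timer, while mvs_tmf_timedout() sets SAS_TASK_STATE_ABORTED before completing, so the caller can tell a timeout (ABORTED set, DONE clear) from success. A standalone model of that flag protocol; the constants are illustrative:

#include <stdio.h>

#define STATE_DONE	0x2	/* stands in for SAS_TASK_STATE_DONE    */
#define STATE_ABORTED	0x4	/* stands in for SAS_TASK_STATE_ABORTED */

struct task { int state; int completed; };

static void task_done(struct task *t)		/* normal completion */
{
	t->state |= STATE_DONE;
	t->completed = 1;
}

static void tmf_timedout(struct task *t)	/* timer expiry */
{
	t->state |= STATE_ABORTED;
	t->completed = 1;
}

int main(void)
{
	struct task a = { 0, 0 }, b = { 0, 0 };

	tmf_timedout(&a);
	task_done(&b);
	if ((a.state & STATE_ABORTED) && !(a.state & STATE_DONE))
		puts("task a: TMF timeout");	/* mirrors the check above */
	if (b.state & STATE_DONE)
		puts("task b: completed normally");
	return 0;
}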
- */ - rx_prod_idx = mvi->rx_cons; - mvi->rx_cons = le32_to_cpu(mvi->rx[0]); - if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ - return 0; + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + complete(&task->completion); +} - /* The CMPL_Q may come late, read from register and try again - * note: if coalescing is enabled, - * it will need to read from register every time for sure - */ - if (mvi->rx_cons == rx_prod_idx) - mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK; +/* XXX */ +#define MVS_TASK_TIMEOUT 20 +static int mvs_exec_internal_tmf_task(struct domain_device *dev, + void *parameter, u32 para_len, struct mvs_tmf_task *tmf) +{ + int res, retry; + struct sas_task *task = NULL; - if (mvi->rx_cons == rx_prod_idx) - return 0; + for (retry = 0; retry < 3; retry++) { + task = mvs_alloc_task(); + if (!task) + return -ENOMEM; - while (mvi->rx_cons != rx_prod_idx) { + task->dev = dev; + task->task_proto = dev->tproto; - /* increment our internal RX consumer pointer */ - rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); + memcpy(&task->ssp_task, parameter, para_len); + task->task_done = mvs_task_done; - rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); + task->timer.data = (unsigned long) task; + task->timer.function = mvs_tmf_timedout; + task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; + add_timer(&task->timer); - if (likely(rx_desc & RXQ_DONE)) - mvs_slot_complete(mvi, rx_desc, 0); - if (rx_desc & RXQ_ATTN) { - attn = true; - dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n", - rx_desc); - } else if (rx_desc & RXQ_ERR) { - if (!(rx_desc & RXQ_DONE)) - mvs_slot_complete(mvi, rx_desc, 0); - dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n", - rx_desc); - } else if (rx_desc & RXQ_SLOT_RESET) { - dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n", - rx_desc); - mvs_slot_free(mvi, rx_desc); - } - } + res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 0, 1, tmf); - if (attn && self_clear) - mvs_int_full(mvi); + if (res) { + del_timer(&task->timer); + mv_printk("executing internel task failed:%d\n", res); + goto ex_err; + } - return 0; -} + wait_for_completion(&task->completion); + res = -TMF_RESP_FUNC_FAILED; + /* Even TMF timed out, return direct. 
*/ + if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { + mv_printk("TMF task[%x] timeout.\n", tmf->tmf); + goto ex_err; + } + } -#ifndef MVS_DISABLE_NVRAM -static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data) -{ - int timeout = 1000; + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAM_GOOD) { + res = TMF_RESP_FUNC_COMPLETE; + break; + } - if (addr & ~SPI_ADDR_MASK) - return -EINVAL; + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_DATA_UNDERRUN) { + /* no error, but return the number of bytes of + * underrun */ + res = task->task_status.residual; + break; + } - writel(addr, regs + SPI_CMD); - writel(TWSI_RD, regs + SPI_CTL); + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_DATA_OVERRUN) { + mv_dprintk("blocked task error.\n"); + res = -EMSGSIZE; + break; + } else { + mv_dprintk(" task to dev %016llx response: 0x%x " + "status 0x%x\n", + SAS_ADDR(dev->sas_addr), + task->task_status.resp, + task->task_status.stat); + mvs_free_task(task); + task = NULL; - while (timeout-- > 0) { - if (readl(regs + SPI_CTL) & TWSI_RDY) { - *data = readl(regs + SPI_DATA); - return 0; } - - udelay(10); } - - return -EBUSY; +ex_err: + BUG_ON(retry == 3 && task != NULL); + if (task != NULL) + mvs_free_task(task); + return res; } -static int mvs_eep_read_buf(void __iomem *regs, u32 addr, - void *buf, u32 buflen) +static int mvs_debug_issue_ssp_tmf(struct domain_device *dev, + u8 *lun, struct mvs_tmf_task *tmf) { - u32 addr_end, tmp_addr, i, j; - u32 tmp = 0; - int rc; - u8 *tmp8, *buf8 = buf; + struct sas_ssp_task ssp_task; + DECLARE_COMPLETION_ONSTACK(completion); + if (!(dev->tproto & SAS_PROTOCOL_SSP)) + return TMF_RESP_FUNC_ESUPP; - addr_end = addr + buflen; - tmp_addr = ALIGN(addr, 4); - if (addr > 0xff) - return -EINVAL; + strncpy((u8 *)&ssp_task.LUN, lun, 8); - j = addr & 0x3; - if (j) { - rc = mvs_eep_read(regs, tmp_addr, &tmp); - if (rc) - return rc; - - tmp8 = (u8 *)&tmp; - for (i = j; i < 4; i++) - *buf8++ = tmp8[i]; + return mvs_exec_internal_tmf_task(dev, &ssp_task, + sizeof(ssp_task), tmf); +} - tmp_addr += 4; - } - for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) { - rc = mvs_eep_read(regs, tmp_addr, &tmp); - if (rc) - return rc; +/* Standard mandates link reset for ATA (type 0) + and hard reset for SSP (type 1) , only for RECOVERY */ +static int mvs_debug_I_T_nexus_reset(struct domain_device *dev) +{ + int rc; + struct sas_phy *phy = sas_find_local_phy(dev); + int reset_type = (dev->dev_type == SATA_DEV || + (dev->tproto & SAS_PROTOCOL_STP)) ? 
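As the comment above notes, recovery issues a link reset (type 0) for ATA/STP devices and a hard reset (type 1) for SSP devices. A standalone model of that selection; the enum values are illustrative:

#include <stdio.h>

enum dev_type { DEV_SSP, DEV_SATA, DEV_STP };	/* illustrative */

/* link reset (0) for ATA, hard reset (1) for SSP, per the comment above */
static int reset_type(enum dev_type t)
{
	return (t == DEV_SATA || t == DEV_STP) ? 0 : 1;
}

int main(void)
{
	printf("sata: %d, ssp: %d\n", reset_type(DEV_SATA),
	       reset_type(DEV_SSP));	/* sata: 0, ssp: 1 */
	return 0;
}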
0 : 1; + rc = sas_phy_reset(phy, reset_type); + msleep(2000); + return rc; +} - memcpy(buf8, &tmp, 4); - buf8 += 4; +/* mandatory SAM-3 */ +int mvs_lu_reset(struct domain_device *dev, u8 *lun) +{ + unsigned long flags; + int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; + struct mvs_tmf_task tmf_task; + struct mvs_info *mvi = mvs_find_dev_mvi(dev); + struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev; + + tmf_task.tmf = TMF_LU_RESET; + mvi_dev->dev_status = MVS_DEV_EH; + rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); + if (rc == TMF_RESP_FUNC_COMPLETE) { + num = mvs_find_dev_phyno(dev, phyno); + spin_lock_irqsave(&mvi->lock, flags); + for (i = 0; i < num; i++) + mvs_release_task(mvi, phyno[i], dev); + spin_unlock_irqrestore(&mvi->lock, flags); } + /* If failed, fall-through I_T_Nexus reset */ + mv_printk("%s for device[%x]:rc= %d\n", __func__, + mvi_dev->device_id, rc); + return rc; +} - if (tmp_addr < addr_end) { - rc = mvs_eep_read(regs, tmp_addr, &tmp); - if (rc) +int mvs_I_T_nexus_reset(struct domain_device *dev) +{ + unsigned long flags; + int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; + struct mvs_info *mvi = mvs_find_dev_mvi(dev); + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; + + if (mvi_dev->dev_status != MVS_DEV_EH) + return TMF_RESP_FUNC_COMPLETE; + rc = mvs_debug_I_T_nexus_reset(dev); + mv_printk("%s for device[%x]:rc= %d\n", + __func__, mvi_dev->device_id, rc); + + /* housekeeper */ + num = mvs_find_dev_phyno(dev, phyno); + spin_lock_irqsave(&mvi->lock, flags); + for (i = 0; i < num; i++) + mvs_release_task(mvi, phyno[i], dev); + spin_unlock_irqrestore(&mvi->lock, flags); + + return rc; +} +/* optional SAM-3 */ +int mvs_query_task(struct sas_task *task) +{ + u32 tag; + struct scsi_lun lun; + struct mvs_tmf_task tmf_task; + int rc = TMF_RESP_FUNC_FAILED; + + if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { + struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; + struct domain_device *dev = task->dev; + struct mvs_info *mvi = mvs_find_dev_mvi(dev); + + int_to_scsilun(cmnd->device->lun, &lun); + rc = mvs_find_tag(mvi, task, &tag); + if (rc == 0) { + rc = TMF_RESP_FUNC_FAILED; return rc; + } - tmp8 = (u8 *)&tmp; - j = addr_end - tmp_addr; - for (i = 0; i < j; i++) - *buf8++ = tmp8[i]; + tmf_task.tmf = TMF_QUERY_TASK; + tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); - tmp_addr += 4; + rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); + switch (rc) { + /* The task is still in Lun, release it then */ + case TMF_RESP_FUNC_SUCC: + /* The task is not in Lun or failed, reset the phy */ + case TMF_RESP_FUNC_FAILED: + case TMF_RESP_FUNC_COMPLETE: + break; + } } - - return 0; + mv_printk("%s:rc= %d\n", __func__, rc); + return rc; } -#endif -int mvs_nvram_read(struct mvs_info *mvi, u32 addr, void *buf, u32 buflen) +/* mandatory SAM-3, still need free task/slot info */ +int mvs_abort_task(struct sas_task *task) { -#ifndef MVS_DISABLE_NVRAM - void __iomem *regs = mvi->regs; - int rc, i; - u32 sum; - u8 hdr[2], *tmp; - const char *msg; - - rc = mvs_eep_read_buf(regs, addr, &hdr, 2); - if (rc) { - msg = "nvram hdr read failed"; - goto err_out; - } - rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen); - if (rc) { - msg = "nvram read failed"; - goto err_out; + struct scsi_lun lun; + struct mvs_tmf_task tmf_task; + struct domain_device *dev = task->dev; + struct mvs_info *mvi = mvs_find_dev_mvi(dev); + int rc = TMF_RESP_FUNC_FAILED; + unsigned long flags; + u32 tag; + if (mvi->exp_req) 
+ mvi->exp_req--; + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + rc = TMF_RESP_FUNC_COMPLETE; + goto out; } + spin_unlock_irqrestore(&task->task_state_lock, flags); + if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { + struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; + + int_to_scsilun(cmnd->device->lun, &lun); + rc = mvs_find_tag(mvi, task, &tag); + if (rc == 0) { + mv_printk("No such tag in %s\n", __func__); + rc = TMF_RESP_FUNC_FAILED; + return rc; + } - if (hdr[0] != 0x5A) { - /* entry id */ - msg = "invalid nvram entry id"; - rc = -ENOENT; - goto err_out; - } + tmf_task.tmf = TMF_ABORT_TASK; + tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); - tmp = buf; - sum = ((u32)hdr[0]) + ((u32)hdr[1]); - for (i = 0; i < buflen; i++) - sum += ((u32)tmp[i]); + rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); - if (sum) { - msg = "nvram checksum failure"; - rc = -EILSEQ; - goto err_out; - } + /* if successful, clear the task and callback forwards.*/ + if (rc == TMF_RESP_FUNC_COMPLETE) { + u32 slot_no; + struct mvs_slot_info *slot; + struct mvs_info *mvi = mvs_find_dev_mvi(dev); - return 0; + if (task->lldd_task) { + slot = (struct mvs_slot_info *)task->lldd_task; + slot_no = (u32) (slot - mvi->slot_info); + mvs_slot_complete(mvi, slot_no, 1); + } + } + } else if (task->task_proto & SAS_PROTOCOL_SATA || + task->task_proto & SAS_PROTOCOL_STP) { + /* to do free register_set */ + } else { + /* SMP */ -err_out: - dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg); + } +out: + if (rc != TMF_RESP_FUNC_COMPLETE) + mv_printk("%s:rc= %d\n", __func__, rc); return rc; -#else - /* FIXME , For SAS target mode */ - memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8); - return 0; -#endif } -static void mvs_int_sata(struct mvs_info *mvi) +int mvs_abort_task_set(struct domain_device *dev, u8 *lun) { - u32 tmp; - void __iomem *regs = mvi->regs; - tmp = mr32(INT_STAT_SRS); - mw32(INT_STAT_SRS, tmp & 0xFFFF); -} + int rc = TMF_RESP_FUNC_FAILED; + struct mvs_tmf_task tmf_task; -static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, - u32 slot_idx) -{ - void __iomem *regs = mvi->regs; - struct domain_device *dev = task->dev; - struct asd_sas_port *sas_port = dev->port; - struct mvs_port *port = mvi->slot_info[slot_idx].port; - u32 reg_set, phy_mask; - - if (!sas_protocol_ata(task->task_proto)) { - reg_set = 0; - phy_mask = (port->wide_port_phymap) ? 
port->wide_port_phymap : - sas_port->phy_mask; - } else { - reg_set = port->taskfileset; - phy_mask = sas_port->phy_mask; - } - mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx | - (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) | - (phy_mask << TXQ_PHY_SHIFT) | - (reg_set << TXQ_SRS_SHIFT)); + tmf_task.tmf = TMF_ABORT_TASK_SET; + rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); - mw32(TX_PROD_IDX, mvi->tx_prod); - mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); + return rc; } -void mvs_int_full(struct mvs_info *mvi) +int mvs_clear_aca(struct domain_device *dev, u8 *lun) { - void __iomem *regs = mvi->regs; - u32 tmp, stat; - int i; + int rc = TMF_RESP_FUNC_FAILED; + struct mvs_tmf_task tmf_task; - stat = mr32(INT_STAT); + tmf_task.tmf = TMF_CLEAR_ACA; + rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); - mvs_int_rx(mvi, false); + return rc; +} - for (i = 0; i < MVS_MAX_PORTS; i++) { - tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); - if (tmp) - mvs_int_port(mvi, i, tmp); - } +int mvs_clear_task_set(struct domain_device *dev, u8 *lun) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct mvs_tmf_task tmf_task; - if (stat & CINT_SRS) - mvs_int_sata(mvi); + tmf_task.tmf = TMF_CLEAR_TASK_SET; + rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); - mw32(INT_STAT, stat); + return rc; } -#ifndef MVS_DISABLE_MSI -static irqreturn_t mvs_msi_interrupt(int irq, void *opaque) +static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, + u32 slot_idx, int err) { - struct mvs_info *mvi = opaque; - -#ifndef MVS_USE_TASKLET - spin_lock(&mvi->lock); + struct mvs_device *mvi_dev = (struct mvs_device *)task->dev->lldd_dev; + struct task_status_struct *tstat = &task->task_status; + struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; + int stat = SAM_GOOD; - mvs_int_rx(mvi, true); - spin_unlock(&mvi->lock); -#else - tasklet_schedule(&mvi->tasklet); -#endif - return IRQ_HANDLED; + resp->frame_len = sizeof(struct dev_to_host_fis); + memcpy(&resp->ending_fis[0], + SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset), + sizeof(struct dev_to_host_fis)); + tstat->buf_valid_size = sizeof(*resp); + if (unlikely(err)) + stat = SAS_PROTO_RESPONSE; + return stat; } -#endif -int mvs_task_abort(struct sas_task *task) +static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, + u32 slot_idx) { - int rc; - unsigned long flags; - struct mvs_info *mvi = task->dev->port->ha->lldd_ha; - struct pci_dev *pdev = mvi->pdev; - int tag; + struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; + int stat; + u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); + u32 tfs = 0; + enum mvs_port_type type = PORT_TYPE_SAS; - spin_lock_irqsave(&task->task_state_lock, flags); - if (task->task_state_flags & SAS_TASK_STATE_DONE) { - rc = TMF_RESP_FUNC_COMPLETE; - spin_unlock_irqrestore(&task->task_state_lock, flags); - goto out_done; - } - spin_unlock_irqrestore(&task->task_state_lock, flags); + if (err_dw0 & CMD_ISS_STPD) + MVS_CHIP_DISP->issue_stop(mvi, type, tfs); + + MVS_CHIP_DISP->command_active(mvi, slot_idx); + stat = SAM_CHECK_COND; switch (task->task_proto) { - case SAS_PROTOCOL_SMP: - dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n"); - break; case SAS_PROTOCOL_SSP: - dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n"); + stat = SAS_ABORTED_TASK; + break; + case SAS_PROTOCOL_SMP: + stat = SAM_CHECK_COND; break; + case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{ - dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! 
\n"); -#if _MV_DUMP - dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n"); - mvs_hexdump(sizeof(struct host_to_dev_fis), - (void *)&task->ata_task.fis, 0); - dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n"); - mvs_hexdump(16, task->ata_task.atapi_packet, 0); -#endif - spin_lock_irqsave(&task->task_state_lock, flags); - if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { - /* TODO */ - ; - } - spin_unlock_irqrestore(&task->task_state_lock, flags); - break; + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + { + if (err_dw0 == 0x80400002) + mv_printk("find reserved error, why?\n"); + + task->ata_task.use_ncq = 0; + stat = SAS_PROTO_RESPONSE; + mvs_sata_done(mvi, task, slot_idx, 1); + } + break; default: break; } - if (mvs_find_tag(mvi, task, &tag)) { - spin_lock_irqsave(&mvi->lock, flags); - mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag); - spin_unlock_irqrestore(&mvi->lock, flags); - } - if (!mvs_task_exec(task, 1, GFP_ATOMIC)) - rc = TMF_RESP_FUNC_COMPLETE; - else - rc = TMF_RESP_FUNC_FAILED; -out_done: - return rc; + return stat; } -int __devinit mvs_hw_init(struct mvs_info *mvi) +int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) { - void __iomem *regs = mvi->regs; - int i; - u32 tmp, cctl; - - /* make sure interrupts are masked immediately (paranoia) */ - mw32(GBL_CTL, 0); - tmp = mr32(GBL_CTL); - - /* Reset Controller */ - if (!(tmp & HBA_RST)) { - if (mvi->flags & MVF_PHY_PWR_FIX) { - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); - tmp &= ~PCTL_PWR_ON; - tmp |= PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); - - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); - tmp &= ~PCTL_PWR_ON; - tmp |= PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); - } + u32 slot_idx = rx_desc & RXQ_SLOT_MASK; + struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; + struct sas_task *task = slot->task; + struct mvs_device *mvi_dev = NULL; + struct task_status_struct *tstat; + + bool aborted; + void *to; + enum exec_status sts; + + if (mvi->exp_req) + mvi->exp_req--; + if (unlikely(!task || !task->lldd_task)) + return -1; + + tstat = &task->task_status; + mvi_dev = (struct mvs_device *)task->dev->lldd_dev; - /* global reset, incl. 
COMRESET/H_RESET_N (self-clearing) */ - mw32_f(GBL_CTL, HBA_RST); + mvs_hba_cq_dump(mvi); + + spin_lock(&task->task_state_lock); + task->task_state_flags &= + ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); + task->task_state_flags |= SAS_TASK_STATE_DONE; + /* race condition*/ + aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; + spin_unlock(&task->task_state_lock); + + memset(tstat, 0, sizeof(*tstat)); + tstat->resp = SAS_TASK_COMPLETE; + + if (unlikely(aborted)) { + tstat->stat = SAS_ABORTED_TASK; + if (mvi_dev) + mvi_dev->runing_req--; + if (sas_protocol_ata(task->task_proto)) + mvs_free_reg_set(mvi, mvi_dev); + + mvs_slot_task_free(mvi, task, slot, slot_idx); + return -1; } - /* wait for reset to finish; timeout is just a guess */ - i = 1000; - while (i-- > 0) { - msleep(10); + if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) { + mv_dprintk("port has not device.\n"); + tstat->stat = SAS_PHY_DOWN; + goto out; + } - if (!(mr32(GBL_CTL) & HBA_RST)) - break; + /* + if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) { + mv_dprintk("Find device[%016llx] RXQ_ERR %X, + err info:%016llx\n", + SAS_ADDR(task->dev->sas_addr), + rx_desc, (u64)(*(u64 *) slot->response)); } - if (mr32(GBL_CTL) & HBA_RST) { - dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n"); - return -EBUSY; + */ + + /* error info record present */ + if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { + tstat->stat = mvs_slot_err(mvi, task, slot_idx); + goto out; } - /* Init Chip */ - /* make sure RST is set; HBA_RST /should/ have done that for us */ - cctl = mr32(CTL); - if (cctl & CCTL_RST) - cctl &= ~CCTL_RST; - else - mw32_f(CTL, cctl | CCTL_RST); + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + /* hw says status == 0, datapres == 0 */ + if (rx_desc & RXQ_GOOD) { + tstat->stat = SAM_GOOD; + tstat->resp = SAS_TASK_COMPLETE; + } + /* response frame present */ + else if (rx_desc & RXQ_RSP) { + struct ssp_response_iu *iu = slot->response + + sizeof(struct mvs_err_info); + sas_ssp_task_response(mvi->dev, task, iu); + } else + tstat->stat = SAM_CHECK_COND; + break; - /* write to device control _AND_ device status register? - A.C. 
*/ - pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp); - tmp &= ~PRD_REQ_MASK; - tmp |= PRD_REQ_SIZE; - pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp); + case SAS_PROTOCOL_SMP: { + struct scatterlist *sg_resp = &task->smp_task.smp_resp; + tstat->stat = SAM_GOOD; + to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); + memcpy(to + sg_resp->offset, + slot->response + sizeof(struct mvs_err_info), + sg_dma_len(sg_resp)); + kunmap_atomic(to, KM_IRQ0); + break; + } - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); - tmp |= PCTL_PWR_ON; - tmp &= ~PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { + tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); + break; + } - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); - tmp |= PCTL_PWR_ON; - tmp &= ~PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); + default: + tstat->stat = SAM_CHECK_COND; + break; + } - mw32_f(CTL, cctl); +out: + if (mvi_dev) + mvi_dev->runing_req--; + if (sas_protocol_ata(task->task_proto)) + mvs_free_reg_set(mvi, mvi_dev); - /* reset control */ - mw32(PCS, 0); /*MVS_PCS */ + mvs_slot_task_free(mvi, task, slot, slot_idx); + sts = tstat->stat; - mvs_phy_hacks(mvi); + spin_unlock(&mvi->lock); + if (task->task_done) + task->task_done(task); + else + mv_dprintk("why has not task_done.\n"); + spin_lock(&mvi->lock); - mw32(CMD_LIST_LO, mvi->slot_dma); - mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); + return sts; +} - mw32(RX_FIS_LO, mvi->rx_fis_dma); - mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); +void mvs_release_task(struct mvs_info *mvi, + int phy_no, struct domain_device *dev) +{ + int i = 0; u32 slot_idx; + struct mvs_phy *phy; + struct mvs_port *port; + struct mvs_slot_info *slot, *slot2; - mw32(TX_CFG, MVS_CHIP_SLOT_SZ); - mw32(TX_LO, mvi->tx_dma); - mw32(TX_HI, (mvi->tx_dma >> 16) >> 16); + phy = &mvi->phy[phy_no]; + port = phy->port; + if (!port) + return; - mw32(RX_CFG, MVS_RX_RING_SZ); - mw32(RX_LO, mvi->rx_dma); - mw32(RX_HI, (mvi->rx_dma >> 16) >> 16); + list_for_each_entry_safe(slot, slot2, &port->list, entry) { + struct sas_task *task; + slot_idx = (u32) (slot - mvi->slot_info); + task = slot->task; - /* enable auto port detection */ - mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN); - msleep(1100); - /* init and reset phys */ - for (i = 0; i < mvi->chip->n_phy; i++) { - u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]); - u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]); + if (dev && task->dev != dev) + continue; - mvs_detect_porttype(mvi, i); + mv_printk("Release slot [%x] tag[%x], task [%p]:\n", + slot_idx, slot->slot_tag, task); - /* set phy local SAS address */ - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); - mvs_write_port_cfg_data(mvi, i, lo); - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); - mvs_write_port_cfg_data(mvi, i, hi); + if (task->task_proto & SAS_PROTOCOL_SSP) { + mv_printk("attached with SSP task CDB["); + for (i = 0; i < 16; i++) + mv_printk(" %02x", task->ssp_task.cdb[i]); + mv_printk(" ]\n"); + } - /* reset phy */ - tmp = mvs_read_phy_ctl(mvi, i); - tmp |= PHY_RST; - mvs_write_phy_ctl(mvi, i, tmp); + mvs_slot_complete(mvi, slot_idx, 1); } +} - msleep(100); +static void mvs_phy_disconnected(struct mvs_phy *phy) +{ + phy->phy_attached = 0; + phy->att_dev_info = 0; + phy->att_dev_sas_addr = 0; +} + +static void mvs_work_queue(struct work_struct *work) +{ + struct delayed_work *dw = container_of(work, struct delayed_work, work); + struct mvs_wq *mwq = container_of(dw, 
struct mvs_wq, work_q); + struct mvs_info *mvi = mwq->mvi; + unsigned long flags; - for (i = 0; i < mvi->chip->n_phy; i++) { - /* clear phy int status */ - tmp = mvs_read_port_irq_stat(mvi, i); - tmp &= ~PHYEV_SIG_FIS; - mvs_write_port_irq_stat(mvi, i, tmp); + spin_lock_irqsave(&mvi->lock, flags); + if (mwq->handler & PHY_PLUG_EVENT) { + u32 phy_no = (unsigned long) mwq->data; + struct sas_ha_struct *sas_ha = mvi->sas; + struct mvs_phy *phy = &mvi->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + if (phy->phy_event & PHY_PLUG_OUT) { + u32 tmp; + struct sas_identify_frame *id; + id = (struct sas_identify_frame *)phy->frame_rcvd; + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no); + phy->phy_event &= ~PHY_PLUG_OUT; + if (!(tmp & PHY_READY_MASK)) { + sas_phy_disconnected(sas_phy); + mvs_phy_disconnected(phy); + sas_ha->notify_phy_event(sas_phy, + PHYE_LOSS_OF_SIGNAL); + mv_dprintk("phy%d Removed Device\n", phy_no); + } else { + MVS_CHIP_DISP->detect_porttype(mvi, phy_no); + mvs_update_phyinfo(mvi, phy_no, 1); + mvs_bytes_dmaed(mvi, phy_no); + mvs_port_notify_formed(sas_phy, 0); + mv_dprintk("phy%d Attached Device\n", phy_no); + } + } + } + list_del(&mwq->entry); + spin_unlock_irqrestore(&mvi->lock, flags); + kfree(mwq); +} - /* set phy int mask */ - tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS | - PHYEV_ID_DONE | PHYEV_DEC_ERR; - mvs_write_port_irq_mask(mvi, i, tmp); +static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler) +{ + struct mvs_wq *mwq; + int ret = 0; + + mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC); + if (mwq) { + mwq->mvi = mvi; + mwq->data = data; + mwq->handler = handler; + MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq); + list_add_tail(&mwq->entry, &mvi->wq_list); + schedule_delayed_work(&mwq->work_q, HZ * 2); + } else + ret = -ENOMEM; + + return ret; +} - msleep(100); - mvs_update_phyinfo(mvi, i, 1); - mvs_enable_xmt(mvi, i); +static void mvs_sig_time_out(unsigned long tphy) +{ + struct mvs_phy *phy = (struct mvs_phy *)tphy; + struct mvs_info *mvi = phy->mvi; + u8 phy_no; + + for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) { + if (&mvi->phy[phy_no] == phy) { + mv_dprintk("Get signature time out, reset phy %d\n", + phy_no+mvi->id*mvi->chip->n_phy); + MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1); + } } +} - /* FIXME: update wide port bitmaps */ +static void mvs_sig_remove_timer(struct mvs_phy *phy) +{ + if (phy->timer.function) + del_timer(&phy->timer); + phy->timer.function = NULL; +} - /* little endian for open address and command table, etc. */ - /* A.C. - * it seems that ( from the spec ) turning on big-endian won't - * do us any good on big-endian machines, need further confirmation - */ - cctl = mr32(CTL); - cctl |= CCTL_ENDIAN_CMD; - cctl |= CCTL_ENDIAN_DATA; - cctl &= ~CCTL_ENDIAN_OPEN; - cctl |= CCTL_ENDIAN_RSP; - mw32_f(CTL, cctl); - - /* reset CMD queue */ - tmp = mr32(PCS); - tmp |= PCS_CMD_RST; - mw32(PCS, tmp); - /* interrupt coalescing may cause missing HW interrput in some case, - * and the max count is 0x1ff, while our max slot is 0x200, - * it will make count 0. 
- */ - tmp = 0; - mw32(INT_COAL, tmp); +void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) +{ + u32 tmp; + struct sas_ha_struct *sas_ha = mvi->sas; + struct mvs_phy *phy = &mvi->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; - tmp = 0x100; - mw32(INT_COAL_TMOUT, tmp); + phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no); + mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy, + MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no)); + mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy, + phy->irq_status); - /* ladies and gentlemen, start your engines */ - mw32(TX_CFG, 0); - mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); - mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN); - /* enable CMD/CMPL_Q/RESP mode */ - mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN); + /* + * events is port event now , + * we need check the interrupt status which belongs to per port. + */ - /* enable completion queue interrupt */ - tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS); - mw32(INT_MASK, tmp); + if (phy->irq_status & PHYEV_DCDR_ERR) + mv_dprintk("port %d STP decoding error.\n", + phy_no+mvi->id*mvi->chip->n_phy); + + if (phy->irq_status & PHYEV_POOF) { + if (!(phy->phy_event & PHY_PLUG_OUT)) { + int dev_sata = phy->phy_type & PORT_TYPE_SATA; + int ready; + mvs_release_task(mvi, phy_no, NULL); + phy->phy_event |= PHY_PLUG_OUT; + mvs_handle_event(mvi, + (void *)(unsigned long)phy_no, + PHY_PLUG_EVENT); + ready = mvs_is_phy_ready(mvi, phy_no); + if (!ready) + mv_dprintk("phy%d Unplug Notice\n", + phy_no + + mvi->id * mvi->chip->n_phy); + if (ready || dev_sata) { + if (MVS_CHIP_DISP->stp_reset) + MVS_CHIP_DISP->stp_reset(mvi, + phy_no); + else + MVS_CHIP_DISP->phy_reset(mvi, + phy_no, 0); + return; + } + } + } - /* Enable SRS interrupt */ - mw32(INT_MASK_SRS, 0xFF); - return 0; + if (phy->irq_status & PHYEV_COMWAKE) { + tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no); + MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no, + tmp | PHYEV_SIG_FIS); + if (phy->timer.function == NULL) { + phy->timer.data = (unsigned long)phy; + phy->timer.function = mvs_sig_time_out; + phy->timer.expires = jiffies + 10*HZ; + add_timer(&phy->timer); + } + } + if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { + phy->phy_status = mvs_is_phy_ready(mvi, phy_no); + mvs_sig_remove_timer(phy); + mv_dprintk("notify plug in on phy[%d]\n", phy_no); + if (phy->phy_status) { + mdelay(10); + MVS_CHIP_DISP->detect_porttype(mvi, phy_no); + if (phy->phy_type & PORT_TYPE_SATA) { + tmp = MVS_CHIP_DISP->read_port_irq_mask( + mvi, phy_no); + tmp &= ~PHYEV_SIG_FIS; + MVS_CHIP_DISP->write_port_irq_mask(mvi, + phy_no, tmp); + } + mvs_update_phyinfo(mvi, phy_no, 0); + mvs_bytes_dmaed(mvi, phy_no); + /* whether driver is going to handle hot plug */ + if (phy->phy_event & PHY_PLUG_OUT) { + mvs_port_notify_formed(sas_phy, 0); + phy->phy_event &= ~PHY_PLUG_OUT; + } + } else { + mv_dprintk("plugin interrupt but phy%d is gone\n", + phy_no + mvi->id*mvi->chip->n_phy); + } + } else if (phy->irq_status & PHYEV_BROAD_CH) { + mv_dprintk("port %d broadcast change.\n", + phy_no + mvi->id*mvi->chip->n_phy); + /* exception for Samsung disk drive*/ + mdelay(1000); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + } + MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status); } -void __devinit mvs_print_info(struct mvs_info *mvi) +int mvs_int_rx(struct mvs_info *mvi, bool self_clear) { - struct pci_dev *pdev = mvi->pdev; - static int printed_version; + u32 rx_prod_idx, rx_desc; + bool 
attn = false; - if (!printed_version++) - dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); + /* the first dword in the RX ring is special: it contains + * a mirror of the hardware's RX producer index, so that + * we don't have to stall the CPU reading that register. + * The actual RX ring is offset by one dword, due to this. + */ + rx_prod_idx = mvi->rx_cons; + mvi->rx_cons = le32_to_cpu(mvi->rx[0]); + if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ + return 0; - dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n", - mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr)); + /* The CMPL_Q may come late, read from register and try again + * note: if coalescing is enabled, + * it will need to read from register every time for sure + */ + if (unlikely(mvi->rx_cons == rx_prod_idx)) + mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK; + + if (mvi->rx_cons == rx_prod_idx) + return 0; + + while (mvi->rx_cons != rx_prod_idx) { + /* increment our internal RX consumer pointer */ + rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); + rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); + + if (likely(rx_desc & RXQ_DONE)) + mvs_slot_complete(mvi, rx_desc, 0); + if (rx_desc & RXQ_ATTN) { + attn = true; + } else if (rx_desc & RXQ_ERR) { + if (!(rx_desc & RXQ_DONE)) + mvs_slot_complete(mvi, rx_desc, 0); + } else if (rx_desc & RXQ_SLOT_RESET) { + mvs_slot_free(mvi, rx_desc); + } + } + + if (attn && self_clear) + MVS_CHIP_DISP->int_full(mvi); + return 0; } diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 7a954a95a217..75b9748ae7cc 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -1,25 +1,26 @@ /* - mv_sas.h - Marvell 88SE6440 SAS/SATA support - - Copyright 2007 Red Hat, Inc. - Copyright 2008 Marvell. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2, - or (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public - License along with this program; see the file COPYING. If not, - write to the Free Software Foundation, 675 Mass Ave, Cambridge, - MA 02139, USA. - - */ + * Marvell 88SE64xx/88SE94xx main function head file + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA +*/ #ifndef _MV_SAS_H_ #define _MV_SAS_H_ @@ -42,25 +43,144 @@ #include #include "mv_defs.h" -#define DRV_NAME "mvsas" -#define DRV_VERSION "0.5.2" -#define _MV_DUMP 0 -#define MVS_DISABLE_NVRAM -#define MVS_DISABLE_MSI - +#define DRV_NAME "mvsas" +#define DRV_VERSION "0.8.2" +#define _MV_DUMP 0 #define MVS_ID_NOT_MAPPED 0x7f -#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) +/* #define DISABLE_HOTPLUG_DMA_FIX */ +#define MAX_EXP_RUNNING_REQ 2 +#define WIDE_PORT_MAX_PHY 4 +#define MV_DISABLE_NCQ 0 +#define mv_printk(fmt, arg ...) \ + printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg) +#ifdef MV_DEBUG +#define mv_dprintk(format, arg...) \ + printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg) +#else +#define mv_dprintk(format, arg...) +#endif +#define MV_MAX_U32 0xffffffff + +extern struct mvs_tgt_initiator mvs_tgt; +extern struct mvs_info *tgt_mvi; +extern const struct mvs_dispatch mvs_64xx_dispatch; +extern const struct mvs_dispatch mvs_94xx_dispatch; + +#define DEV_IS_EXPANDER(type) \ + ((type == EDGE_DEV) || (type == FANOUT_DEV)) -#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \ - for ((__mc) = (__lseq_mask), (__lseq) = 0; \ - (__mc) != 0 && __rest; \ +#define bit(n) ((u32)1 << n) + +#define for_each_phy(__lseq_mask, __mc, __lseq) \ + for ((__mc) = (__lseq_mask), (__lseq) = 0; \ + (__mc) != 0 ; \ (++__lseq), (__mc) >>= 1) +#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f) +#define UNASSOC_D2H_FIS(id) \ + ((void *) mvi->rx_fis + 0x100 * id) +#define SATA_RECEIVED_FIS_LIST(reg_set) \ + ((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set) +#define SATA_RECEIVED_SDB_FIS(reg_set) \ + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x58) +#define SATA_RECEIVED_D2H_FIS(reg_set) \ + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x40) +#define SATA_RECEIVED_PIO_FIS(reg_set) \ + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x20) +#define SATA_RECEIVED_DMA_FIS(reg_set) \ + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x00) + +enum dev_status { + MVS_DEV_NORMAL = 0x0, + MVS_DEV_EH = 0x1, +}; + + +struct mvs_info; + +struct mvs_dispatch { + char *name; + int (*chip_init)(struct mvs_info *mvi); + int (*spi_init)(struct mvs_info *mvi); + int (*chip_ioremap)(struct mvs_info *mvi); + void (*chip_iounmap)(struct mvs_info *mvi); + irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat); + u32 (*isr_status)(struct mvs_info *mvi, int irq); + void (*interrupt_enable)(struct mvs_info *mvi); + void (*interrupt_disable)(struct mvs_info *mvi); + + u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port); + void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val); + + u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port); + void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val); + void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr); + + u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port); + void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val); + void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr); + + u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port); + void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val); + + u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port); + void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val); + + void (*get_sas_addr)(void *buf, u32 buflen); 
+ void (*command_active)(struct mvs_info *mvi, u32 slot_idx); + void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type, + u32 tfs); + void (*start_delivery)(struct mvs_info *mvi, u32 tx); + u32 (*rx_update)(struct mvs_info *mvi); + void (*int_full)(struct mvs_info *mvi); + u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs); + void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs); + u32 (*prd_size)(void); + u32 (*prd_count)(void); + void (*make_prd)(struct scatterlist *scatter, int nr, void *prd); + void (*detect_porttype)(struct mvs_info *mvi, int i); + int (*oob_done)(struct mvs_info *mvi, int i); + void (*fix_phy_info)(struct mvs_info *mvi, int i, + struct sas_identify_frame *id); + void (*phy_work_around)(struct mvs_info *mvi, int i); + void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id, + struct sas_phy_linkrates *rates); + u32 (*phy_max_link_rate)(void); + void (*phy_disable)(struct mvs_info *mvi, u32 phy_id); + void (*phy_enable)(struct mvs_info *mvi, u32 phy_id); + void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard); + void (*stp_reset)(struct mvs_info *mvi, u32 phy_id); + void (*clear_active_cmds)(struct mvs_info *mvi); + u32 (*spi_read_data)(struct mvs_info *mvi); + void (*spi_write_data)(struct mvs_info *mvi, u32 data); + int (*spi_buildcmd)(struct mvs_info *mvi, + u32 *dwCmd, + u8 cmd, + u8 read, + u8 length, + u32 addr + ); + int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd); + int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout); +#ifndef DISABLE_HOTPLUG_DMA_FIX + void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd); +#endif + +}; + struct mvs_chip_info { - u32 n_phy; - u32 srs_sz; - u32 slot_width; + u32 n_host; + u32 n_phy; + u32 fis_offs; + u32 fis_count; + u32 srs_sz; + u32 slot_width; + const struct mvs_dispatch *dispatch; }; +#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) +#define MVS_RX_FISL_SZ \ + (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100)) +#define MVS_CHIP_DISP (mvi->chip->dispatch) struct mvs_err_info { __le32 flags; @@ -72,7 +192,7 @@ struct mvs_cmd_hdr { __le32 lens; /* cmd, max resp frame len */ __le32 tags; /* targ port xfer tag; tag */ __le32 data_len; /* data xfer len */ - __le64 cmd_tbl; /* command table address */ + __le64 cmd_tbl; /* command table address */ __le64 open_frame; /* open addr frame address */ __le64 status_buf; /* status buffer address */ __le64 prd_tbl; /* PRD tbl address */ @@ -82,16 +202,17 @@ struct mvs_cmd_hdr { struct mvs_port { struct asd_sas_port sas_port; u8 port_attached; - u8 taskfileset; u8 wide_port_phymap; struct list_head list; }; struct mvs_phy { + struct mvs_info *mvi; struct mvs_port *port; struct asd_sas_phy sas_phy; struct sas_identify identify; struct scsi_device *sdev; + struct timer_list timer; u64 dev_sas_addr; u64 att_dev_sas_addr; u32 att_dev_info; @@ -102,15 +223,34 @@ struct mvs_phy { u32 frame_rcvd_size; u8 frame_rcvd[32]; u8 phy_attached; + u8 phy_mode; + u8 reserved[2]; + u32 phy_event; enum sas_linkrate minimum_linkrate; enum sas_linkrate maximum_linkrate; }; +struct mvs_device { + enum sas_dev_type dev_type; + struct domain_device *sas_device; + u32 attached_phy; + u32 device_id; + u32 runing_req; + u8 taskfileset; + u8 dev_status; + u16 reserved; + struct list_head dev_entry; +}; + struct mvs_slot_info { - struct list_head list; - struct sas_task *task; + struct list_head entry; + union { + struct sas_task *task; + void *tdata; + }; u32 n_elem; u32 tx; + u32 slot_tag; /* DMA buffer for storing cmd tbl, open addr frame, status buffer, * and 
PRD table @@ -120,9 +260,10 @@ struct mvs_slot_info { #if _MV_DUMP u32 cmd_size; #endif - void *response; struct mvs_port *port; + struct mvs_device *device; + void *open_frame; }; struct mvs_info { @@ -133,17 +274,17 @@ struct mvs_info { /* our device */ struct pci_dev *pdev; + struct device *dev; /* enhanced mode registers */ void __iomem *regs; - /* peripheral registers */ - void __iomem *peri_regs; - + /* peripheral or soc registers */ + void __iomem *regs_ex; u8 sas_addr[SAS_ADDR_SIZE]; /* SCSI/SAS glue */ - struct sas_ha_struct sas; + struct sas_ha_struct *sas; struct Scsi_Host *shost; /* TX (delivery) DMA ring */ @@ -154,7 +295,7 @@ struct mvs_info { u32 tx_prod; /* RX (completion) DMA ring */ - __le32 *rx; + __le32 *rx; dma_addr_t rx_dma; /* RX consumer idx */ @@ -168,38 +309,98 @@ struct mvs_info { struct mvs_cmd_hdr *slot; dma_addr_t slot_dma; + u32 chip_id; const struct mvs_chip_info *chip; - u8 tags[MVS_SLOTS]; - struct mvs_slot_info slot_info[MVS_SLOTS]; - /* further per-slot information */ + int tags_num; + u8 tags[MVS_SLOTS >> 3]; + + /* further per-slot information */ struct mvs_phy phy[MVS_MAX_PHYS]; struct mvs_port port[MVS_MAX_PHYS]; -#ifdef MVS_USE_TASKLET - struct tasklet_struct tasklet; + u32 irq; + u32 exp_req; + u32 id; + u64 sata_reg_set; + struct list_head *hba_list; + struct list_head soc_entry; + struct list_head wq_list; + unsigned long instance; + u16 flashid; + u32 flashsize; + u32 flashsectSize; + + void *addon; + struct mvs_device devices[MVS_MAX_DEVICES]; +#ifndef DISABLE_HOTPLUG_DMA_FIX + void *bulk_buffer; + dma_addr_t bulk_buffer_dma; +#define TRASH_BUCKET_SIZE 0x20000 #endif + struct mvs_slot_info slot_info[0]; +}; + +struct mvs_prv_info{ + u8 n_host; + u8 n_phy; + u16 reserve; + struct mvs_info *mvi[2]; +}; + +struct mvs_wq { + struct delayed_work work_q; + struct mvs_info *mvi; + void *data; + int handler; + struct list_head entry; }; +struct mvs_task_exec_info { + struct sas_task *task; + struct mvs_cmd_hdr *hdr; + struct mvs_port *port; + u32 tag; + int n_elem; +}; + + +/******************** function prototype *********************/ +void mvs_get_sas_addr(void *buf, u32 buflen); +void mvs_tag_clear(struct mvs_info *mvi, u32 tag); +void mvs_tag_free(struct mvs_info *mvi, u32 tag); +void mvs_tag_set(struct mvs_info *mvi, unsigned int tag); +int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out); +void mvs_tag_init(struct mvs_info *mvi); +void mvs_iounmap(void __iomem *regs); +int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex); +void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard); int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata); +void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id, + u32 off_lo, u32 off_hi, u64 sas_addr); +int mvs_slave_alloc(struct scsi_device *scsi_dev); int mvs_slave_configure(struct scsi_device *sdev); void mvs_scan_start(struct Scsi_Host *shost); int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time); -int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags); -int mvs_task_abort(struct sas_task *task); +int mvs_queue_command(struct sas_task *task, const int num, + gfp_t gfp_flags); +int mvs_abort_task(struct sas_task *task); +int mvs_abort_task_set(struct domain_device *dev, u8 *lun); +int mvs_clear_aca(struct domain_device *dev, u8 *lun); +int mvs_clear_task_set(struct domain_device *dev, u8 * lun); void mvs_port_formed(struct asd_sas_phy *sas_phy); +void mvs_port_deformed(struct asd_sas_phy *sas_phy); +int mvs_dev_found(struct 
domain_device *dev); +void mvs_dev_gone(struct domain_device *dev); +int mvs_lu_reset(struct domain_device *dev, u8 *lun); +int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags); int mvs_I_T_nexus_reset(struct domain_device *dev); -void mvs_int_full(struct mvs_info *mvi); -void mvs_tag_init(struct mvs_info *mvi); -int mvs_nvram_read(struct mvs_info *mvi, u32 addr, void *buf, u32 buflen); -int __devinit mvs_hw_init(struct mvs_info *mvi); -void __devinit mvs_print_info(struct mvs_info *mvi); -void mvs_hba_interrupt_enable(struct mvs_info *mvi); -void mvs_hba_interrupt_disable(struct mvs_info *mvi); -void mvs_detect_porttype(struct mvs_info *mvi, int i); -u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port); -void mvs_enable_xmt(struct mvs_info *mvi, int PhyId); -void __devinit mvs_phy_hacks(struct mvs_info *mvi); -void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port); - +int mvs_query_task(struct sas_task *task); +void mvs_release_task(struct mvs_info *mvi, int phy_no, + struct domain_device *dev); +void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events); +void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); +int mvs_int_rx(struct mvs_info *mvi, bool self_clear); +void mvs_hexdump(u32 size, u8 *data, u32 baseaddr); #endif + -- cgit v1.2.3 From 14faf12f7df8404c3e8e54baad8d178c327a2f87 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Thu, 12 Mar 2009 11:08:51 -0400 Subject: [SCSI] Increase default timeout for INQUIRY This patch (as1224) changes the default timeout for INQUIRY commands from 3 seconds to 20 seconds, which is the value used by Windows for USB Mass-Storage devices. Some of these devices, like the Corsair Flash Voyager (see Bugzilla #12188) really do need a long timeout. Signed-off-by: Alan Stern Signed-off-by: James Bottomley --- drivers/scsi/scsi_scan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index e2b50d8f57a8..c44783801402 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -115,12 +115,12 @@ MODULE_PARM_DESC(max_report_luns, "REPORT LUNS maximum number of LUNS received (should be" " between 1 and 16384)"); -static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ+3; +static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18; module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(inq_timeout, "Timeout (in seconds) waiting for devices to answer INQUIRY." - " Default is 5. Some non-compliant devices need more."); + " Default is 20. Some devices may need more; most need less."); /* This lock protects only this list */ static DEFINE_SPINLOCK(async_scan_lock); -- cgit v1.2.3 From fd65e5e93cbd9d2f34bbb0f0b2f46a30a1d20915 Mon Sep 17 00:00:00 2001 From: Michael Reed Date: Wed, 8 Apr 2009 14:33:48 -0500 Subject: [SCSI] qla1280: driver clean up Remove some unneeded, inactive and unused code, make some trivial corrections to comments and a printk, and return a proper status in qla1280_queuecommand. No fundamental logic changes are made. 
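For context, the "proper status" refers to the SCSI mid-layer's queuecommand return convention: 0 means the command was accepted, while SCSI_MLQUEUE_HOST_BUSY means the host cannot take it right now and the mid-layer should requeue and retry. The hunks below switch the driver's ring-full paths from a bare 1 to SCSI_MLQUEUE_HOST_BUSY. A minimal self-contained sketch of that contract follows, using the 2.6-era two-argument prototype; the two helpers are illustrative stand-ins for adapter internals, not functions from this patch:

    #include <scsi/scsi.h>          /* SCSI_MLQUEUE_HOST_BUSY */
    #include <scsi/scsi_cmnd.h>

    /* illustrative stand-ins for the adapter-specific parts */
    static int ring_has_room(void) { return 1; }
    static void issue_to_adapter(struct scsi_cmnd *cmd) { (void)cmd; }

    static int sketch_queuecommand(struct scsi_cmnd *cmd,
                                   void (*done)(struct scsi_cmnd *))
    {
            cmd->scsi_done = done;

            if (!ring_has_room())
                    return SCSI_MLQUEUE_HOST_BUSY;  /* mid-layer requeues and retries */

            issue_to_adapter(cmd);
            return 0;                               /* command accepted */
    }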
Signed-off-by: Michael Reed Signed-off-by: James Bottomley --- drivers/scsi/qla1280.c | 99 +++++--------------------------------------------- 1 file changed, 10 insertions(+), 89 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 5defe5ea5eda..0cbad4982db9 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -435,7 +435,6 @@ static int qla1280_mailbox_command(struct scsi_qla_host *, uint8_t, uint16_t *); static int qla1280_bus_reset(struct scsi_qla_host *, int); static int qla1280_device_reset(struct scsi_qla_host *, int, int); -static int qla1280_abort_device(struct scsi_qla_host *, int, int, int); static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int); static int qla1280_abort_isp(struct scsi_qla_host *); #ifdef QLA_64BIT_PTR @@ -698,7 +697,7 @@ qla1280_info(struct Scsi_Host *host) } /************************************************************************** - * qla1200_queuecommand + * qla1280_queuecommand * Queue a command to the controller. * * Note: @@ -713,7 +712,7 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) { struct Scsi_Host *host = cmd->device->host; struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; - struct srb *sp = (struct srb *)&cmd->SCp; + struct srb *sp = (struct srb *)CMD_SP(cmd); int status; cmd->scsi_done = fn; @@ -738,11 +737,9 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) enum action { ABORT_COMMAND, - ABORT_DEVICE, DEVICE_RESET, BUS_RESET, ADAPTER_RESET, - FAIL }; /* timer action for error action processor */ @@ -768,7 +765,7 @@ static void qla1280_mailbox_timeout(unsigned long __data) } /************************************************************************** - * qla1200_error_action + * qla1280_error_action * The function will attempt to perform a specified error action and * wait for the results (or time out). * @@ -798,6 +795,8 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) DECLARE_COMPLETION_ONSTACK(wait); struct timer_list timer; + ENTER("qla1280_error_action"); + ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata); dprintk(4, "error_action %i, istatus 0x%04x\n", action, @@ -807,20 +806,11 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) RD_REG_WORD(&ha->iobase->host_cmd), RD_REG_WORD(&ha->iobase->ictrl), jiffies); - ENTER("qla1280_error_action"); if (qla1280_verbose) printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, " "Handle=0x%p, action=0x%x\n", ha->host_no, cmd, CMD_HANDLE(cmd), action); - if (cmd == NULL) { - printk(KERN_WARNING "(scsi?:?:?:?) 
Reset called with NULL " - "si_Cmnd pointer, failing.\n"); - LEAVE("qla1280_error_action"); - return FAILED; - } - - ha = (struct scsi_qla_host *)cmd->device->host->hostdata; sp = (struct srb *)CMD_SP(cmd); handle = CMD_HANDLE(cmd); @@ -857,9 +847,6 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) * mean the actual success or fail of the action */ result = FAILED; switch (action) { - case FAIL: - break; - case ABORT_COMMAND: if ((sp->flags & SRB_ABORT_PENDING)) { printk(KERN_WARNING @@ -893,15 +880,6 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) } break; - case ABORT_DEVICE: - if (qla1280_verbose) - printk(KERN_INFO - "scsi(%ld:%d:%d:%d): Queueing abort device " - "command.\n", ha->host_no, bus, target, lun); - if (qla1280_abort_device(ha, bus, target, lun) == 0) - result = SUCCESS; - break; - case DEVICE_RESET: if (qla1280_verbose) printk(KERN_INFO @@ -1285,8 +1263,6 @@ qla1280_done(struct scsi_qla_host *ha) case DID_ABORT: sp->flags &= ~SRB_ABORT_PENDING; sp->flags |= SRB_ABORTED; - if (sp->flags & SRB_TIMEOUT) - CMD_RESULT(sp->cmd) = DID_TIME_OUT << 16; break; default: break; @@ -2417,9 +2393,6 @@ static int qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) { struct device_reg __iomem *reg = ha->iobase; -#if 0 - LIST_HEAD(done_q); -#endif int status = 0; int cnt; uint16_t *optr, *iptr; @@ -2493,19 +2466,9 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) mr = MAILBOX_REGISTER_COUNT; memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t)); -#if 0 - /* Go check for any response interrupts pending. */ - qla1280_isr(ha, &done_q); -#endif - if (ha->flags.reset_marker) qla1280_rst_aen(ha); -#if 0 - if (!list_empty(&done_q)) - qla1280_done(ha, &done_q); -#endif - if (status) dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = " "0x%x ****\n", mb[0]); @@ -2640,41 +2603,6 @@ qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target) return status; } -/* - * qla1280_abort_device - * Issue an abort message to the device - * - * Input: - * ha = adapter block pointer. - * bus = SCSI BUS. - * target = SCSI ID. - * lun = SCSI LUN. - * - * Returns: - * 0 = success - */ -static int -qla1280_abort_device(struct scsi_qla_host *ha, int bus, int target, int lun) -{ - uint16_t mb[MAILBOX_REGISTER_COUNT]; - int status; - - ENTER("qla1280_abort_device"); - - mb[0] = MBC_ABORT_DEVICE; - mb[1] = (bus ? target | BIT_7 : target) << 8 | lun; - status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); - - /* Issue marker command. */ - qla1280_marker(ha, bus, target, lun, MK_SYNC_ID_LUN); - - if (status) - dprintk(2, "qla1280_abort_device: **** FAILED ****\n"); - - LEAVE("qla1280_abort_device"); - return status; -} - /* * qla1280_abort_command * Abort command aborts a specified IOCB. @@ -2833,7 +2761,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) /* If room for request in request ring. 
*/ if ((req_cnt + 2) >= ha->req_q_cnt) { - status = 1; + status = SCSI_MLQUEUE_HOST_BUSY; dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt=" "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, req_cnt); @@ -2845,7 +2773,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) ha->outstanding_cmds[cnt] != NULL; cnt++); if (cnt >= MAX_OUTSTANDING_COMMANDS) { - status = 1; + status = SCSI_MLQUEUE_HOST_BUSY; dprintk(2, "qla1280_start_scsi: NO ROOM IN " "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt); goto out; @@ -3108,7 +3036,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) ha->req_q_cnt, seg_cnt); /* If room for request in request ring. */ if ((req_cnt + 2) >= ha->req_q_cnt) { - status = 1; + status = SCSI_MLQUEUE_HOST_BUSY; dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, " "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, req_cnt); @@ -3120,7 +3048,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) (ha->outstanding_cmds[cnt] != 0); cnt++) ; if (cnt >= MAX_OUTSTANDING_COMMANDS) { - status = 1; + status = SCSI_MLQUEUE_HOST_BUSY; dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING " "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt); goto out; @@ -3495,7 +3423,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q) * If we get here we have a real problem! */ printk(KERN_WARNING - "qla1280: ISP invalid handle"); + "qla1280: ISP invalid handle\n"); } } break; @@ -3955,13 +3883,6 @@ qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus) if (scsi_control == SCSI_PHASE_INVALID) { ha->bus_settings[bus].scsi_bus_dead = 1; -#if 0 - CMD_RESULT(cp) = DID_NO_CONNECT << 16; - CMD_HANDLE(cp) = INVALID_HANDLE; - /* ha->actthreads--; */ - - (*(cp)->scsi_done)(cp); -#endif return 1; /* bus is dead */ } else { ha->bus_settings[bus].scsi_bus_dead = 0; -- cgit v1.2.3 From 413e6e18b483de272bdafa56e5c086c75f11d681 Mon Sep 17 00:00:00 2001 From: Michael Reed Date: Wed, 8 Apr 2009 14:34:33 -0500 Subject: [SCSI] qla1280: error recovery rewrite The driver now waits for the scsi commands associated with a particular error recovery step to be returned to the mid-layer, and returns the appropriate SUCCESS or FAILED status. Removes unneeded polling of chip for interrupts. This patch also bumps the driver version number. Signed-off-by: Michael Reed Signed-off-by: James Bottomley --- drivers/scsi/qla1280.c | 290 ++++++++++++++++++++++++++----------------------- drivers/scsi/qla1280.h | 3 +- 2 files changed, 159 insertions(+), 134 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 0cbad4982db9..8371d917a9a2 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -17,9 +17,12 @@ * General Public License for more details. * ******************************************************************************/ -#define QLA1280_VERSION "3.26" +#define QLA1280_VERSION "3.27" /***************************************************************************** Revision History: + Rev 3.27, February 10, 2009, Michael Reed + - General code cleanup. + - Improve error recovery. 
Rev 3.26, January 16, 2006 Jes Sorensen - Ditch all < 2.6 support Rev 3.25.1, February 10, 2005 Christoph Hellwig @@ -718,6 +721,8 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) cmd->scsi_done = fn; sp->cmd = cmd; sp->flags = 0; + sp->wait = NULL; + CMD_HANDLE(cmd) = (unsigned char *)NULL; qla1280_print_scsi_cmd(5, cmd); @@ -742,14 +747,6 @@ enum action { ADAPTER_RESET, }; -/* timer action for error action processor */ -static void qla1280_error_wait_timeout(unsigned long __data) -{ - struct scsi_cmnd *cmd = (struct scsi_cmnd *)__data; - struct srb *sp = (struct srb *)CMD_SP(cmd); - - complete(sp->wait); -} static void qla1280_mailbox_timeout(unsigned long __data) { @@ -764,6 +761,65 @@ static void qla1280_mailbox_timeout(unsigned long __data) complete(ha->mailbox_wait); } +static int +_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp, + struct completion *wait) +{ + int status = FAILED; + struct scsi_cmnd *cmd = sp->cmd; + + spin_unlock_irq(ha->host->host_lock); + wait_for_completion_timeout(wait, 4*HZ); + spin_lock_irq(ha->host->host_lock); + sp->wait = NULL; + if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) { + status = SUCCESS; + (*cmd->scsi_done)(cmd); + } + return status; +} + +static int +qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp) +{ + DECLARE_COMPLETION_ONSTACK(wait); + + sp->wait = &wait; + return _qla1280_wait_for_single_command(ha, sp, &wait); +} + +static int +qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target) +{ + int cnt; + int status; + struct srb *sp; + struct scsi_cmnd *cmd; + + status = SUCCESS; + + /* + * Wait for all commands with the designated bus/target + * to be completed by the firmware + */ + for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { + sp = ha->outstanding_cmds[cnt]; + if (sp) { + cmd = sp->cmd; + + if (bus >= 0 && SCSI_BUS_32(cmd) != bus) + continue; + if (target >= 0 && SCSI_TCN_32(cmd) != target) + continue; + + status = qla1280_wait_for_single_command(ha, sp); + if (status == FAILED) + break; + } + } + return status; +} + /************************************************************************** * qla1280_error_action * The function will attempt to perform a specified error action and @@ -777,11 +833,6 @@ static void qla1280_mailbox_timeout(unsigned long __data) * Returns: * SUCCESS or FAILED * - * Note: - * Resetting the bus always succeeds - is has to, otherwise the - * kernel will panic! Try a surgical technique - sending a BUS - * DEVICE RESET message - on the offending target before pulling - * the SCSI bus reset line. 
**************************************************************************/ static int qla1280_error_action(struct scsi_cmnd *cmd, enum action action) @@ -789,15 +840,19 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) struct scsi_qla_host *ha; int bus, target, lun; struct srb *sp; - uint16_t data; - unsigned char *handle; - int result, i; + int i, found; + int result=FAILED; + int wait_for_bus=-1; + int wait_for_target = -1; DECLARE_COMPLETION_ONSTACK(wait); - struct timer_list timer; ENTER("qla1280_error_action"); ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata); + sp = (struct srb *)CMD_SP(cmd); + bus = SCSI_BUS_32(cmd); + target = SCSI_TCN_32(cmd); + lun = SCSI_LUN_32(cmd); dprintk(4, "error_action %i, istatus 0x%04x\n", action, RD_REG_WORD(&ha->iobase->istatus)); @@ -811,73 +866,42 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) "Handle=0x%p, action=0x%x\n", ha->host_no, cmd, CMD_HANDLE(cmd), action); - sp = (struct srb *)CMD_SP(cmd); - handle = CMD_HANDLE(cmd); - - /* Check for pending interrupts. */ - data = qla1280_debounce_register(&ha->iobase->istatus); - /* - * The io_request_lock is held when the reset handler is called, hence - * the interrupt handler cannot be running in parallel as it also - * grabs the lock. /Jes - */ - if (data & RISC_INT) - qla1280_isr(ha, &ha->done_q); - /* - * Determine the suggested action that the mid-level driver wants - * us to perform. + * Check to see if we have the command in the outstanding_cmds[] + * array. If not then it must have completed before this error + * action was initiated. If the error_action isn't ABORT_COMMAND + * then the driver must proceed with the requested action. */ - if (handle == (unsigned char *)INVALID_HANDLE || handle == NULL) { - if(action == ABORT_COMMAND) { - /* we never got this command */ - printk(KERN_INFO "qla1280: Aborting a NULL handle\n"); - return SUCCESS; /* no action - we don't have command */ + found = -1; + for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) { + if (sp == ha->outstanding_cmds[i]) { + found = i; + sp->wait = &wait; /* we'll wait for it to complete */ + break; } - } else { - sp->wait = &wait; } - bus = SCSI_BUS_32(cmd); - target = SCSI_TCN_32(cmd); - lun = SCSI_LUN_32(cmd); + if (found < 0) { /* driver doesn't have command */ + result = SUCCESS; + if (qla1280_verbose) { + printk(KERN_INFO + "scsi(%ld:%d:%d:%d): specified command has " + "already completed.\n", ha->host_no, bus, + target, lun); + } + } - /* Overloading result. Here it means the success or fail of the - * *issue* of the action. When we return from the routine, it must - * mean the actual success or fail of the action */ - result = FAILED; switch (action) { - case ABORT_COMMAND: - if ((sp->flags & SRB_ABORT_PENDING)) { - printk(KERN_WARNING - "scsi(): Command has a pending abort " - "message - ABORT_PENDING.\n"); - /* This should technically be impossible since we - * now wait for abort completion */ - break; - } - for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) { - if (sp == ha->outstanding_cmds[i]) { - dprintk(1, "qla1280: RISC aborting command\n"); - if (qla1280_abort_command(ha, sp, i) == 0) - result = SUCCESS; - else { - /* - * Since we don't know what might - * have happend to the command, it - * is unsafe to remove it from the - * device's queue at this point. - * Wait and let the escalation - * process take care of it. 
- */ - printk(KERN_WARNING - "scsi(%li:%i:%i:%i): Unable" - " to abort command!\n", - ha->host_no, bus, target, lun); - } - } - } + case ABORT_COMMAND: + dprintk(1, "qla1280: RISC aborting command\n"); + /* + * The abort might fail due to race when the host_lock + * is released to issue the abort. As such, we + * don't bother to check the return status. + */ + if (found >= 0) + qla1280_abort_command(ha, sp, found); break; case DEVICE_RESET: @@ -885,16 +909,21 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) printk(KERN_INFO "scsi(%ld:%d:%d:%d): Queueing device reset " "command.\n", ha->host_no, bus, target, lun); - if (qla1280_device_reset(ha, bus, target) == 0) - result = SUCCESS; + if (qla1280_device_reset(ha, bus, target) == 0) { + /* issued device reset, set wait conditions */ + wait_for_bus = bus; + wait_for_target = target; + } break; case BUS_RESET: if (qla1280_verbose) printk(KERN_INFO "qla1280(%ld:%d): Issued bus " "reset.\n", ha->host_no, bus); - if (qla1280_bus_reset(ha, bus) == 0) - result = SUCCESS; + if (qla1280_bus_reset(ha, bus) == 0) { + /* issued bus reset, set wait conditions */ + wait_for_bus = bus; + } break; case ADAPTER_RESET: @@ -907,55 +936,48 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) "continue automatically\n", ha->host_no); } ha->flags.reset_active = 1; - /* - * We restarted all of the commands automatically, so the - * mid-level code can expect completions momentitarily. - */ - if (qla1280_abort_isp(ha) == 0) - result = SUCCESS; + + if (qla1280_abort_isp(ha) != 0) { /* it's dead */ + result = FAILED; + } ha->flags.reset_active = 0; } - if (!list_empty(&ha->done_q)) - qla1280_done(ha); - - /* If we didn't manage to issue the action, or we have no - * command to wait for, exit here */ - if (result == FAILED || handle == NULL || - handle == (unsigned char *)INVALID_HANDLE) { - /* - * Clear completion queue to avoid qla1280_done() trying - * to complete the command at a later stage after we - * have exited the current context - */ - sp->wait = NULL; - goto leave; - } + /* + * At this point, the host_lock has been released and retaken + * by the issuance of the mailbox command. + * Wait for the command passed in by the mid-layer if it + * was found by the driver. It might have been returned + * between eh recovery steps, hence the check of the "found" + * variable. + */ - /* set up a timer just in case we're really jammed */ - init_timer(&timer); - timer.expires = jiffies + 4*HZ; - timer.data = (unsigned long)cmd; - timer.function = qla1280_error_wait_timeout; - add_timer(&timer); + if (found >= 0) + result = _qla1280_wait_for_single_command(ha, sp, &wait); - /* wait for the action to complete (or the timer to expire) */ - spin_unlock_irq(ha->host->host_lock); - wait_for_completion(&wait); - del_timer_sync(&timer); - spin_lock_irq(ha->host->host_lock); - sp->wait = NULL; + if (action == ABORT_COMMAND && result != SUCCESS) { + printk(KERN_WARNING + "scsi(%li:%i:%i:%i): " + "Unable to abort command!\n", + ha->host_no, bus, target, lun); + } - /* the only action we might get a fail for is abort */ - if (action == ABORT_COMMAND) { - if(sp->flags & SRB_ABORTED) - result = SUCCESS; - else - result = FAILED; + /* + * If the command passed in by the mid-layer has been + * returned by the board, then wait for any additional + * commands which are supposed to complete based upon + * the error action. + * + * All commands are unconditionally returned during a + * call to qla1280_abort_isp(), ADAPTER_RESET. No need + * to wait for them. 
+ */ + if (result == SUCCESS && wait_for_bus >= 0) { + result = qla1280_wait_for_pending_commands(ha, + wait_for_bus, wait_for_target); } - leave: dprintk(1, "RESET returning %d\n", result); LEAVE("qla1280_error_action"); @@ -1258,7 +1280,8 @@ qla1280_done(struct scsi_qla_host *ha) switch ((CMD_RESULT(cmd) >> 16)) { case DID_RESET: /* Issue marker command. */ - qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); + if (!ha->flags.abort_isp_active) + qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); break; case DID_ABORT: sp->flags &= ~SRB_ABORT_PENDING; sp->flags |= SRB_ABORTED; @@ -1272,12 +1295,11 @@ scsi_dma_unmap(cmd); /* Call the mid-level driver interrupt handler */ - CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE; ha->actthreads--; - (*(cmd)->scsi_done)(cmd); - - if(sp->wait != NULL) + if (sp->wait == NULL) + (*(cmd)->scsi_done)(cmd); + else complete(sp->wait); } LEAVE("qla1280_done"); @@ -3415,6 +3437,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q) /* Save ISP completion status */ CMD_RESULT(sp->cmd) = 0; + CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE; /* Place block on done queue */ list_add_tail(&sp->list, done_q); @@ -3681,6 +3704,8 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt, } } + CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE; + /* Place command on done queue. */ list_add_tail(&sp->list, done_q); out: @@ -3736,6 +3761,8 @@ qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt, CMD_RESULT(sp->cmd) = DID_ERROR << 16; } + CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE; + /* Place command on done queue. */ list_add_tail(&sp->list, done_q); } @@ -3786,19 +3813,16 @@ qla1280_abort_isp(struct scsi_qla_host *ha) struct scsi_cmnd *cmd; sp = ha->outstanding_cmds[cnt]; if (sp) { - cmd = sp->cmd; CMD_RESULT(cmd) = DID_RESET << 16; - - sp->cmd = NULL; + CMD_HANDLE(cmd) = COMPLETED_HANDLE; ha->outstanding_cmds[cnt] = NULL; - - (*cmd->scsi_done)(cmd); - - sp->flags = 0; + list_add_tail(&sp->list, &ha->done_q); } } + qla1280_done(ha); + status = qla1280_load_firmware(ha); if (status) goto out; diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h index d7c44b8d2b4f..834884b9eed5 100644 --- a/drivers/scsi/qla1280.h +++ b/drivers/scsi/qla1280.h @@ -88,7 +88,8 @@ /* Maximum outstanding commands in ISP queues */ #define MAX_OUTSTANDING_COMMANDS 512 -#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS + 2) +#define COMPLETED_HANDLE ((unsigned char *) \ + (MAX_OUTSTANDING_COMMANDS + 2)) /* ISP request and response entry counts (37-65535) */ #define REQUEST_ENTRY_CNT 255 /* Number of request entries. */ -- cgit v1.2.3 From 410604d25faddb1b4f0f9667b7452c06cc06cea1 Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Tue, 14 Apr 2009 15:46:59 -0500 Subject: [SCSI] sym53c8xx_2: lun to_clear flag not re-initialized (2.6.27.5) (Resent with proper formatting) Fix for the sym53c8xx_2 driver to initialize lun's to_clear flag after a bus reset (a failed clear can trigger a bus reset and it should not be attempted again after that).
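To make the failure loop concrete, here is a deliberately simplified, self-contained model of the recovery sequence being fixed; only the to_clear flag name comes from the driver (struct sym_lcb), everything else is a toy stand-in rather than sym53c8xx_2 code:

    /* toy model: a pending clear that keeps failing */
    struct toy_lun {
            int to_clear;           /* mirrors sym_lcb.to_clear */
    };

    static int toy_try_clear(struct toy_lun *lp)
    {
            (void)lp;
            return 0;               /* pretend the clear fails */
    }

    static void toy_bus_reset(struct toy_lun *lp)
    {
            /* the fix: a bus reset must also drop the pending clear;
             * otherwise the failed clear is attempted again and can
             * escalate into yet another bus reset */
            lp->to_clear = 0;
    }

    static void toy_recover(struct toy_lun *lp)
    {
            if (lp->to_clear && !toy_try_clear(lp))
                    toy_bus_reset(lp);
    }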
Signed-off-by: Aaro Koskinen Tested-by: Tony Battersby Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/sym53c8xx_2/sym_hipd.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index ffa70d1ed182..60d6a6d23088 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -1896,6 +1896,15 @@ void sym_start_up(struct Scsi_Host *shost, int reason) tp->head.sval = 0; tp->head.wval = np->rv_scntl3; tp->head.uval = 0; + if (tp->lun0p) + tp->lun0p->to_clear = 0; + if (tp->lunmp) { + int ln; + + for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++) + if (tp->lunmp[ln]) + tp->lunmp[ln]->to_clear = 0; + } } /* -- cgit v1.2.3 From fa8584566cc9cdaf067dbc12132792887a521da9 Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Tue, 14 Apr 2009 15:47:00 -0500 Subject: [SCSI] sym53c8xx_2: slave_alloc/destroy safety (2.6.27.5) Make the sym53c8xx_2 driver slave_alloc/destroy less unsafe. References to the destroyed LCB are cleared from the target structure (instead of leaving a dangling pointer), and when the last LCB for the target is destroyed the reference to the upper layer target data is cleared. The host lock is used to prevent a race with the interrupt handler. Also user commands are prevented for targets with all LCBs destroyed. Signed-off-by: Aaro Koskinen Tested-by: Tony Battersby Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/sym53c8xx_2/sym_glue.c | 66 +++++++++++++++++++++++++++++-------- drivers/scsi/sym53c8xx_2/sym_hipd.c | 40 +++++++++++++++++++++- drivers/scsi/sym53c8xx_2/sym_hipd.h | 2 ++ 3 files changed, 93 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 583966ec8266..45374d66d26a 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -737,11 +737,14 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev) struct sym_hcb *np = sym_get_hcb(sdev->host); struct sym_tcb *tp = &np->target[sdev->id]; struct sym_lcb *lp; + unsigned long flags; + int error; if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN) return -ENXIO; - tp->starget = sdev->sdev_target; + spin_lock_irqsave(np->s.host->host_lock, flags); + /* * Fail the device init if the device is flagged NOSCAN at BOOT in * the NVRAM. 
This may speed up boot and maintain coherency with @@ -753,26 +756,37 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev) if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) { tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; - starget_printk(KERN_INFO, tp->starget, + starget_printk(KERN_INFO, sdev->sdev_target, "Scan at boot disabled in NVRAM\n"); - return -ENXIO; + error = -ENXIO; + goto out; } if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) { - if (sdev->lun != 0) - return -ENXIO; - starget_printk(KERN_INFO, tp->starget, + if (sdev->lun != 0) { + error = -ENXIO; + goto out; + } + starget_printk(KERN_INFO, sdev->sdev_target, "Multiple LUNs disabled in NVRAM\n"); } lp = sym_alloc_lcb(np, sdev->id, sdev->lun); - if (!lp) - return -ENOMEM; + if (!lp) { + error = -ENOMEM; + goto out; + } + if (tp->nlcb == 1) + tp->starget = sdev->sdev_target; spi_min_period(tp->starget) = tp->usr_period; spi_max_width(tp->starget) = tp->usr_width; - return 0; + error = 0; +out: + spin_unlock_irqrestore(np->s.host->host_lock, flags); + + return error; } /* @@ -819,12 +833,34 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev) static void sym53c8xx_slave_destroy(struct scsi_device *sdev) { struct sym_hcb *np = sym_get_hcb(sdev->host); - struct sym_lcb *lp = sym_lp(&np->target[sdev->id], sdev->lun); + struct sym_tcb *tp = &np->target[sdev->id]; + struct sym_lcb *lp = sym_lp(tp, sdev->lun); + unsigned long flags; + + spin_lock_irqsave(np->s.host->host_lock, flags); + + if (lp->busy_itlq || lp->busy_itl) { + /* + * This really shouldn't happen, but we can't return an error + * so let's try to stop all on-going I/O. + */ + starget_printk(KERN_WARNING, tp->starget, + "Removing busy LCB (%d)\n", sdev->lun); + sym_reset_scsi_bus(np, 1); + } - if (lp->itlq_tbl) - sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL"); - kfree(lp->cb_tags); - sym_mfree_dma(lp, sizeof(*lp), "LCB"); + if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) { + /* + * It was the last unit for this target. + */ + tp->head.sval = 0; + tp->head.wval = np->rv_scntl3; + tp->head.uval = 0; + tp->tgoal.check_nego = 1; + tp->starget = NULL; + } + + spin_unlock_irqrestore(np->s.host->host_lock, flags); } /* @@ -890,6 +926,8 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc) if (!((uc->target >> t) & 1)) continue; tp = &np->target[t]; + if (!tp->nlcb) + continue; switch (uc->cmd) { diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index 60d6a6d23088..69ad4945c936 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -4997,7 +4997,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln) */ if (ln && !tp->lunmp) { tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *), - GFP_KERNEL); + GFP_ATOMIC); if (!tp->lunmp) goto fail; } @@ -5017,6 +5017,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln) tp->lun0p = lp; tp->head.lun0_sa = cpu_to_scr(vtobus(lp)); } + tp->nlcb++; /* * Let the itl task point to error handling. @@ -5093,6 +5094,43 @@ fail: return; } +/* + * Lun control block deallocation. Returns the number of valid remaing LCBs + * for the target. 
+ */ +int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln) +{ + struct sym_tcb *tp = &np->target[tn]; + struct sym_lcb *lp = sym_lp(tp, ln); + + tp->nlcb--; + + if (ln) { + if (!tp->nlcb) { + kfree(tp->lunmp); + sym_mfree_dma(tp->luntbl, 256, "LUNTBL"); + tp->lunmp = NULL; + tp->luntbl = NULL; + tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl)); + } else { + tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa)); + tp->lunmp[ln] = NULL; + } + } else { + tp->lun0p = NULL; + tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa)); + } + + if (lp->itlq_tbl) { + sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); + kfree(lp->cb_tags); + } + + sym_mfree_dma(lp, sizeof(*lp), "LCB"); + + return tp->nlcb; +} + /* * Queue a SCSI IO to the controller. */ diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h index 9ebc8706b6bf..053e63c86822 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.h +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h @@ -401,6 +401,7 @@ struct sym_tcb { * An array of bus addresses is used on reselection. */ u32 *luntbl; /* LCBs bus address table */ + int nlcb; /* Number of valid LCBs (including LUN #0) */ /* * LUN table used by the C code. @@ -1065,6 +1066,7 @@ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order); void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp); struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln); +int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln); int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp); int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out); int sym_reset_scsi_target(struct sym_hcb *np, int target); -- cgit v1.2.3 From 8f03226358972f93cd45be0a710927cbb7fd5127 Mon Sep 17 00:00:00 2001 From: "Chauhan, Vijay" Date: Mon, 20 Apr 2009 18:14:23 +0530 Subject: [SCSI] scsi_dh_rdac: Retry for NOT_READY(02/04/01) in rdac device handler During device discovery read capacity fails with 0x020401 and sets the device size to 0. As a result, any I/O submitted to this path gets killed at sd_prep_fn with BLKPREP_KILL. This patch adds a retry for 0x020401. NEED_RETRY in scsi_decide_disposition does not give sufficient time for the device to become ready. Signed-off-by: Vijay Chauhan Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_rdac.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 43b8c51e98d0..fd0544f7da81 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -561,6 +561,12 @@ static int rdac_check_sense(struct scsi_device *sdev, struct rdac_dh_data *h = get_rdac_data(sdev); switch (sense_hdr->sense_key) { case NOT_READY: + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01) + /* LUN Not Ready - Logical Unit Not Ready and is in + * the process of becoming ready + * Just retry. + */ + return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81) /* LUN Not Ready - Storage firmware incompatible * Manual code synchonisation required. -- cgit v1.2.3 From f381642d8f8963e62f5d45774505fd936f2b6072 Mon Sep 17 00:00:00 2001 From: "Kleber S.
Souza" Date: Wed, 22 Apr 2009 10:50:28 -0300 Subject: [SCSI] ipr: ipr_remove() marked __devexit Marking the ipr clean up function ipr_remove() as __devexit and using __devexit_p() macro in its address reference. Signed-off-by: Kleber Sacilotto de Souza Reported-by: Breno Leitao Acked-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ipr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index dd689ded8609..764cfcccec67 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -7688,7 +7688,7 @@ static void __ipr_remove(struct pci_dev *pdev) * Return value: * none **/ -static void ipr_remove(struct pci_dev *pdev) +static void __devexit ipr_remove(struct pci_dev *pdev) { struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); @@ -7864,7 +7864,7 @@ static struct pci_driver ipr_driver = { .name = IPR_NAME, .id_table = ipr_pci_table, .probe = ipr_probe, - .remove = ipr_remove, + .remove = __devexit_p(ipr_remove), .shutdown = ipr_shutdown, .err_handler = &ipr_err_handler, }; -- cgit v1.2.3 From 73da9c13d4df3c1715029aa45edc78d71b617dfd Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Wed, 22 Apr 2009 17:42:25 -0700 Subject: [SCSI] scsi_debug: fix virtual disk larger than 1TB Signed-off-by: FUJITA Tomonori Acked-by: Douglas Gilbert Signed-off-by: James Bottomley --- drivers/scsi/scsi_debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 213123b0486b..41a21772df12 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -887,7 +887,7 @@ static int resp_start_stop(struct scsi_cmnd * scp, static sector_t get_sdebug_capacity(void) { if (scsi_debug_virtual_gb > 0) - return 2048 * 1024 * scsi_debug_virtual_gb; + return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb; else return sdebug_store_sectors; } -- cgit v1.2.3 From b0d428adebe9f1232c72bf4c686a6f0eed047cc2 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 27 Apr 2009 21:49:31 -0700 Subject: [SCSI] fcoe, libfc: fix function declarations to be ANSI-compliant Fix function declarations: drivers/scsi/fcoe/fcoe.c:1356:28: warning: non-ANSI function declaration of function 'fcoe_dev_setup' drivers/scsi/libfc/fc_rport.c:1293:20: warning: non-ANSI function declaration of function 'fc_setup_rport' drivers/scsi/libfc/fc_rport.c:1302:23: warning: non-ANSI function declaration of function 'fc_destroy_rport' [jejb: fixed wrong doc in comment noticed during inspection] Signed-off-by: Randy Dunlap Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 4 ++-- drivers/scsi/libfc/fc_rport.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 03e1926f40b5..d08121f246c3 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1350,13 +1350,13 @@ out: /** * fcoe_dev_setup() - setup link change notification interface */ -static void fcoe_dev_setup() +static void fcoe_dev_setup(void) { register_netdevice_notifier(&fcoe_notifier); } /** - * fcoe_dev_setup() - cleanup link change notification interface + * fcoe_dev_cleanup() - cleanup link change notification interface */ static void fcoe_dev_cleanup(void) { diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 747d73c5c8af..3f5094ebc397 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -1330,7 +1330,7 @@ int 
fc_rport_init(struct fc_lport *lport) } EXPORT_SYMBOL(fc_rport_init); -int fc_setup_rport() +int fc_setup_rport(void) { rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); if (!rport_event_queue) @@ -1339,7 +1339,7 @@ int fc_setup_rport() } EXPORT_SYMBOL(fc_setup_rport); -void fc_destroy_rport() +void fc_destroy_rport(void) { destroy_workqueue(rport_event_queue); } -- cgit v1.2.3 From 6fa612b56c575a5235568593eab4240c90608630 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 11 May 2009 15:49:12 +0200 Subject: microblaze: Kconfig: Enable drivers for Microblaze Signed-off-by: Michal Simek --- drivers/block/Kconfig | 2 +- drivers/char/Kconfig | 2 +- drivers/gpio/Kconfig | 2 +- drivers/input/serio/Kconfig | 2 +- drivers/of/Kconfig | 8 ++++---- drivers/spi/Kconfig | 2 +- drivers/usb/Kconfig | 1 + drivers/video/Kconfig | 2 +- 8 files changed, 11 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index ddea8e485cc9..9f1665fc0f66 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -438,7 +438,7 @@ source "drivers/s390/block/Kconfig" config XILINX_SYSACE tristate "Xilinx SystemACE support" - depends on 4xx + depends on 4xx || MICROBLAZE help Include support for the Xilinx SystemACE CompactFlash interface diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 735bbe2be51a..bb1a071b8038 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -893,7 +893,7 @@ config DTLK config XILINX_HWICAP tristate "Xilinx HWICAP Support" - depends on XILINX_VIRTEX + depends on XILINX_VIRTEX || MICROBLAZE help This option enables support for Xilinx Internal Configuration Access Port (ICAP) driver. The ICAP is used on Xilinx Virtex diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index edb02530e461..11f373971fa5 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -69,7 +69,7 @@ comment "Memory mapped GPIO expanders:" config GPIO_XILINX bool "Xilinx GPIO support" - depends on PPC_OF + depends on PPC_OF || MICROBLAZE help Say yes here to support the Xilinx FPGA GPIO device diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig index da3c3a5d2689..c4b3fbd1a80f 100644 --- a/drivers/input/serio/Kconfig +++ b/drivers/input/serio/Kconfig @@ -192,7 +192,7 @@ config SERIO_RAW config SERIO_XILINX_XPS_PS2 tristate "Xilinx XPS PS/2 Controller Support" - depends on PPC + depends on PPC || MICROBLAZE help This driver supports XPS PS/2 IP from the Xilinx EDK on PowerPC platform. 
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index f821dbc952a4..27f3b81333de 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -1,21 +1,21 @@ config OF_DEVICE def_bool y - depends on OF && (SPARC || PPC_OF) + depends on OF && (SPARC || PPC_OF || MICROBLAZE) config OF_GPIO def_bool y - depends on OF && PPC_OF && GPIOLIB + depends on OF && (PPC_OF || MICROBLAZE) && GPIOLIB help OpenFirmware GPIO accessors config OF_I2C def_tristate I2C - depends on PPC_OF && I2C + depends on (PPC_OF || MICROBLAZE) && I2C help OpenFirmware I2C accessors config OF_SPI def_tristate SPI - depends on OF && PPC_OF && SPI + depends on OF && (PPC_OF || MICROBLAZE) && SPI help OpenFirmware SPI accessors diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 83a185d52961..957494775413 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -212,7 +212,7 @@ config SPI_TXX9 config SPI_XILINX tristate "Xilinx SPI controller" - depends on XILINX_VIRTEX && EXPERIMENTAL + depends on (XILINX_VIRTEX || MICROBLAZE) && EXPERIMENTAL select SPI_BITBANG help This exposes the SPI controller IP from the Xilinx EDK. diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index c6c816b7ecb5..5eee3f82be5d 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -22,6 +22,7 @@ config USB_ARCH_HAS_HCD default y if PCMCIA && !M32R # sl811_cs default y if ARM # SL-811 default y if SUPERH # r8a66597-hcd + default y if MICROBLAZE default PCI # many non-PCI SOC chips embed OHCI diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 0048f1185a60..74712cb8399a 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -1996,7 +1996,7 @@ config FB_PS3_DEFAULT_SIZE_M config FB_XILINX tristate "Xilinx frame buffer support" - depends on FB && XILINX_VIRTEX + depends on FB && (XILINX_VIRTEX || MICROBLAZE) select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT -- cgit v1.2.3 From 5c10e63c943b4c67561ddc6bf61e01d4141f881f Mon Sep 17 00:00:00 2001 From: Takahiro Yasui Date: Wed, 29 Apr 2009 12:13:02 -0400 Subject: [SCSI] limit state transitions in scsi_internal_device_unblock A scsi timeout on two or more devices may cause extremely long execution times for user applications because the SDEV_OFFLINE state is changed to SDEV_RUNNING state during scsi error recovery procedures triggered by a bus reset or a host reset of a scsi LLD, and scsi timeouts can happen on the same devices many times. This happens because scsi_internal_device_unblock() changes the device's state to SDEV_RUNNING even if the device is in a state other than SDEV_BLOCK, while the following two transitions are required in this function. SDEV_BLOCK -> SDEV_RUNNING SDEV_CREATED_BLOCK -> SDEV_CREATED Otherwise, it returns -EINVAL. Signed-off-by: Takahiro Yasui [matthew@wil.cx: supplied rewritten base for patch] Signed-off-by: Matthew Wilcox Signed-off-by: James Bottomley --- drivers/scsi/scsi_lib.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index bb218c8b6e98..27dbf2e8e34a 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2441,20 +2441,18 @@ int scsi_internal_device_unblock(struct scsi_device *sdev) { struct request_queue *q = sdev->request_queue; - int err; unsigned long flags; /* * Try to transition the scsi device to SDEV_RUNNING * and goose the device queue if successful.
*/ - err = scsi_device_set_state(sdev, SDEV_RUNNING); - if (err) { - err = scsi_device_set_state(sdev, SDEV_CREATED); - - if (err) - return err; - } + if (sdev->sdev_state == SDEV_BLOCK) + sdev->sdev_state = SDEV_RUNNING; + else if (sdev->sdev_state == SDEV_CREATED_BLOCK) + sdev->sdev_state = SDEV_CREATED; + else + return -EINVAL; spin_lock_irqsave(q->queue_lock, flags); blk_start_queue(q); -- cgit v1.2.3 From 1da2019fffc65c02a613305919bac28c9bdfaf99 Mon Sep 17 00:00:00 2001 From: Kai Makisara Date: Sat, 2 May 2009 08:49:34 +0300 Subject: [SCSI] st: fix gcc 4.4 warning This patch fixes the GCC 4.4 warning reported by David Binderman and Sergey Senozhatsky. The old version was working correctly but was not easy to read. Signed-off-by: Kai Makisara Signed-off-by: James Bottomley --- drivers/scsi/st.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index eb24efea8f14..6f46e627aab3 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -2964,7 +2964,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon !(STp->use_pf & PF_TESTED)) { /* Try the other possible state of Page Format if not already tried */ - STp->use_pf = !STp->use_pf | PF_TESTED; + STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED; st_release_request(SRpnt); SRpnt = NULL; return st_int_ioctl(STp, cmd_in, arg); -- cgit v1.2.3 From 16b3858ec967f4d9817f6958cc7a0bf1222355f3 Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Sat, 2 May 2009 22:14:54 +0200 Subject: [SCSI] ibmvscsi: Remove redundant test on unsigned. Signed-off-by: Roel Kluin Acked-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvscsi.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index c9aa7611e408..8d3925f6b5a1 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -910,9 +910,6 @@ static void login_rsp(struct srp_event_struct *evt_struct) dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); - if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0) - dev_err(hostdata->dev, "Invalid request_limit.\n"); - /* Now we know what the real request-limit is. * This value is set rather than added to request_limit because * request_limit could have been set to -1 by this client. -- cgit v1.2.3 From a3ec723a949d65bf0349cdf60958036454927729 Mon Sep 17 00:00:00 2001 From: Eric Piel Date: Mon, 4 May 2009 12:43:02 +0200 Subject: [SCSI] Update wording of CONFIG_SCSI_MULTI_LUN help I had to set CONFIG_SCSI_MULTI_LUN to y in order to get my SE W595 working when plugging it in as a mass storage device. Finding the SCSI option needed to get a phone behaving correctly was convoluted, to say the least. There are quite a few other reports about USB card readers needing this option as well. This patch improves the help text to make the use of the option more obvious. Signed-off-by: Eric Piel Signed-off-by: James Bottomley --- drivers/scsi/Kconfig | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 6e8106a70b3d..759e1507e63c 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -191,20 +191,19 @@ config SCSI_ENCLOSURE it has an enclosure device. Selecting this option will just allow certain enclosure conditions to be reported and is not required. -comment "Some SCSI devices (e.g.
CD jukebox) support multiple LUNs" - depends on SCSI - config SCSI_MULTI_LUN bool "Probe all LUNs on each SCSI device" depends on SCSI help - If you have a SCSI device that supports more than one LUN (Logical - Unit Number), e.g. a CD jukebox, and only one LUN is detected, you - can say Y here to force the SCSI driver to probe for multiple LUNs. - A SCSI device with multiple LUNs acts logically like multiple SCSI - devices. The vast majority of SCSI devices have only one LUN, and - so most people can say N here. The max_luns boot/module parameter - allows to override this setting. + Some devices support more than one LUN (Logical Unit Number) in order + to allow access to several media, e.g. CD jukebox, USB card reader, + mobile phone in mass storage mode. This option forces the kernel to + probe for all LUNs by default. This setting can be overriden by + max_luns boot/module parameter. Note that this option does not affect + devices conforming to SCSI-3 or higher as they can explicitely report + their number of LUNs. It is safe to say Y here unless you have one of + those rare devices which reacts in an unexpected way when probed for + multiple LUNs. config SCSI_CONSTANTS bool "Verbose SCSI error reporting (kernel size +=12K)" -- cgit v1.2.3 From 6ff63896e5bd624d8563f4b67fe2fe06ce99c8dc Mon Sep 17 00:00:00 2001 From: "Kleber S. Souza" Date: Mon, 4 May 2009 10:41:02 -0300 Subject: [SCSI] ipr: fix PCI permanent error handler The ipr driver can hang if it encounters enough PCI errors to trigger the permanent error handler. The driver will attempt to initiate a "bringdown" of the adapter and fail all pending ops back. However, this bringdown is unlike any other adapter bringdown in the driver. In this code path we end up failing back ops with allow_cmds still set to 1. This results in some commands, the HCAM commands in particular, getting immediately re-issued to the adapter on the done call, which results in an infinite loop in ipr_fail_all_ops. Fix this by setting allow_cmds to zero in this path. Signed-off-by: Kleber S. Souza [brking@linux.vnet.ibm.com: alternate patch substituted] Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ipr.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 764cfcccec67..0f8bc772b112 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -7003,6 +7003,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev) ioa_cfg->sdt_state = ABORT_DUMP; ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; ioa_cfg->in_ioa_bringdown = 1; + ioa_cfg->allow_cmds = 0; ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); } -- cgit v1.2.3 From 2b288133ab6306b1761e0a2ef943b944ead6ad69 Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Mon, 11 May 2009 20:01:55 +0800 Subject: [SCSI] mvsas: bug fix with setting task management frame type Correct the frame type setting according to the is_tmf parameter.
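For readability, the corrected logic in isolation (condensed from the mvs_task_prep_ssp() hunk that follows):

    /* Select the SSP frame type from the caller's is_tmf parameter
     * instead of hard-coding a command frame, then fold it into the
     * header flags together with the PRD count. */
    if (is_tmf)
        flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
    else
        flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
    hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));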
Signed-off-by: Ying Chu Signed-off-by: Andy Yan Signed-off-by: Ke Wei Signed-off-by: James Bottomley --- drivers/scsi/mvsas/mv_sas.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index d79ac179eaff..f709319c2b6d 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -761,9 +761,11 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, flags |= MCH_FBURST; fburst = (1 << 7); } - hdr->flags = cpu_to_le32(flags | - (tei->n_elem << MCH_PRD_LEN_SHIFT) | - (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT)); + if (is_tmf) + flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT); + else + flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT); + hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT)); hdr->tags = cpu_to_le32(tag); hdr->data_len = cpu_to_le32(task->total_xfer_len); -- cgit v1.2.3 From 0b84b7094e87769120def1e703b8b4d037281038 Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Mon, 11 May 2009 20:05:26 +0800 Subject: [SCSI] mvsas: bug fix of dead lock A TMF task should be issued with interrupts disabled, or a deadlock may take place. Clean up the unused parameters and the conditional lock. Signed-off-by: Ying Chu Signed-off-by: Andy Yan Signed-off-by: Ke Wei Signed-off-by: James Bottomley --- drivers/scsi/mvsas/mv_sas.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index f709319c2b6d..4279b5e8808a 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -868,8 +868,8 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, #define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, - struct completion *completion, int lock, - int is_tmf, struct mvs_tmf_task *tmf) + struct completion *completion,int is_tmf, + struct mvs_tmf_task *tmf) { struct domain_device *dev = task->dev; struct mvs_info *mvi; @@ -892,8 +892,7 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, mvi = mvs_find_dev_mvi(task->dev); - if (lock) - spin_lock_irqsave(&mvi->lock, flags); + spin_lock_irqsave(&mvi->lock, flags); do { dev = t->dev; mvi_dev = (struct mvs_device *)dev->lldd_dev; @@ -1020,15 +1019,14 @@ out_done: MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); } - if (lock) - spin_unlock_irqrestore(&mvi->lock, flags); + spin_unlock_irqrestore(&mvi->lock, flags); return rc; } int mvs_queue_command(struct sas_task *task, const int num, gfp_t gfp_flags) { - return mvs_task_exec(task, num, gfp_flags, NULL, 1, 0, NULL); + return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL); } static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) @@ -1448,7 +1446,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev, task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; add_timer(&task->timer); - res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 0, 1, tmf); + res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf); if (res) { del_timer(&task->timer); -- cgit v1.2.3 From 0f980a871678b7ec143fcb45b31bf9234e4585c8 Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Mon, 11 May 2009 21:49:52 +0800 Subject: [SCSI] mvsas: bug fix, null pointer may be used Add a null pointer check to avoid corruption.
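For readability, the guarded cleanup in isolation (condensed from the mvs_slot_complete() hunk that follows; runing_req is the driver's actual field name):

    /* The lookup can legitimately return NULL (e.g. the device is
     * gone), so only touch the per-device state when it exists. */
    if (mvi_dev) {
        mvi_dev->runing_req--;
        if (sas_protocol_ata(task->task_proto))
            mvs_free_reg_set(mvi, mvi_dev);
    }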
Signed-off-by: Ying Chu Signed-off-by: Andy Yan Signed-off-by: Ke Wei Signed-off-by: James Bottomley --- drivers/scsi/mvsas/mv_sas.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 4279b5e8808a..3fc396fc050d 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -1873,11 +1873,11 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) } out: - if (mvi_dev) + if (mvi_dev) { mvi_dev->runing_req--; - if (sas_protocol_ata(task->task_proto)) - mvs_free_reg_set(mvi, mvi_dev); - + if (sas_protocol_ata(task->task_proto)) + mvs_free_reg_set(mvi, mvi_dev); + } mvs_slot_task_free(mvi, task, slot, slot_idx); sts = tstat->stat; -- cgit v1.2.3 From 77db27cdcbc8ed371fd2f154cbadc7ff32ae8901 Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Mon, 11 May 2009 21:56:31 +0800 Subject: [SCSI] mvsas: correct bit map usage Utilize DECLARE_BITMAP to define the tags array. Signed-off-by: Ying Chu Signed-off-by: Andy Yan Signed-off-by: Ke Wei Signed-off-by: James Bottomley --- drivers/scsi/mvsas/mv_sas.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 75b9748ae7cc..93735edff507 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -313,8 +313,7 @@ struct mvs_info { const struct mvs_chip_info *chip; int tags_num; - u8 tags[MVS_SLOTS >> 3]; - + DECLARE_BITMAP(tags, MVS_SLOTS); /* further per-slot information */ struct mvs_phy phy[MVS_MAX_PHYS]; struct mvs_port port[MVS_MAX_PHYS]; -- cgit v1.2.3 From 9870d9a2428550e7ac3164a26306ad07a99051ae Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Mon, 11 May 2009 22:19:25 +0800 Subject: [SCSI] mvsas: performance improvement using domain_device->lldd_dev Using a sticky field to improve lookup performance by eliminating some repeated lookups. Remove some spurious casts.
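For readability, the lookup pattern before and after (condensed from the hunks that follow):

    /* before: walk driver state on every call to find the controller */
    struct mvs_info *mvi = mvs_find_dev_mvi(dev);

    /* after: follow the back-pointer cached in the device at discovery */
    struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
    struct mvs_info *mvi = mvi_dev->mvi_info;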
Signed-off-by: Ying Chu Signed-off-by: Andy Yan Signed-off-by: Ke Wei Signed-off-by: James Bottomley --- drivers/scsi/mvsas/mv_sas.c | 29 ++++++++++++++--------------- drivers/scsi/mvsas/mv_sas.h | 3 ++- 2 files changed, 16 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 3fc396fc050d..c05e4c05a414 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -225,7 +225,8 @@ struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) { unsigned long i = 0, j = 0, n = 0, num = 0; - struct mvs_info *mvi = mvs_find_dev_mvi(dev); + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; + struct mvs_info *mvi = mvi_dev->mvi_info; struct sas_ha_struct *sha = dev->port->ha; while (sha->sas_port[i]) { @@ -872,8 +873,8 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, struct mvs_tmf_task *tmf) { struct domain_device *dev = task->dev; - struct mvs_info *mvi; - struct mvs_device *mvi_dev; + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; + struct mvs_info *mvi = mvi_dev->mvi_info; struct mvs_task_exec_info tei; struct sas_task *t = task; struct mvs_slot_info *slot; @@ -890,8 +891,6 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, return 0; } - mvi = mvs_find_dev_mvi(task->dev); - spin_lock_irqsave(&mvi->lock, flags); do { dev = t->dev; @@ -1320,7 +1319,7 @@ int mvs_dev_found_notify(struct domain_device *dev, int lock) } dev->lldd_dev = (void *)mvi_device; mvi_device->dev_type = dev->dev_type; - + mvi_device->mvi_info = mvi; if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { int phy_id; u8 phy_num = parent_dev->ex_dev.num_phys; @@ -1357,10 +1356,8 @@ int mvs_dev_found(struct domain_device *dev) void mvs_dev_gone_notify(struct domain_device *dev, int lock) { unsigned long flags = 0; - struct mvs_info *mvi; struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; - - mvi = mvs_find_dev_mvi(dev); + struct mvs_info *mvi = mvi_dev->mvi_info; if (lock) spin_lock_irqsave(&mvi->lock, flags); @@ -1535,8 +1532,8 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun) unsigned long flags; int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; struct mvs_tmf_task tmf_task; - struct mvs_info *mvi = mvs_find_dev_mvi(dev); struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev; + struct mvs_info *mvi = mvi_dev->mvi_info; tmf_task.tmf = TMF_LU_RESET; mvi_dev->dev_status = MVS_DEV_EH; @@ -1558,8 +1555,8 @@ int mvs_I_T_nexus_reset(struct domain_device *dev) { unsigned long flags; int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; - struct mvs_info *mvi = mvs_find_dev_mvi(dev); - struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; + struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev; + struct mvs_info *mvi = mvi_dev->mvi_info; if (mvi_dev->dev_status != MVS_DEV_EH) return TMF_RESP_FUNC_COMPLETE; @@ -1587,7 +1584,8 @@ int mvs_query_task(struct sas_task *task) if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; struct domain_device *dev = task->dev; - struct mvs_info *mvi = mvs_find_dev_mvi(dev); + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; + struct mvs_info *mvi = mvi_dev->mvi_info; int_to_scsilun(cmnd->device->lun, &lun); rc = mvs_find_tag(mvi, task, &tag); @@ -1619,10 +1617,12 @@ int 
mvs_abort_task(struct sas_task *task) struct scsi_lun lun; struct mvs_tmf_task tmf_task; struct domain_device *dev = task->dev; - struct mvs_info *mvi = mvs_find_dev_mvi(dev); + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; + struct mvs_info *mvi = mvi_dev->mvi_info; int rc = TMF_RESP_FUNC_FAILED; unsigned long flags; u32 tag; + if (mvi->exp_req) mvi->exp_req--; spin_lock_irqsave(&task->task_state_lock, flags); @@ -1652,7 +1652,6 @@ int mvs_abort_task(struct sas_task *task) if (rc == TMF_RESP_FUNC_COMPLETE) { u32 slot_no; struct mvs_slot_info *slot; - struct mvs_info *mvi = mvs_find_dev_mvi(dev); if (task->lldd_task) { slot = (struct mvs_slot_info *)task->lldd_task; diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 93735edff507..aa2270af1bac 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -231,7 +231,9 @@ struct mvs_phy { }; struct mvs_device { + struct list_head dev_entry; enum sas_dev_type dev_type; + struct mvs_info *mvi_info; struct domain_device *sas_device; u32 attached_phy; u32 device_id; @@ -239,7 +241,6 @@ struct mvs_device { u8 taskfileset; u8 dev_status; u16 reserved; - struct list_head dev_entry; }; struct mvs_slot_info { -- cgit v1.2.3 From 5a2537959fa8781012e8c286fc1614e0f6991327 Mon Sep 17 00:00:00 2001 From: Zhenwen Xu Date: Tue, 12 May 2009 13:29:13 -0700 Subject: [SCSI] NCR_D700: fix IRQ handler return type drivers/scsi/NCR_D700.c: In function `NCR_D700_probe': drivers/scsi/NCR_D700.c:322: warning: passing argument 2 of `request_irq' from incompatible pointer type drivers/scsi/NCR_D700.c:322: warning: passing argument 2 of `request_irq' from incompatible pointer type drivers/scsi/NCR_D700.c:322: warning: passing argument 2 of `request_irq' from incompatible pointer type Signed-off-by: Zhenwen Xu Signed-off-by: Andrew Morton Signed-off-by: James Bottomley --- drivers/scsi/NCR_D700.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c index c889d8458684..1cdf09a4779a 100644 --- a/drivers/scsi/NCR_D700.c +++ b/drivers/scsi/NCR_D700.c @@ -224,7 +224,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq, return ret; } -static int +static irqreturn_t NCR_D700_intr(int irq, void *data) { struct NCR_D700_private *p = (struct NCR_D700_private *)data; -- cgit v1.2.3 From 10eb0f013c63c71c82ede77945a5f390c10cfda6 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 13 May 2009 17:57:38 -0500 Subject: [SCSI] iscsi: pass ep connect shost When we create the tcp/ip connection by calling ep_connect, we currently just go by the routing table info. I think there are two problems with this. 1. Some drivers do not have access to a routing table. Some drivers like qla4xxx do not even know about other ports. 2. If you have two initiator ports on the same subnet, the user may have set things up so that session1 was supposed to be run through port1 and session2 was supposed to be run through port2. It looks like we could end up with both sessions going through one of the ports. Fixes for cxgb3i from Karen Xie.
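The heart of the routing change, in isolation (from the find_route() hunk that follows; the rest of the flow key is unchanged and omitted here): the chosen host's netdev is threaded down into the route lookup so the kernel resolves the route out of that specific port.

    /* Pin the route lookup to the requested interface; an oif of 0
     * preserves the old "any device" behaviour. */
    fl.oif = dev ? dev->ifindex : 0;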
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/infiniband/ulp/iser/iscsi_iser.c | 3 +- drivers/scsi/cxgb3i/cxgb3i.h | 1 - drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 25 +++++++++++++--- drivers/scsi/cxgb3i/cxgb3i_offload.c | 23 ++++++++------ drivers/scsi/cxgb3i/cxgb3i_offload.h | 3 +- drivers/scsi/scsi_transport_iscsi.c | 51 +++++++++++++++++++++++++------- 6 files changed, 79 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 75223f50de58..ffbe0c76bc11 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -517,7 +517,8 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s } static struct iscsi_endpoint * -iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking) +iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, + int non_blocking) { int err; struct iser_conn *ib_conn; diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h index 59b0958d2d11..e3133b58e594 100644 --- a/drivers/scsi/cxgb3i/cxgb3i.h +++ b/drivers/scsi/cxgb3i/cxgb3i.h @@ -144,7 +144,6 @@ struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *); void cxgb3i_adapter_open(struct t3cdev *); void cxgb3i_adapter_close(struct t3cdev *); -struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *); struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *, struct net_device *); void cxgb3i_hba_host_remove(struct cxgb3i_hba *); diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c index 9212400b9b13..04a43744aedf 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c +++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c @@ -178,7 +178,7 @@ void cxgb3i_adapter_close(struct t3cdev *t3dev) * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device * @t3dev: t3cdev adapter */ -struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) +static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) { struct cxgb3i_adapter *snic; int i; @@ -261,20 +261,27 @@ void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba) /** * cxgb3i_ep_connect - establish TCP connection to target portal + * @shost: scsi host to use * @dst_addr: target IP address * @non_blocking: blocking or non-blocking call * * Initiates a TCP/IP connection to the dst_addr */ -static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr, +static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost, + struct sockaddr *dst_addr, int non_blocking) { struct iscsi_endpoint *ep; struct cxgb3i_endpoint *cep; - struct cxgb3i_hba *hba; + struct cxgb3i_hba *hba = NULL; struct s3_conn *c3cn = NULL; int err = 0; + if (shost) + hba = iscsi_host_priv(shost); + + cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba); + c3cn = cxgb3i_c3cn_create(); if (!c3cn) { cxgb3i_log_info("ep connect OOM.\n"); @@ -282,17 +289,27 @@ static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr, goto release_conn; } - err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr); + err = cxgb3i_c3cn_connect(hba ? 
hba->ndev : NULL, c3cn, + (struct sockaddr_in *)dst_addr); if (err < 0) { cxgb3i_log_info("ep connect failed.\n"); goto release_conn; } + hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev); if (!hba) { err = -ENOSPC; cxgb3i_log_info("NOT going through cxgbi device.\n"); goto release_conn; } + + if (shost && hba != iscsi_host_priv(shost)) { + err = -ENOSPC; + cxgb3i_log_info("Could not connect through request host%u\n", + shost->host_no); + goto release_conn; + } + if (c3cn_is_closing(c3cn)) { err = -ENOSPC; cxgb3i_log_info("ep connect unable to connect.\n"); diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c index e11c9c180f39..c1d5be4adf9c 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_offload.c +++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c @@ -1479,12 +1479,13 @@ static struct net_device *cxgb3_egress_dev(struct net_device *root_dev, return NULL; } -static struct rtable *find_route(__be32 saddr, __be32 daddr, +static struct rtable *find_route(struct net_device *dev, + __be32 saddr, __be32 daddr, __be16 sport, __be16 dport) { struct rtable *rt; struct flowi fl = { - .oif = 0, + .oif = dev ? dev->ifindex : 0, .nl_u = { .ip4_u = { .daddr = daddr, @@ -1573,36 +1574,40 @@ out_err: * * return 0 if active open request is sent, < 0 otherwise. */ -int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin) +int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn, + struct sockaddr_in *usin) { struct rtable *rt; - struct net_device *dev; struct cxgb3i_sdev_data *cdata; struct t3cdev *cdev; __be32 sipv4; int err; + c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev); + if (usin->sin_family != AF_INET) return -EAFNOSUPPORT; c3cn->daddr.sin_port = usin->sin_port; c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; - rt = find_route(c3cn->saddr.sin_addr.s_addr, + rt = find_route(dev, c3cn->saddr.sin_addr.s_addr, c3cn->daddr.sin_addr.s_addr, c3cn->saddr.sin_port, c3cn->daddr.sin_port); if (rt == NULL) { - c3cn_conn_debug("NO route to 0x%x, port %u.\n", + c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n", c3cn->daddr.sin_addr.s_addr, - ntohs(c3cn->daddr.sin_port)); + ntohs(c3cn->daddr.sin_port), + dev ? dev->name : "any"); return -ENETUNREACH; } if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { - c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n", + c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n", c3cn->daddr.sin_addr.s_addr, - ntohs(c3cn->daddr.sin_port)); + ntohs(c3cn->daddr.sin_port), + dev ? 
dev->name : "any"); ip_rt_put(rt); return -ENETUNREACH; } diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h index ebfca960c0a9..6a1d86b1fafe 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_offload.h +++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h @@ -169,7 +169,8 @@ void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *); void cxgb3i_sdev_remove(struct t3cdev *); struct s3_conn *cxgb3i_c3cn_create(void); -int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *); +int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *, + struct sockaddr_in *); void cxgb3i_c3cn_rx_credits(struct s3_conn *, int); int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *); void cxgb3i_c3cn_release(struct s3_conn *); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 0a2ce7b6325c..d69a53aa406f 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1268,26 +1268,54 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) return err; } +static int iscsi_if_ep_connect(struct iscsi_transport *transport, + struct iscsi_uevent *ev, int msg_type) +{ + struct iscsi_endpoint *ep; + struct sockaddr *dst_addr; + struct Scsi_Host *shost = NULL; + int non_blocking, err = 0; + + if (!transport->ep_connect) + return -EINVAL; + + if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) { + shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no); + if (!shost) { + printk(KERN_ERR "ep connect failed. Could not find " + "host no %u\n", + ev->u.ep_connect_through_host.host_no); + return -ENODEV; + } + non_blocking = ev->u.ep_connect_through_host.non_blocking; + } else + non_blocking = ev->u.ep_connect.non_blocking; + + dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); + ep = transport->ep_connect(shost, dst_addr, non_blocking); + if (IS_ERR(ep)) { + err = PTR_ERR(ep); + goto release_host; + } + + ev->r.ep_connect_ret.handle = ep->id; +release_host: + if (shost) + scsi_host_put(shost); + return err; +} + static int iscsi_if_transport_ep(struct iscsi_transport *transport, struct iscsi_uevent *ev, int msg_type) { struct iscsi_endpoint *ep; - struct sockaddr *dst_addr; int rc = 0; switch (msg_type) { + case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST: case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: - if (!transport->ep_connect) - return -EINVAL; - - dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); - ep = transport->ep_connect(dst_addr, - ev->u.ep_connect.non_blocking); - if (IS_ERR(ep)) - return PTR_ERR(ep); - - ev->r.ep_connect_ret.handle = ep->id; + rc = iscsi_if_ep_connect(transport, ev, msg_type); break; case ISCSI_UEVENT_TRANSPORT_EP_POLL: if (!transport->ep_poll) @@ -1469,6 +1497,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: case ISCSI_UEVENT_TRANSPORT_EP_POLL: case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: + case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST: err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type); break; case ISCSI_UEVENT_TGT_DSCVR: -- cgit v1.2.3 From 184b57c630c86d35b7f92d4b6545fdf07647c5d5 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 13 May 2009 17:57:39 -0500 Subject: [SCSI] libiscsi: check if LLD has an alloc_pdu callout. bnx2i does not have one. It currently preallocates the bdt when the session is set up. We probably want to change that to a dma pool, then allocate from the pool in the alloc_pdu callout. Until then, check if there is an alloc_pdu callout.
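For readability, the guard in isolation (condensed from the iscsi_prep_scsi_cmd_pdu() hunk that follows):

    /* alloc_pdu is now optional; LLDs like bnx2i that preallocate
     * their pdu resources simply do not implement it */
    if (conn->session->tt->alloc_pdu) {
        rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
        if (rc)
            return rc;
    }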
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index e72b4ad47d35..11bc3e1fbd5a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -257,9 +257,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) itt_t itt; int rc; - rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); - if (rc) - return rc; + if (conn->session->tt->alloc_pdu) { + rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); + if (rc) + return rc; + } hdr = (struct iscsi_cmd *) task->hdr; itt = hdr->itt; memset(hdr, 0, sizeof(*hdr)); @@ -566,11 +568,14 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, } else task->data_count = 0; - if (conn->session->tt->alloc_pdu(task, hdr->opcode)) { - iscsi_conn_printk(KERN_ERR, conn, "Could not allocate " - "pdu for mgmt task.\n"); - goto requeue_task; + if (conn->session->tt->alloc_pdu) { + if (conn->session->tt->alloc_pdu(task, hdr->opcode)) { + iscsi_conn_printk(KERN_ERR, conn, "Could not allocate " + "pdu for mgmt task.\n"); + goto requeue_task; + } } + itt = task->hdr->itt; task->hdr_len = sizeof(struct iscsi_hdr); memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); -- cgit v1.2.3 From 5700b1af93388544843a453e3c68f8f928bd1e88 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 13 May 2009 17:57:40 -0500 Subject: [SCSI] libiscsi: handle param allocation failures If we could not allocate the initiator name or some other ID like the hwaddress or netdev, then userspace could deal with the failure by just running in a degraded mode. Now we want to be able to switch values for the params and we want some feedback, so this patch will check if a string like the initiatorname could not be allocated and return an error.
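For readability, the new helper in isolation (condensed from the hunk that follows): it treats an unchanged value as success and only frees the old string once the replacement is safely allocated.

    static int iscsi_switch_str_param(char **param, char *new_val_buf)
    {
        char *new_val;

        /* nothing to do if the value is unchanged */
        if (*param && !strcmp(*param, new_val_buf))
            return 0;

        new_val = kstrdup(new_val_buf, GFP_NOIO);
        if (!new_val)
            return -ENOMEM;    /* old value is left intact */

        kfree(*param);
        *param = new_val;
        return 0;
    }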
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 108 +++++++++++++++--------------------------------- 1 file changed, 33 insertions(+), 75 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 11bc3e1fbd5a..b4aaf2e5fe7a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -2656,6 +2656,23 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session, } EXPORT_SYMBOL_GPL(iscsi_conn_bind); +static int iscsi_switch_str_param(char **param, char *new_val_buf) +{ + char *new_val; + + if (*param) { + if (!strcmp(*param, new_val_buf)) + return 0; + } + + new_val = kstrdup(new_val_buf, GFP_NOIO); + if (!new_val) + return -ENOMEM; + + kfree(*param); + *param = new_val; + return 0; +} int iscsi_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf, int buflen) @@ -2728,38 +2745,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn, sscanf(buf, "%u", &conn->exp_statsn); break; case ISCSI_PARAM_USERNAME: - kfree(session->username); - session->username = kstrdup(buf, GFP_KERNEL); - if (!session->username) - return -ENOMEM; - break; + return iscsi_switch_str_param(&session->username, buf); case ISCSI_PARAM_USERNAME_IN: - kfree(session->username_in); - session->username_in = kstrdup(buf, GFP_KERNEL); - if (!session->username_in) - return -ENOMEM; - break; + return iscsi_switch_str_param(&session->username_in, buf); case ISCSI_PARAM_PASSWORD: - kfree(session->password); - session->password = kstrdup(buf, GFP_KERNEL); - if (!session->password) - return -ENOMEM; - break; + return iscsi_switch_str_param(&session->password, buf); case ISCSI_PARAM_PASSWORD_IN: - kfree(session->password_in); - session->password_in = kstrdup(buf, GFP_KERNEL); - if (!session->password_in) - return -ENOMEM; - break; + return iscsi_switch_str_param(&session->password_in, buf); case ISCSI_PARAM_TARGET_NAME: - /* this should not change between logins */ - if (session->targetname) - break; - - session->targetname = kstrdup(buf, GFP_KERNEL); - if (!session->targetname) - return -ENOMEM; - break; + return iscsi_switch_str_param(&session->targetname, buf); case ISCSI_PARAM_TPGT: sscanf(buf, "%d", &session->tpgt); break; @@ -2767,25 +2761,11 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn, sscanf(buf, "%d", &conn->persistent_port); break; case ISCSI_PARAM_PERSISTENT_ADDRESS: - /* - * this is the address returned in discovery so it should - * not change between logins. 
- */ - if (conn->persistent_address) - break; - - conn->persistent_address = kstrdup(buf, GFP_KERNEL); - if (!conn->persistent_address) - return -ENOMEM; - break; + return iscsi_switch_str_param(&conn->persistent_address, buf); case ISCSI_PARAM_IFACE_NAME: - if (!session->ifacename) - session->ifacename = kstrdup(buf, GFP_KERNEL); - break; + return iscsi_switch_str_param(&session->ifacename, buf); case ISCSI_PARAM_INITIATOR_NAME: - if (!session->initiatorname) - session->initiatorname = kstrdup(buf, GFP_KERNEL); - break; + return iscsi_switch_str_param(&session->initiatorname, buf); default: return -ENOSYS; } @@ -2856,10 +2836,7 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, len = sprintf(buf, "%s\n", session->ifacename); break; case ISCSI_PARAM_INITIATOR_NAME: - if (!session->initiatorname) - len = sprintf(buf, "%s\n", "unknown"); - else - len = sprintf(buf, "%s\n", session->initiatorname); + len = sprintf(buf, "%s\n", session->initiatorname); break; default: return -ENOSYS; @@ -2925,29 +2902,16 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: - if (!ihost->netdev) - len = sprintf(buf, "%s\n", "default"); - else - len = sprintf(buf, "%s\n", ihost->netdev); + len = sprintf(buf, "%s\n", ihost->netdev); break; case ISCSI_HOST_PARAM_HWADDRESS: - if (!ihost->hwaddress) - len = sprintf(buf, "%s\n", "default"); - else - len = sprintf(buf, "%s\n", ihost->hwaddress); + len = sprintf(buf, "%s\n", ihost->hwaddress); break; case ISCSI_HOST_PARAM_INITIATOR_NAME: - if (!ihost->initiatorname) - len = sprintf(buf, "%s\n", "unknown"); - else - len = sprintf(buf, "%s\n", ihost->initiatorname); + len = sprintf(buf, "%s\n", ihost->initiatorname); break; case ISCSI_HOST_PARAM_IPADDRESS: - if (!strlen(ihost->local_address)) - len = sprintf(buf, "%s\n", "unknown"); - else - len = sprintf(buf, "%s\n", - ihost->local_address); + len = sprintf(buf, "%s\n", ihost->local_address); break; default: return -ENOSYS; @@ -2964,17 +2928,11 @@ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param, switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: - if (!ihost->netdev) - ihost->netdev = kstrdup(buf, GFP_KERNEL); - break; + return iscsi_switch_str_param(&ihost->netdev, buf); case ISCSI_HOST_PARAM_HWADDRESS: - if (!ihost->hwaddress) - ihost->hwaddress = kstrdup(buf, GFP_KERNEL); - break; + return iscsi_switch_str_param(&ihost->hwaddress, buf); case ISCSI_HOST_PARAM_INITIATOR_NAME: - if (!ihost->initiatorname) - ihost->initiatorname = kstrdup(buf, GFP_KERNEL); - break; + return iscsi_switch_str_param(&ihost->initiatorname, buf); default: return -ENOSYS; } -- cgit v1.2.3 From 8f9256cea10ca43ac80f66e176643eb41db34244 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 13 May 2009 17:57:41 -0500 Subject: [SCSI] libiscsi: export iscsi_itt_to_task for bnx2i bnx2i needs to be able to look up mgmt task like login and nop, because it does some processing of them on the completion path. This exports iscsi_itt_to_task so it can look up the task. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index b4aaf2e5fe7a..a6e6eef04fed 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -828,7 +828,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, * * The session lock must be held. 
*/ -static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) +struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) { struct iscsi_session *session = conn->session; int i; @@ -845,6 +845,7 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) return session->cmds[i]; } +EXPORT_SYMBOL_GPL(iscsi_itt_to_task); /** * __iscsi_complete_pdu - complete pdu -- cgit v1.2.3 From edbc9aa0580c0aca96ac8d11bfb2defa81d91bb3 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 13 May 2009 17:57:42 -0500 Subject: [SCSI] libiscsi: have iscsi_data_in_rsp call iscsi_update_cmdsn This has iscsi_data_in_rsp call iscsi_update_cmdsn when a pdu is completed, as is done for other completed pdus. For libiscsi_tcp, this means that it calls iscsi_update_cmdsn when it is handling the pdu internally to only transfer data, but if there is status then it does not need to call it since the completion handling will do it. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 1 + drivers/scsi/libiscsi_tcp.c | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index a6e6eef04fed..047543cd3fc1 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -729,6 +729,7 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) return; + iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr); sc->result = (DID_OK << 16) | rhdr->cmd_status; conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW | diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index b579ca9f4836..db93cd0dfdb6 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -473,7 +473,13 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task) int datasn = be32_to_cpu(rhdr->datasn); unsigned total_in_length = scsi_in(task->sc)->length; - iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr); + /* + * lib iscsi will update this in the completion handling if there + * is status. + */ + if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) + iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr); + if (tcp_conn->in.datalen == 0) return 0; -- cgit v1.2.3 From 26013ad4c43f49a038a6489c35e9b901491339fe Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 13 May 2009 17:57:43 -0500 Subject: [SCSI] libiscsi: fix nop response/reply and session cleanup race If we are responding to a nop from the target by sending our nop, and the session is getting torn down, then iscsi_start_session_recovery could set the conn stop bits while the recv path is sending the nop response and we will hit the BUG_ONs in __iscsi_conn_send_pdu. This has us check the state in __iscsi_conn_send_pdu and fail all incoming mgmt IO if we are not logged in and if the pdu is not login related. It also changes the ordering of the setting of conn stop state bits so they are set after the session state is set (both are set under the session lock).
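For readability, the two halves of the fix in isolation (condensed from the hunks that follow):

    /* 1) in __iscsi_conn_send_pdu(): refuse new mgmt pdus once the
     *    session has left the logged-in state, instead of tripping
     *    the BUG_ONs on the connection stage */
    if (session->state != ISCSI_STATE_LOGGED_IN)
        return NULL;

    /* 2) in iscsi_start_session_recovery(): update session->state and
     *    suspend the xmit path first, and only then set the conn
     *    stop_stage/c_stage bits, each step under the session lock */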
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 047543cd3fc1..212fe2027a8c 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -546,6 +546,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, */ task = conn->login_task; else { + if (session->state != ISCSI_STATE_LOGGED_IN) + return NULL; + BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); @@ -2566,8 +2569,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, { int old_stop_stage; - del_timer_sync(&conn->transport_timer); - mutex_lock(&session->eh_mutex); spin_lock_bh(&session->lock); if (conn->stop_stage == STOP_CONN_TERM) { @@ -2585,13 +2586,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, session->state = ISCSI_STATE_TERMINATE; else if (conn->stop_stage != STOP_CONN_RECOVER) session->state = ISCSI_STATE_IN_RECOVERY; + spin_unlock_bh(&session->lock); + del_timer_sync(&conn->transport_timer); + iscsi_suspend_tx(conn); + + spin_lock_bh(&session->lock); old_stop_stage = conn->stop_stage; conn->stop_stage = flag; conn->c_stage = ISCSI_CONN_STOPPED; spin_unlock_bh(&session->lock); - iscsi_suspend_tx(conn); /* * for connection level recovery we should not calculate * header digest. conn->hdr_size used for optimization -- cgit v1.2.3 From d1acfae514425d680912907c6554852f1e258551 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 13 May 2009 17:57:44 -0500 Subject: [SCSI] libiscsi_tcp: update recv tracking for each skb instead of iscsi pdu Every time we read in a pdu, libiscsi will update a tracking field. It uses this to decide when to check if the transport might be bad. If we have not got data in recv_timeout seconds then we will send an iscsi ping/nop. If we are on a slow link then it could take a while to read in all the data for a data_in. In that case we might send a ping/nop when we do not need to, or we might drop a session thinking it is bad when the lower layer is making forward progress on it. This patch has libiscsi_tcp update the recv tracking for each skb (basically a network packet from our point of view) instead of the entire iscsi pdu+data, so we account for these cases where data is coming in slowly. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi_tcp.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index db93cd0dfdb6..b84a1d853f29 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -863,6 +863,12 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb, int rc = 0; ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset); + /* + * Update for each skb instead of pdu, because over slow networks a + * data_in's data could take a while to read in. We also want to + * account for r2ts. + */ + conn->last_recv = jiffies; if (unlikely(conn->suspend_rx)) { ISCSI_DBG_TCP(conn, "Rx suspended!\n"); -- cgit v1.2.3 From 4c48a82935f833d94fcf44c2b0c5d2922acfc77a Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 13 May 2009 17:57:45 -0500 Subject: [SCSI] libiscsi: fix iscsi transport checks to account for slower links If we have not got any pdus for recv_timeout seconds, then we will send an iscsi ping/nop to make sure the target is still around.
The problem is that if this is a slow link and the ping got queued after the data for a data_out (read), the transport code could think the ping has failed when it is just slowly making its way through the network. This patch has us check if we are making progress while the nop is outstanding. If we are still reading in data, then we do not fail the session at that time. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 38 +++++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 212fe2027a8c..c648bd328a21 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1677,6 +1677,22 @@ static void iscsi_start_tx(struct iscsi_conn *conn) iscsi_conn_queue_work(conn); } +/* + * We want to make sure a ping is in flight. It has timed out. + * And we are not busy processing a pdu that is making + * progress but got started before the ping and is taking a while + * to complete so the ping is just stuck behind it in a queue. + */ +static int iscsi_has_ping_timed_out(struct iscsi_conn *conn) +{ + if (conn->ping_task && + time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + + (conn->ping_timeout * HZ), jiffies)) + return 1; + else + return 0; +} + static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) { struct iscsi_cls_session *cls_session; @@ -1712,16 +1728,20 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) * if the ping timedout then we are in the middle of cleaning up * and can let the iscsi eh handle it */ - if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + - (conn->ping_timeout * HZ), jiffies)) + if (iscsi_has_ping_timed_out(conn)) { rc = BLK_EH_RESET_TIMER; + goto done; + } /* * if we are about to check the transport then give the command * more time */ if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), - jiffies)) + jiffies)) { rc = BLK_EH_RESET_TIMER; + goto done; + } + /* if in the middle of checking the transport then give us more time */ if (conn->ping_task) rc = BLK_EH_RESET_TIMER; @@ -1748,13 +1768,13 @@ static void iscsi_check_transport_timeouts(unsigned long data) recv_timeout *= HZ; last_recv = conn->last_recv; - if (conn->ping_task && - time_before_eq(conn->last_ping + (conn->ping_timeout * HZ), - jiffies)) { + + if (iscsi_has_ping_timed_out(conn)) { iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs " - "expired, last rx %lu, last ping %lu, " - "now %lu\n", conn->ping_timeout, last_recv, - conn->last_ping, jiffies); + "expired, recv timeout %d, last rx %lu, " + "last ping %lu, now %lu\n", + conn->ping_timeout, conn->recv_timeout, + last_recv, conn->last_ping, jiffies); spin_unlock(&session->lock); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); return; -- cgit v1.2.3 From 3bbaaad95fd38dedb7c66a601f14825b4e0c5a59 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 13 May 2009 17:57:46 -0500 Subject: [SCSI] libiscsi: handle cleanup task races bnx2i needs to send a hardware-specific cleanup command if a command has not completed normally (iscsi/scsi response from target), and the session is still ok (this is the case when we send a TMF to stop the command). At this time it will need to drop the session lock. The problem with the current code is that fail_all_commands assumes we will hold the lock the entire time, so it uses list_for_each_entry_safe.
If multiple cmds complete while bnx2i has dropped the session lock, list_for_each_entry_safe will not handle this correctly. This patch removes the running lists and just has us loop over the cmds array (in later patches we will then replace that array with a block tag map at the session level). It also fixes up the completion path so that if the TMF code and the normal recv path were completing the same command, they do not both try to release the refcount taken when the task is queued. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 225 +++++++++++++++++++++++++----------------------- 1 file changed, 116 insertions(+), 109 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index c648bd328a21..a9d7e520e551 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -109,7 +109,7 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) * if the window closed with IO queued, then kick the * xmit thread */ - if (!list_empty(&session->leadconn->xmitqueue) || + if (!list_empty(&session->leadconn->cmdqueue) || !list_empty(&session->leadconn->mgmtqueue)) { if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD)) iscsi_conn_queue_work(session->leadconn); @@ -366,7 +366,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) return -EIO; task->state = ISCSI_TASK_RUNNING; - list_move_tail(&task->running, &conn->run_list); conn->scsicmd_pdus_cnt++; ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x " @@ -382,26 +381,23 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) } /** - * iscsi_complete_command - finish a task + * iscsi_free_task - free a task * @task: iscsi cmd task * * Must be called with session lock. * This function returns the scsi command to scsi-ml or cleans * up mgmt tasks then returns the task to the pool. */ -static void iscsi_complete_command(struct iscsi_task *task) +static void iscsi_free_task(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; struct scsi_cmnd *sc = task->sc; session->tt->cleanup_task(task); - list_del_init(&task->running); - task->state = ISCSI_TASK_COMPLETED; + task->state = ISCSI_TASK_FREE; task->sc = NULL; - if (conn->task == task) - conn->task = NULL; /* * login task is preallocated so do not free */ @@ -410,9 +406,6 @@ static void iscsi_complete_command(struct iscsi_task *task) __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*)); - if (conn->ping_task == task) - conn->ping_task = NULL; - if (sc) { task->sc = NULL; /* SCSI eh reuses commands to verify us */ @@ -435,7 +428,7 @@ EXPORT_SYMBOL_GPL(__iscsi_get_task); static void __iscsi_put_task(struct iscsi_task *task) { if (atomic_dec_and_test(&task->refcount)) - iscsi_complete_command(task); + iscsi_free_task(task); } void iscsi_put_task(struct iscsi_task *task) @@ -448,14 +441,50 @@ void iscsi_put_task(struct iscsi_task *task) } EXPORT_SYMBOL_GPL(iscsi_put_task); +/** + * iscsi_complete_task - finish a task + * @task: iscsi cmd task + * + * Must be called with session lock.
+ */ +static void iscsi_complete_task(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + + if (task->state == ISCSI_TASK_COMPLETED) + return; + WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); + + task->state = ISCSI_TASK_COMPLETED; + + if (!list_empty(&task->running)) + list_del_init(&task->running); + + if (conn->task == task) + conn->task = NULL; + + if (conn->ping_task == task) + conn->ping_task = NULL; + + /* release get from queueing */ + __iscsi_put_task(task); +} + /* - * session lock must be held + * session lock must be held and if not called for a task that is + * still pending or from the xmit thread, then xmit thread must + * be suspended. */ -static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task, - int err) +static void fail_scsi_task(struct iscsi_task *task, int err) { + struct iscsi_conn *conn = task->conn; struct scsi_cmnd *sc; + /* + * if a command completes and we get a successful tmf response + * we will hit this because the scsi eh abort code does not take + * a ref to the task. + */ sc = task->sc; if (!sc) return; @@ -475,10 +504,7 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task, scsi_in(sc)->resid = scsi_in(sc)->length; } - if (conn->task == task) - conn->task = NULL; - /* release ref from queuecommand */ - __iscsi_put_task(task); + iscsi_complete_task(task); } static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, @@ -518,7 +544,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, session->state = ISCSI_STATE_LOGGING_OUT; task->state = ISCSI_TASK_RUNNING; - list_move_tail(&task->running, &conn->mgmt_run_list); ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x " "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt, task->data_count); @@ -564,6 +589,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, atomic_set(&task->refcount, 1); task->conn = conn; task->sc = NULL; + INIT_LIST_HEAD(&task->running); + task->state = ISCSI_TASK_PENDING; if (data_size) { memcpy(task->data, data, data_size); @@ -575,7 +602,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, if (conn->session->tt->alloc_pdu(task, hdr->opcode)) { iscsi_conn_printk(KERN_ERR, conn, "Could not allocate " "pdu for mgmt task.\n"); - goto requeue_task; + goto free_task; } } @@ -591,30 +618,22 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, task->conn->session->age); } - INIT_LIST_HEAD(&task->running); - list_add_tail(&task->running, &conn->mgmtqueue); - if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { if (iscsi_prep_mgmt_task(conn, task)) goto free_task; if (session->tt->xmit_task(task)) goto free_task; - - } else + } else { + list_add_tail(&task->running, &conn->mgmtqueue); iscsi_conn_queue_work(conn); + } return task; free_task: __iscsi_put_task(task); return NULL; - -requeue_task: - if (task != conn->login_task) - __kfifo_put(session->cmdpool.queue, (void*)&task, - sizeof(void*)); - return NULL; } int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, @@ -709,11 +728,10 @@ invalid_datalen: sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; } out: - ISCSI_DBG_SESSION(session, "done [sc %p res %d itt 0x%x]\n", + ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n", sc, sc->result, task->itt); conn->scsirsp_pdus_cnt++; - - __iscsi_put_task(task); + iscsi_complete_task(task); } /** @@ -747,8 +765,11 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, sc->result = (DID_BAD_TARGET << 16) | 
rhdr->cmd_status; } + ISCSI_DBG_SESSION(conn->session, "data in with status done " + "[sc %p res %d itt 0x%x]\n", + sc, sc->result, task->itt); conn->scsirsp_pdus_cnt++; - __iscsi_put_task(task); + iscsi_complete_task(task); } static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) @@ -969,7 +990,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, } iscsi_tmf_rsp(conn, hdr); - __iscsi_put_task(task); + iscsi_complete_task(task); break; case ISCSI_OP_NOOP_IN: iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); @@ -987,7 +1008,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, goto recv_pdu; mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout); - __iscsi_put_task(task); + iscsi_complete_task(task); break; default: rc = ISCSI_ERR_BAD_OPCODE; @@ -999,7 +1020,7 @@ out: recv_pdu: if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) rc = ISCSI_ERR_CONN_FAILED; - __iscsi_put_task(task); + iscsi_complete_task(task); return rc; } EXPORT_SYMBOL_GPL(__iscsi_complete_pdu); @@ -1176,7 +1197,12 @@ void iscsi_requeue_task(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; - list_move_tail(&task->running, &conn->requeue); + /* + * this may be on the requeue list already if the xmit_task callout + * is handling the r2ts while we are adding new ones + */ + if (list_empty(&task->running)) + list_add_tail(&task->running, &conn->requeue); iscsi_conn_queue_work(conn); } EXPORT_SYMBOL_GPL(iscsi_requeue_task); @@ -1216,6 +1242,7 @@ check_mgmt: while (!list_empty(&conn->mgmtqueue)) { conn->task = list_entry(conn->mgmtqueue.next, struct iscsi_task, running); + list_del_init(&conn->task->running); if (iscsi_prep_mgmt_task(conn, conn->task)) { __iscsi_put_task(conn->task); conn->task = NULL; @@ -1227,23 +1254,26 @@ check_mgmt: } /* process pending command queue */ - while (!list_empty(&conn->xmitqueue)) { + while (!list_empty(&conn->cmdqueue)) { if (conn->tmf_state == TMF_QUEUED) break; - conn->task = list_entry(conn->xmitqueue.next, + conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task, running); + list_del_init(&conn->task->running); if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { - fail_command(conn, conn->task, DID_IMM_RETRY << 16); + fail_scsi_task(conn->task, DID_IMM_RETRY << 16); continue; } rc = iscsi_prep_scsi_cmd_pdu(conn->task); if (rc) { if (rc == -ENOMEM) { + list_add_tail(&conn->task->running, + &conn->cmdqueue); conn->task = NULL; goto again; } else - fail_command(conn, conn->task, DID_ABORT << 16); + fail_scsi_task(conn->task, DID_ABORT << 16); continue; } rc = iscsi_xmit_task(conn); @@ -1270,8 +1300,8 @@ check_mgmt: conn->task = list_entry(conn->requeue.next, struct iscsi_task, running); + list_del_init(&conn->task->running); conn->task->state = ISCSI_TASK_RUNNING; - list_move_tail(conn->requeue.next, &conn->run_list); rc = iscsi_xmit_task(conn); if (rc) goto again; @@ -1412,7 +1442,6 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) reason = FAILURE_OOM; goto reject; } - list_add_tail(&task->running, &conn->xmitqueue); if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { reason = iscsi_prep_scsi_cmd_pdu(task); @@ -1429,8 +1458,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) reason = FAILURE_SESSION_NOT_READY; goto prepd_reject; } - } else + } else { + list_add_tail(&task->running, &conn->cmdqueue); iscsi_conn_queue_work(conn); + } session->queued_cmdsn++; spin_unlock(&session->lock); @@ -1439,7 +1470,7 @@ int 
iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) prepd_reject: sc->scsi_done = NULL; - iscsi_complete_command(task); + iscsi_complete_task(task); reject: spin_unlock(&session->lock); ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", @@ -1449,7 +1480,7 @@ reject: prepd_fault: sc->scsi_done = NULL; - iscsi_complete_command(task); + iscsi_complete_task(task); fault: spin_unlock(&session->lock); ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", @@ -1618,44 +1649,24 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, * Fail commands. session lock held and recv side suspended and xmit * thread flushed */ -static void fail_all_commands(struct iscsi_conn *conn, unsigned lun, - int error) +static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun, + int error) { - struct iscsi_task *task, *tmp; - - if (conn->task) { - if (lun == -1 || - (conn->task->sc && conn->task->sc->device->lun == lun)) - conn->task = NULL; - } + struct iscsi_task *task; + int i; - /* flush pending */ - list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) { - if (lun == task->sc->device->lun || lun == -1) { - ISCSI_DBG_SESSION(conn->session, - "failing pending sc %p itt 0x%x\n", - task->sc, task->itt); - fail_command(conn, task, error << 16); - } - } + for (i = 0; i < conn->session->cmds_max; i++) { + task = conn->session->cmds[i]; + if (!task->sc || task->state == ISCSI_TASK_FREE) + continue; - list_for_each_entry_safe(task, tmp, &conn->requeue, running) { - if (lun == task->sc->device->lun || lun == -1) { - ISCSI_DBG_SESSION(conn->session, - "failing requeued sc %p itt 0x%x\n", - task->sc, task->itt); - fail_command(conn, task, error << 16); - } - } + if (lun != -1 && lun != task->sc->device->lun) + continue; - /* fail all other running */ - list_for_each_entry_safe(task, tmp, &conn->run_list, running) { - if (lun == task->sc->device->lun || lun == -1) { - ISCSI_DBG_SESSION(conn->session, - "failing in progress sc %p itt 0x%x\n", - task->sc, task->itt); - fail_command(conn, task, error << 16); - } + ISCSI_DBG_SESSION(conn->session, + "failing sc %p itt 0x%x state %d\n", + task->sc, task->itt, task->state); + fail_scsi_task(task, error << 16); } } @@ -1859,7 +1870,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) } if (task->state == ISCSI_TASK_PENDING) { - fail_command(conn, task, DID_ABORT << 16); + fail_scsi_task(task, DID_ABORT << 16); goto success; } @@ -1890,7 +1901,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) * then sent more data for the cmd. 
*/ spin_lock(&session->lock); - fail_command(conn, task, DID_ABORT << 16); + fail_scsi_task(task, DID_ABORT << 16); conn->tmf_state = TMF_INITIAL; spin_unlock(&session->lock); iscsi_start_tx(conn); @@ -1997,7 +2008,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) iscsi_suspend_tx(conn); spin_lock_bh(&session->lock); - fail_all_commands(conn, sc->device->lun, DID_ERROR); + fail_scsi_tasks(conn, sc->device->lun, DID_ERROR); conn->tmf_state = TMF_INITIAL; spin_unlock_bh(&session->lock); @@ -2304,6 +2315,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, if (cmd_task_size) task->dd_data = &task[1]; task->itt = cmd_i; + task->state = ISCSI_TASK_FREE; INIT_LIST_HEAD(&task->running); } @@ -2390,10 +2402,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, conn->transport_timer.data = (unsigned long)conn; conn->transport_timer.function = iscsi_check_transport_timeouts; - INIT_LIST_HEAD(&conn->run_list); - INIT_LIST_HEAD(&conn->mgmt_run_list); INIT_LIST_HEAD(&conn->mgmtqueue); - INIT_LIST_HEAD(&conn->xmitqueue); + INIT_LIST_HEAD(&conn->cmdqueue); INIT_LIST_HEAD(&conn->requeue); INIT_WORK(&conn->xmitwork, iscsi_xmitworker); @@ -2561,27 +2571,24 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) EXPORT_SYMBOL_GPL(iscsi_conn_start); static void -flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn) +fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn) { - struct iscsi_task *task, *tmp; + struct iscsi_task *task; + int i; - /* handle pending */ - list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) { - ISCSI_DBG_SESSION(session, "flushing pending mgmt task " - "itt 0x%x\n", task->itt); - /* release ref from prep task */ - __iscsi_put_task(task); - } + for (i = 0; i < conn->session->cmds_max; i++) { + task = conn->session->cmds[i]; + if (task->sc) + continue; - /* handle running */ - list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) { - ISCSI_DBG_SESSION(session, "flushing running mgmt task " - "itt 0x%x\n", task->itt); - /* release ref from prep task */ - __iscsi_put_task(task); - } + if (task->state == ISCSI_TASK_FREE) + continue; - conn->task = NULL; + ISCSI_DBG_SESSION(conn->session, + "failing mgmt itt 0x%x state %d\n", + task->itt, task->state); + iscsi_complete_task(task); + } } static void iscsi_start_session_recovery(struct iscsi_session *session, @@ -2638,10 +2645,10 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, * flush queues. */ spin_lock_bh(&session->lock); if (flag == STOP_CONN_RECOVER) - fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED); + fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); else - fail_all_commands(conn, -1, DID_ERROR); - flush_control_queues(session, conn); + fail_scsi_tasks(conn, -1, DID_ERROR); + fail_mgmt_tasks(session, conn); spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); } -- cgit v1.2.3 From 301e0f7e4d78e956c58b66888e134dbdb44ea28e Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 13 May 2009 17:57:47 -0500 Subject: [SCSI] libiscsi: don't let io sit in queue when session has failed If the session has failed but we have not yet fully transitioned to the recovery stage, we were still queueing IO. The idea is that for some failures we can recover at the command level and still continue to execute other IO.
Well, we have never added that recovery-within-a-command code, so queueing up IO here just creates the possibility that it might time out; this patch just has us requeue the IO to the scsi layer for now. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index a9d7e520e551..57eb3af40e96 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1390,13 +1390,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) goto fault; } - /* - * ISCSI_STATE_FAILED is a temp. state. The recovery - * code will decide what is best to do with command queued - * during this time - */ - if (session->state != ISCSI_STATE_LOGGED_IN && - session->state != ISCSI_STATE_FAILED) { + if (session->state != ISCSI_STATE_LOGGED_IN) { /* * to handle the race between when we set the recovery state * and block the session we requeue here (commands could * up because the block code is not locked) */ switch (session->state) { + case ISCSI_STATE_FAILED: case ISCSI_STATE_IN_RECOVERY: reason = FAILURE_SESSION_IN_RECOVERY; - goto reject; + sc->result = DID_IMM_RETRY << 16; + break; case ISCSI_STATE_LOGGING_OUT: reason = FAILURE_SESSION_LOGGING_OUT; - goto reject; + sc->result = DID_IMM_RETRY << 16; + break; case ISCSI_STATE_RECOVERY_FAILED: reason = FAILURE_SESSION_RECOVERY_TIMEOUT; sc->result = DID_TRANSPORT_FAILFAST << 16; -- cgit v1.2.3 From 1336aed10b8af791378b017f0fa8da4e5b827b8d Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 13 May 2009 17:57:48 -0500 Subject: [SCSI] libiscsi: check if iscsi host has work queue before queueing work Instead of having libiscsi check if the offload bit is set, have it check if the lld created a work queue. I think this is clearer.
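As a rough sketch, condensed from the hunks below, the queueing helper now keys off whether the LLD created a per-host work queue rather than testing the CAP_DATA_PATH_OFFLOAD capability bit:

	inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
	{
		struct iscsi_host *ihost = shost_priv(conn->session->host);

		/* offload drivers never create ihost->workq, so this is a no-op for them */
		if (ihost->workq)
			queue_work(ihost->workq, &conn->xmitwork);
	}

Callers such as iscsi_update_cmdsn and iscsi_start_tx can then queue work unconditionally and rely on this one check.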
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 57eb3af40e96..dafa054537f6 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -81,7 +81,8 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn) struct Scsi_Host *shost = conn->session->host; struct iscsi_host *ihost = shost_priv(shost); - queue_work(ihost->workq, &conn->xmitwork); + if (ihost->workq) + queue_work(ihost->workq, &conn->xmitwork); } EXPORT_SYMBOL_GPL(iscsi_conn_queue_work); @@ -110,10 +111,8 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) * xmit thread */ if (!list_empty(&session->leadconn->cmdqueue) || - !list_empty(&session->leadconn->mgmtqueue)) { - if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD)) - iscsi_conn_queue_work(session->leadconn); - } + !list_empty(&session->leadconn->mgmtqueue)) + iscsi_conn_queue_work(session->leadconn); } } EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); @@ -555,6 +554,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size) { struct iscsi_session *session = conn->session; + struct iscsi_host *ihost = shost_priv(session->host); struct iscsi_task *task; itt_t itt; @@ -618,7 +618,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, task->conn->session->age); } - if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { + if (!ihost->workq) { if (iscsi_prep_mgmt_task(conn, task)) goto free_task; @@ -1368,6 +1368,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) { struct iscsi_cls_session *cls_session; struct Scsi_Host *host; + struct iscsi_host *ihost; int reason = 0; struct iscsi_session *session; struct iscsi_conn *conn; @@ -1378,6 +1379,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) sc->SCp.ptr = NULL; host = sc->device->host; + ihost = shost_priv(host); spin_unlock(host->host_lock); cls_session = starget_to_session(scsi_target(sc->device)); @@ -1440,7 +1442,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) goto reject; } - if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { + if (!ihost->workq) { reason = iscsi_prep_scsi_cmd_pdu(task); if (reason) { if (reason == -ENOMEM) { @@ -1673,7 +1675,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn) struct iscsi_host *ihost = shost_priv(shost); set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); - if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) + if (ihost->workq) flush_workqueue(ihost->workq); } EXPORT_SYMBOL_GPL(iscsi_suspend_tx); @@ -1681,8 +1683,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx); static void iscsi_start_tx(struct iscsi_conn *conn) { clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); - if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) - iscsi_conn_queue_work(conn); + iscsi_conn_queue_work(conn); } /* -- cgit v1.2.3 From b3cd5050bf8eb32ceecee129cac7c59e6f1668c4 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 13 May 2009 17:57:49 -0500 Subject: [SCSI] libiscsi: add task aborted state If a task did not complete normally due to a TMF, libiscsi will now complete the task with the state ISCSI_TASK_ABRT_TMF. Drivers like bnx2i that need to free resources if a command did not complete normally can then check the task state. 
If a driver does not need to send a special command if we have dropped the session then they can check for ISCSI_TASK_ABRT_SESS_RECOV. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/infiniband/ulp/iser/iscsi_iser.c | 7 ++-- drivers/scsi/libiscsi.c | 60 +++++++++++++++++++------------- drivers/scsi/libiscsi_tcp.c | 4 +-- 3 files changed, 39 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index ffbe0c76bc11..0ba6ec876296 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -257,11 +257,8 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task) { struct iscsi_iser_task *iser_task = task->dd_data; - /* - * mgmt tasks do not need special cleanup and we do not - * allocate anything in the init task callout - */ - if (!task->sc || task->state == ISCSI_TASK_PENDING) + /* mgmt tasks do not need special cleanup */ + if (!task->sc) return; if (iser_task->status == ISER_TASK_STATUS_STARTED) { diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index dafa054537f6..b00be6c3efc1 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -443,18 +443,20 @@ EXPORT_SYMBOL_GPL(iscsi_put_task); /** * iscsi_complete_task - finish a task * @task: iscsi cmd task + * @state: state to complete task with * * Must be called with session lock. */ -static void iscsi_complete_task(struct iscsi_task *task) +static void iscsi_complete_task(struct iscsi_task *task, int state) { struct iscsi_conn *conn = task->conn; - if (task->state == ISCSI_TASK_COMPLETED) + if (task->state == ISCSI_TASK_COMPLETED || + task->state == ISCSI_TASK_ABRT_TMF || + task->state == ISCSI_TASK_ABRT_SESS_RECOV) return; WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); - - task->state = ISCSI_TASK_COMPLETED; + task->state = state; if (!list_empty(&task->running)) list_del_init(&task->running); @@ -478,6 +480,7 @@ static void fail_scsi_task(struct iscsi_task *task, int err) { struct iscsi_conn *conn = task->conn; struct scsi_cmnd *sc; + int state; /* * if a command completes and we get a successful tmf response @@ -488,14 +491,20 @@ static void fail_scsi_task(struct iscsi_task *task, int err) if (!sc) return; - if (task->state == ISCSI_TASK_PENDING) + if (task->state == ISCSI_TASK_PENDING) { /* * cmd never made it to the xmit thread, so we should not count * the cmd in the sequencing */ conn->session->queued_cmdsn--; + /* it was never sent so just complete like normal */ + state = ISCSI_TASK_COMPLETED; + } else if (err == DID_TRANSPORT_DISRUPTED) + state = ISCSI_TASK_ABRT_SESS_RECOV; + else + state = ISCSI_TASK_ABRT_TMF; - sc->result = err; + sc->result = err << 16; if (!scsi_bidi_cmnd(sc)) scsi_set_resid(sc, scsi_bufflen(sc)); else { @@ -503,7 +512,7 @@ static void fail_scsi_task(struct iscsi_task *task, int err) scsi_in(sc)->resid = scsi_in(sc)->length; } - iscsi_complete_task(task); + iscsi_complete_task(task, state); } static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, @@ -731,7 +740,7 @@ out: ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n", sc, sc->result, task->itt); conn->scsirsp_pdus_cnt++; - iscsi_complete_task(task); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); } /** @@ -769,7 +778,7 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, "[sc %p res %d itt 0x%x]\n", sc, sc->result, task->itt); conn->scsirsp_pdus_cnt++; - iscsi_complete_task(task); + iscsi_complete_task(task, 
ISCSI_TASK_COMPLETED); } static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) @@ -990,7 +999,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, } iscsi_tmf_rsp(conn, hdr); - iscsi_complete_task(task); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); break; case ISCSI_OP_NOOP_IN: iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); @@ -1008,7 +1017,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, goto recv_pdu; mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout); - iscsi_complete_task(task); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); break; default: rc = ISCSI_ERR_BAD_OPCODE; @@ -1020,7 +1029,7 @@ out: recv_pdu: if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) rc = ISCSI_ERR_CONN_FAILED; - iscsi_complete_task(task); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); return rc; } EXPORT_SYMBOL_GPL(__iscsi_complete_pdu); @@ -1262,7 +1271,7 @@ check_mgmt: struct iscsi_task, running); list_del_init(&conn->task->running); if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { - fail_scsi_task(conn->task, DID_IMM_RETRY << 16); + fail_scsi_task(conn->task, DID_IMM_RETRY); continue; } rc = iscsi_prep_scsi_cmd_pdu(conn->task); @@ -1273,7 +1282,7 @@ check_mgmt: conn->task = NULL; goto again; } else - fail_scsi_task(conn->task, DID_ABORT << 16); + fail_scsi_task(conn->task, DID_ABORT); continue; } rc = iscsi_xmit_task(conn); @@ -1469,7 +1478,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) prepd_reject: sc->scsi_done = NULL; - iscsi_complete_task(task); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); reject: spin_unlock(&session->lock); ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", @@ -1479,7 +1488,7 @@ reject: prepd_fault: sc->scsi_done = NULL; - iscsi_complete_task(task); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); fault: spin_unlock(&session->lock); ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", @@ -1665,7 +1674,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun, ISCSI_DBG_SESSION(conn->session, "failing sc %p itt 0x%x state %d\n", task->sc, task->itt, task->state); - fail_scsi_task(task, error << 16); + fail_scsi_task(task, error); } } @@ -1868,7 +1877,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) } if (task->state == ISCSI_TASK_PENDING) { - fail_scsi_task(task, DID_ABORT << 16); + fail_scsi_task(task, DID_ABORT); goto success; } @@ -1899,7 +1908,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) * then sent more data for the cmd. */ spin_lock(&session->lock); - fail_scsi_task(task, DID_ABORT << 16); + fail_scsi_task(task, DID_ABORT); conn->tmf_state = TMF_INITIAL; spin_unlock(&session->lock); iscsi_start_tx(conn); @@ -2572,7 +2581,7 @@ static void fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn) { struct iscsi_task *task; - int i; + int i, state; for (i = 0; i < conn->session->cmds_max; i++) { task = conn->session->cmds[i]; @@ -2585,7 +2594,11 @@ fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn) ISCSI_DBG_SESSION(conn->session, "failing mgmt itt 0x%x state %d\n", task->itt, task->state); - iscsi_complete_task(task); + state = ISCSI_TASK_ABRT_SESS_RECOV; + if (task->state == ISCSI_TASK_PENDING) + state = ISCSI_TASK_COMPLETED; + iscsi_complete_task(task, state); + } } @@ -2642,10 +2655,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, * flush queues. 
*/ spin_lock_bh(&session->lock); - if (flag == STOP_CONN_RECOVER) - fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); - else - fail_scsi_tasks(conn, -1, DID_ERROR); + fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); fail_mgmt_tasks(session, conn); spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index b84a1d853f29..2bc07090321d 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -440,8 +440,8 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task) struct iscsi_tcp_task *tcp_task = task->dd_data; struct iscsi_r2t_info *r2t; - /* nothing to do for mgmt or pending tasks */ - if (!task->sc || task->state == ISCSI_TASK_PENDING) + /* nothing to do for mgmt */ + if (!task->sc) return; /* flush task's r2t queues */ -- cgit v1.2.3 From 4421c9ebeeacf3d9c4e6aa558e1a777178e71add Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 13 May 2009 17:57:50 -0500 Subject: [SCSI] libiscsi: add debug printks for iscsi command completion path This patch just adds some debug statements for the abort and completion paths. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index b00be6c3efc1..59908aead531 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -393,10 +393,12 @@ static void iscsi_free_task(struct iscsi_task *task) struct iscsi_session *session = conn->session; struct scsi_cmnd *sc = task->sc; + ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n", + task->itt, task->state, task->sc); + session->tt->cleanup_task(task); task->state = ISCSI_TASK_FREE; task->sc = NULL; - /* * login task is preallocated so do not free */ @@ -451,6 +453,9 @@ static void iscsi_complete_task(struct iscsi_task *task, int state) { struct iscsi_conn *conn = task->conn; + ISCSI_DBG_SESSION(conn->session, + "complete task itt 0x%x state %d sc %p\n", + task->itt, task->state, task->sc); if (task->state == ISCSI_TASK_COMPLETED || task->state == ISCSI_TASK_ABRT_TMF || task->state == ISCSI_TASK_ABRT_SESS_RECOV) @@ -1836,6 +1841,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; + ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc); + mutex_lock(&session->eh_mutex); spin_lock_bh(&session->lock); /* @@ -1858,6 +1865,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) sc->SCp.phase != session->age) { spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); + ISCSI_DBG_SESSION(session, "failing abort due to dropped " + "session.\n"); return FAILED; } -- cgit v1.2.3 From f9da3be5afc08c40e7f7a395c8935d500a6898b1 Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Thu, 14 May 2009 20:41:21 -0400 Subject: [SCSI] mvsas: remove all the casts from void * or to void * Signed-off-by: Ying Chu Signed-off-by: Andy Yan Signed-off-by: Ke Wei --- drivers/scsi/mvsas/mv_sas.c | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index c05e4c05a414..0d2138641214 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -28,7 +28,7 @@ static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) { if (task->lldd_task) { struct mvs_slot_info *slot; - slot = (struct 
mvs_slot_info *) task->lldd_task; + slot = task->lldd_task; *tag = slot->slot_tag; return 1; } @@ -37,7 +37,7 @@ static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) void mvs_tag_clear(struct mvs_info *mvi, u32 tag) { - void *bitmap = (void *) &mvi->tags; + void *bitmap = &mvi->tags; clear_bit(tag, bitmap); } @@ -48,14 +48,14 @@ void mvs_tag_free(struct mvs_info *mvi, u32 tag) void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) { - void *bitmap = (void *) &mvi->tags; + void *bitmap = &mvi->tags; set_bit(tag, bitmap); } inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) { unsigned int index, tag; - void *bitmap = (void *) &mvi->tags; + void *bitmap = &mvi->tags; index = find_first_zero_bit(bitmap, mvi->tags_num); tag = index; @@ -591,8 +591,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, { struct sas_task *task = tei->task; struct domain_device *dev = task->dev; - struct mvs_device *mvi_dev = - (struct mvs_device *)dev->lldd_dev; + struct mvs_device *mvi_dev = dev->lldd_dev; struct mvs_cmd_hdr *hdr = tei->hdr; struct asd_sas_port *sas_port = dev->port; struct mvs_slot_info *slot; @@ -733,8 +732,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, struct mvs_cmd_hdr *hdr = tei->hdr; struct mvs_port *port = tei->port; struct domain_device *dev = task->dev; - struct mvs_device *mvi_dev = - (struct mvs_device *)dev->lldd_dev; + struct mvs_device *mvi_dev = dev->lldd_dev; struct asd_sas_port *sas_port = dev->port; struct mvs_slot_info *slot; void *buf_prd; @@ -894,7 +892,7 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, spin_lock_irqsave(&mvi->lock, flags); do { dev = t->dev; - mvi_dev = (struct mvs_device *)dev->lldd_dev; + mvi_dev = dev->lldd_dev; if (DEV_IS_GONE(mvi_dev)) { if (mvi_dev) mv_dprintk("device %d not ready.\n", @@ -987,7 +985,7 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, } slot->task = t; slot->port = tei.port; - t->lldd_task = (void *) slot; + t->lldd_task = slot; list_add_tail(&slot->entry, &tei.port->list); /* TODO: select normal or high priority */ spin_lock(&t->task_state_lock); @@ -1139,7 +1137,7 @@ static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); - return (void *)s; + return s; } static u32 mvs_is_sig_fis_received(u32 irq_status) @@ -1176,7 +1174,7 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) sas_phy->oob_mode = SATA_OOB_MODE; phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); - mvs_get_d2h_reg(mvi, i, (void *)id); + mvs_get_d2h_reg(mvi, i, id); } else { u32 tmp; dev_printk(KERN_DEBUG, mvi->dev, @@ -1317,7 +1315,7 @@ int mvs_dev_found_notify(struct domain_device *dev, int lock) res = -1; goto found_out; } - dev->lldd_dev = (void *)mvi_device; + dev->lldd_dev = mvi_device; mvi_device->dev_type = dev->dev_type; mvi_device->mvi_info = mvi; if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { @@ -1356,7 +1354,7 @@ int mvs_dev_found(struct domain_device *dev) void mvs_dev_gone_notify(struct domain_device *dev, int lock) { unsigned long flags = 0; - struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; + struct mvs_device *mvi_dev = dev->lldd_dev; struct mvs_info *mvi = mvi_dev->mvi_info; if (lock) @@ -1532,7 +1530,7 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun) unsigned long flags; int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; struct mvs_tmf_task 
tmf_task; - struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev; + struct mvs_device * mvi_dev = dev->lldd_dev; struct mvs_info *mvi = mvi_dev->mvi_info; tmf_task.tmf = TMF_LU_RESET; @@ -1654,7 +1652,7 @@ int mvs_abort_task(struct sas_task *task) struct mvs_slot_info *slot; if (task->lldd_task) { - slot = (struct mvs_slot_info *)task->lldd_task; + slot = task->lldd_task; slot_no = (u32) (slot - mvi->slot_info); mvs_slot_complete(mvi, slot_no, 1); } @@ -1708,7 +1706,7 @@ int mvs_clear_task_set(struct domain_device *dev, u8 *lun) static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, u32 slot_idx, int err) { - struct mvs_device *mvi_dev = (struct mvs_device *)task->dev->lldd_dev; + struct mvs_device *mvi_dev = task->dev->lldd_dev; struct task_status_struct *tstat = &task->task_status; struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; int stat = SAM_GOOD; @@ -1785,7 +1783,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) return -1; tstat = &task->task_status; - mvi_dev = (struct mvs_device *)task->dev->lldd_dev; + mvi_dev = task->dev->lldd_dev; mvs_hba_cq_dump(mvi); -- cgit v1.2.3 From f0216ae9bd0ea8a45736f386a3b8058aefc64dd8 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Fri, 15 May 2009 13:18:14 +0200 Subject: [SCSI] zfcp: Use correct req_id for traces The zfcp traces used the fsf_req address in place of the req_id. Change this to save the correct req_id. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_dbf.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 0a1a5dd8d018..b99b87ce5a39 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -163,7 +163,7 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) } response->fsf_command = fsf_req->fsf_command; - response->fsf_reqid = (unsigned long)fsf_req; + response->fsf_reqid = fsf_req->req_id; response->fsf_seqno = fsf_req->seq_no; response->fsf_issued = fsf_req->issued; response->fsf_prot_status = qtcb->prefix.prot_status; @@ -737,7 +737,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) spin_lock_irqsave(&adapter->san_dbf_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); - r->fsf_reqid = (unsigned long)fsf_req; + r->fsf_reqid = fsf_req->req_id; r->fsf_seqno = fsf_req->seq_no; r->s_id = fc_host_port_id(adapter->scsi_host); r->d_id = wka_port->d_id; @@ -773,7 +773,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) spin_lock_irqsave(&adapter->san_dbf_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); - r->fsf_reqid = (unsigned long)fsf_req; + r->fsf_reqid = fsf_req->req_id; r->fsf_seqno = fsf_req->seq_no; r->s_id = wka_port->d_id; r->d_id = fc_host_port_id(adapter->scsi_host); @@ -803,7 +803,7 @@ static void zfcp_san_dbf_event_els(const char *tag, int level, spin_lock_irqsave(&adapter->san_dbf_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); - rec->fsf_reqid = (unsigned long)fsf_req; + rec->fsf_reqid = fsf_req->req_id; rec->fsf_seqno = fsf_req->seq_no; rec->s_id = s_id; rec->d_id = d_id; @@ -965,7 +965,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, ZFCP_DBF_SCSI_FCP_SNS_INFO); } - rec->fsf_reqid = (unsigned long)fsf_req; + rec->fsf_reqid = fsf_req->req_id; rec->fsf_seqno = 
fsf_req->seq_no; rec->fsf_issued = fsf_req->issued; } -- cgit v1.2.3 From 955a21555dc8dc4a81da7063d595cd7558f413ce Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Fri, 15 May 2009 13:18:15 +0200 Subject: [SCSI] zfcp: Update message and add description Update the newly introduced message for the boxed status to match the style of s390 and zfcp messages. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_ccw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 733fe3bf6285..4f19b28e187f 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -176,8 +176,8 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event) "ccnoti4", NULL); break; case CIO_BOXED: - dev_warn(&adapter->ccw_device->dev, - "The ccw device did not respond in time.\n"); + dev_warn(&adapter->ccw_device->dev, "The FCP device " + "did not respond within the specified time\n"); zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL); break; } -- cgit v1.2.3 From a40a1bafe7da0afe61b1c20fc50e18c07ce724f9 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Fri, 15 May 2009 13:18:16 +0200 Subject: [SCSI] zfcp: Make queue_depth adjustable zfcp always set the queue_depth for SCSI devices to 32 and did not allow changing it. Introduce a kernel parameter zfcp.queue_depth and the change_queue_depth callback to allow changing the queue_depth when it is required. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_scsi.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index e8fbeaeb5fbf..7d0da230eb63 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -12,6 +12,10 @@ #include "zfcp_ext.h" #include +static unsigned int default_depth = 32; +module_param_named(queue_depth, default_depth, uint, 0600); +MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices"); + /* Find start of Sense Information in FCP response unit*/ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) { @@ -24,6 +28,12 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) return fcp_sns_info_ptr; } +static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth) +{ + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); + return sdev->queue_depth; +} + static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) { struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; @@ -34,7 +44,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) static int zfcp_scsi_slave_configure(struct scsi_device *sdp) { if (sdp->tagged_supported) - scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, 32); + scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth); else scsi_adjust_queue_depth(sdp, 0, 1); return 0; @@ -647,6 +657,7 @@ struct zfcp_data zfcp_data = { .name = "zfcp", .module = THIS_MODULE, .proc_name = "zfcp", + .change_queue_depth = zfcp_scsi_change_queue_depth, .slave_alloc = zfcp_scsi_slave_alloc, .slave_configure = zfcp_scsi_slave_configure, .slave_destroy = zfcp_scsi_slave_destroy, -- cgit v1.2.3 From bc90c8632f63cd94246e3fb6f1b6d7ecba48f1a0 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Fri, 15 May 2009 13:18:17 +0200 Subject: [SCSI] zfcp: Remove unnecessary
default case and assignments enum dma_data_direction only has the 4 values DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE and DMA_NONE. No need to have the default case. While changing this, setup sbtype in one place to make sparse happy. The default value of retval is already -EIO, so remove the additional assignment for these two cases. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_fsf.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 74dee32afba8..8411730d44ba 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -2314,7 +2314,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, { struct zfcp_fsf_req *req; struct fcp_cmnd_iu *fcp_cmnd_iu; - unsigned int sbtype; + unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; int real_bytes, retval = -EIO; struct zfcp_adapter *adapter = unit->port->adapter; @@ -2356,11 +2356,9 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, switch (scsi_cmnd->sc_data_direction) { case DMA_NONE: req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; - sbtype = SBAL_FLAGS0_TYPE_READ; break; case DMA_FROM_DEVICE: req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ; - sbtype = SBAL_FLAGS0_TYPE_READ; fcp_cmnd_iu->rddata = 1; break; case DMA_TO_DEVICE: @@ -2369,8 +2367,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, fcp_cmnd_iu->wddata = 1; break; case DMA_BIDIRECTIONAL: - default: - retval = -EIO; goto failed_scsi_cmnd; } @@ -2394,9 +2390,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, scsi_sglist(scsi_cmnd), FSF_MAX_SBALS_PER_REQ); if (unlikely(real_bytes < 0)) { - if (req->sbal_number < FSF_MAX_SBALS_PER_REQ) - retval = -EIO; - else { + if (req->sbal_number >= FSF_MAX_SBALS_PER_REQ) { dev_err(&adapter->ccw_device->dev, "Oversize data package, unit 0x%016Lx " "on port 0x%016Lx closed\n", -- cgit v1.2.3 From dceab655d9f7d99881c2033c8ff4e1c7b444e104 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Fri, 15 May 2009 13:18:18 +0200 Subject: [SCSI] zfcp: Add comments to switch/case fallthroughs Add comments where there is a deliberate fall through in switch/case statements. This makes some code checkers happy and makes it clear that there is no missing break statement. 
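A generic, hypothetical fragment (the names are illustrative, not from the driver) showing the annotation style being added:

	switch (status) {
	case STATUS_SPECIAL:
		handle_special();
		/* fall through */
	default:
		handle_common();
		break;
	}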
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_erp.c | 1 + drivers/s390/scsi/zfcp_fsf.c | 5 +++++ 2 files changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index fdc9b4352a64..3f64aded6caa 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -880,6 +880,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) zfcp_port_put(port); return ZFCP_ERP_CONTINUES; } + /* fall through */ case ZFCP_ERP_STEP_NAMESERVER_LOOKUP: if (!port->d_id) return ZFCP_ERP_FAILED; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 8411730d44ba..b550c249389d 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -526,6 +526,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) break; case FSF_TOPO_AL: fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; + /* fall through */ default: dev_err(&adapter->ccw_device->dev, "Unknown or unsupported arbitrated loop " @@ -897,6 +898,7 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) switch (fsq->word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: zfcp_test_link(unit->port); + /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; @@ -993,6 +995,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) break; case FSF_PORT_HANDLE_NOT_VALID: zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req); + /* fall through */ case FSF_GENERIC_COMMAND_REJECTED: case FSF_PAYLOAD_SIZE_MISMATCH: case FSF_REQUEST_SIZE_TOO_LARGE: @@ -1590,8 +1593,10 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: dev_warn(&req->adapter->ccw_device->dev, "Opening WKA port 0x%x failed\n", wka_port->d_id); + /* fall through */ case FSF_ADAPTER_STATUS_AVAILABLE: req->status |= ZFCP_STATUS_FSFREQ_ERROR; + /* fall through */ case FSF_ACCESS_DENIED: wka_port->status = ZFCP_WKA_PORT_OFFLINE; break; -- cgit v1.2.3 From a17c5855643afa7838f542cbd0a1ed9a73968cef Mon Sep 17 00:00:00 2001 From: Martin Petermann Date: Fri, 15 May 2009 13:18:19 +0200 Subject: [SCSI] zfcp: Increase ref counter for port open requests In rare cases, an open port request might time out, erp calls zfcp_port_put, and the port gets dequeued. Now, the late returning (or dismissed) fsf-port-open calls the fsf_port_open_handler, which tries to reference the port data structure, leading to a kernel oops.
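The fix follows the usual get/put discipline, roughly as sketched here (condensed from the hunks below): take a port reference before the request is sent, and drop it both in the completion handler and on the send-failure path, so a late-running handler can never touch a freed port.

	zfcp_port_get(port);            /* handler may still run after erp gives up */
	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		zfcp_port_put(port);    /* the request never went out */
	}
	/* ...and zfcp_fsf_open_port_handler() now ends with zfcp_port_put(port) */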
Signed-off-by: Martin Petermann Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_fsf.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index b550c249389d..6fae2688fede 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1402,7 +1402,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) struct fsf_plogi *plogi; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) - return; + goto out; switch (header->fsf_status) { case FSF_PORT_ALREADY_OPEN: @@ -1464,6 +1464,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; } + +out: + zfcp_port_put(port); } /** @@ -1476,6 +1479,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) struct qdio_buffer_element *sbale; struct zfcp_adapter *adapter = erp_action->adapter; struct zfcp_fsf_req *req; + struct zfcp_port *port = erp_action->port; int retval = -EIO; spin_lock_bh(&adapter->req_q_lock); @@ -1496,16 +1500,18 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; req->handler = zfcp_fsf_open_port_handler; - req->qtcb->bottom.support.d_id = erp_action->port->d_id; - req->data = erp_action->port; + req->qtcb->bottom.support.d_id = port->d_id; + req->data = port; req->erp_action = erp_action; erp_action->fsf_req = req; + zfcp_port_get(port); zfcp_fsf_start_erp_timer(req); retval = zfcp_fsf_req_send(req); if (retval) { zfcp_fsf_req_free(req); erp_action->fsf_req = NULL; + zfcp_port_put(port); } out: spin_unlock_bh(&adapter->req_q_lock); -- cgit v1.2.3 From ea460a81919f2b3410e7fb30183c0256d068d87a Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Fri, 15 May 2009 13:18:20 +0200 Subject: [SCSI] zfcp: Changed D_ID left port disabled If the destination ID (D_ID) of a remote storage port changed, e.g. re-plugged cable on the switch in a different switch port, the port was never (re-)attached within Linux. This patch fixes the broken mapping between the WWPN and the D_ID. 
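In outline (a condensed sketch of the erp hunk that follows), a port whose D_ID has gone stale is cleared and reopened so the nameserver lookup runs again and the WWPN-to-D_ID mapping is rebuilt:

	if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
		port->d_id = 0;         /* force a fresh nameserver lookup */
		_zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL);
		return ZFCP_ERP_EXIT;
	}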
Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_erp.c | 7 ++++++- drivers/s390/scsi/zfcp_fc.c | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 3f64aded6caa..e50ea465bc2b 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -895,8 +895,13 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) act->step = ZFCP_ERP_STEP_PORT_CLOSING; return ZFCP_ERP_CONTINUES; } - /* fall through otherwise */ } + if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) { + port->d_id = 0; + _zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL); + return ZFCP_ERP_EXIT; + } + /* fall through otherwise */ } return ZFCP_ERP_FAILED; } diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 19ae0842047c..bb2752b4130f 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -150,9 +150,14 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, struct zfcp_port *port; read_lock_irqsave(&zfcp_data.config_lock, flags); - list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) + list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { if ((port->d_id & range) == (elem->nport_did & range)) zfcp_test_link(port); + if (!port->d_id) + zfcp_erp_port_reopen(port, + ZFCP_STATUS_COMMON_ERP_FAILED, + "fcrscn1", NULL); + } read_unlock_irqrestore(&zfcp_data.config_lock, flags); } -- cgit v1.2.3 From 6fcf41d1d8796f41b893754324704e23971c2ea1 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Fri, 15 May 2009 13:18:21 +0200 Subject: [SCSI] zfcp: Keep ccw device and model id in zfcp_ccw.c Keep the information about the device and model id in zfcp_ccw. This requires an additional helper function to check for the privileged cfdc subchannel, but it allows the removal of the redundant defines from the zfcp_def header file. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_ccw.c | 26 ++++++++++++++++++-------- drivers/s390/scsi/zfcp_def.h | 7 ------- drivers/s390/scsi/zfcp_ext.h | 1 + drivers/s390/scsi/zfcp_fsf.c | 2 +- 4 files changed, 20 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 4f19b28e187f..b2fe5cdbcaee 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -11,6 +11,24 @@ #include "zfcp_ext.h" +#define ZFCP_MODEL_PRIV 0x4 + +static struct ccw_device_id zfcp_ccw_device_id[] = { + { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) }, + { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) }, + {}, +}; +MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id); + +/** + * zfcp_ccw_priv_sch - check if subchannel is privileged + * @adapter: Adapter/Subchannel to check + */ +int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter) +{ + return adapter->ccw_device->id.dev_model == ZFCP_MODEL_PRIV; +} + /** * zfcp_ccw_probe - probe function of zfcp driver * @ccw_device: pointer to belonging ccw device @@ -199,14 +217,6 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev) up(&zfcp_data.config_sema); } -static struct ccw_device_id zfcp_ccw_device_id[] = { - { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) }, - { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. 
*/ - {}, -}; - -MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id); - static struct ccw_driver zfcp_ccw_driver = { .owner = THIS_MODULE, .name = "zfcp", diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 4c362a9069f0..2074d45dbf6c 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -47,13 +47,6 @@ /********************* CIO/QDIO SPECIFIC DEFINES *****************************/ -/* Adapter Identification Parameters */ -#define ZFCP_CONTROL_UNIT_TYPE 0x1731 -#define ZFCP_CONTROL_UNIT_MODEL 0x03 -#define ZFCP_DEVICE_TYPE 0x1732 -#define ZFCP_DEVICE_MODEL 0x03 -#define ZFCP_DEVICE_MODEL_PRIV 0x04 - /* DMQ bug workaround: don't use last SBALE */ #define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1) diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 2e31b536548c..120a9a1c81f7 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -27,6 +27,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int); /* zfcp_ccw.c */ extern int zfcp_ccw_register(void); +extern int zfcp_ccw_priv_sch(struct zfcp_adapter *); extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *); /* zfcp_cfdc.c */ diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 6fae2688fede..e6dae3744e79 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1887,7 +1887,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) && (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) && - (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) { + !zfcp_ccw_priv_sch(adapter)) { exclusive = (bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE); readwrite = (bottom->lun_access_info & -- cgit v1.2.3 From 3c621b3ee1432e7a2aca4a3b670b1d05f19ecf9c Mon Sep 17 00:00:00 2001 From: Eric Moore Date: Mon, 18 May 2009 12:59:41 -0600 Subject: [SCSI] mpt2sas: T10 DIF Support This add support for type 1 and 3 DIF support per the Oracle API. Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.h | 1 + drivers/scsi/mpt2sas/mpt2sas_scsih.c | 118 +++++++++++++++++++++++++++++++++++ 2 files changed, 119 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 36b1d1052ba1..aba81fb320b7 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -61,6 +61,7 @@ #include #include #include +#include #include "mpt2sas_debug.h" diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index e3a7967259e7..5394f6196416 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2389,6 +2389,106 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) } } +/** + * _scsih_setup_eedp - setup MPI request for EEDP transfer + * @scmd: pointer to scsi command object + * @mpi_request: pointer to the SCSI_IO reqest message frame + * + * Supporting protection 1 and 3. 
+ * + * Returns nothing + */ +static void +_scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request) +{ + u16 eedp_flags; + unsigned char prot_op = scsi_get_prot_op(scmd); + unsigned char prot_type = scsi_get_prot_type(scmd); + + if (prot_type == SCSI_PROT_DIF_TYPE0 || + prot_type == SCSI_PROT_DIF_TYPE2 || + prot_op == SCSI_PROT_NORMAL) + return; + + if (prot_op == SCSI_PROT_READ_STRIP) + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; + else if (prot_op == SCSI_PROT_WRITE_INSERT) + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; + else + return; + + mpi_request->EEDPBlockSize = scmd->device->sector_size; + + switch (prot_type) { + case SCSI_PROT_DIF_TYPE1: + + /* + * enable ref/guard checking + * auto increment ref tag + */ + mpi_request->EEDPFlags = eedp_flags | + MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + mpi_request->CDB.EEDP32.PrimaryReferenceTag = + cpu_to_be32(scsi_get_lba(scmd)); + + break; + + case SCSI_PROT_DIF_TYPE3: + + /* + * enable guard checking + */ + mpi_request->EEDPFlags = eedp_flags | + MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + + break; + } +} + +/** + * _scsih_eedp_error_handling - return sense code for EEDP errors + * @scmd: pointer to scsi command object + * @ioc_status: ioc status + * + * Returns nothing + */ +static void +_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) +{ + u8 ascq; + u8 sk; + u8 host_byte; + + switch (ioc_status) { + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + ascq = 0x01; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + ascq = 0x02; + break; + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + ascq = 0x03; + break; + default: + ascq = 0x00; + break; + } + + if (scmd->sc_data_direction == DMA_TO_DEVICE) { + sk = ILLEGAL_REQUEST; + host_byte = DID_ABORT; + } else { + sk = ABORTED_COMMAND; + host_byte = DID_OK; + } + + scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq); + scmd->result = DRIVER_SENSE << 24 | (host_byte << 16) | + SAM_STAT_CHECK_CONDITION; +} + /** * scsih_qcmd - main scsi request entry point * @scmd: pointer to scsi command object @@ -2470,6 +2570,7 @@ scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) } mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t)); + _scsih_setup_eedp(scmd, mpi_request); mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; if (sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) @@ -2604,6 +2705,15 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: desc_ioc_state = "scsi ext terminated"; break; + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + desc_ioc_state = "eedp guard error"; + break; + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + desc_ioc_state = "eedp ref tag error"; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + desc_ioc_state = "eedp app tag error"; + break; default: desc_ioc_state = "unknown"; break; @@ -2939,6 +3049,11 @@ scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) scmd->result = DID_RESET << 16; break; + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + _scsih_eedp_error_handling(scmd, ioc_status); + break; case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: case MPI2_IOCSTATUS_INVALID_FUNCTION: case MPI2_IOCSTATUS_INVALID_SGL: @@ -5503,6 +5618,9 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_add_shost_fail; } + 
scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION + | SHOST_DIF_TYPE3_PROTECTION); + /* event thread */ snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), "fw_event%d", ioc->id); -- cgit v1.2.3 From 993e0da7b767c0a7c1fd0079b16f3d28e6f25a48 Mon Sep 17 00:00:00 2001 From: Eric Moore Date: Mon, 18 May 2009 13:00:45 -0600 Subject: [SCSI] mpt2sas: LUN Reset Support Add a new eh_target_reset_handler for target reset. Change the eh_device_reset_handler so it sends MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET instead of MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET. Add a new function, _scsih_scsi_lookup_find_by_lun, as a sanity check to ensure I_T_L commands are completed upon completing lun reset. Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 111 ++++++++++++++++++++++++++++++++++- 1 file changed, 109 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 5394f6196416..f45837630b24 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -883,6 +883,41 @@ _scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id, return found; } +/** + * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun + * @ioc: per adapter object + * @id: target id + * @lun: lun number + * @channel: channel + * Context: This function will acquire ioc->scsi_lookup_lock. + * + * This will search for a matching channel:id:lun in the scsi_lookup array, + * returning 1 if found. + */ +static u8 +_scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id, + unsigned int lun, int channel) +{ + u8 found; + unsigned long flags; + int i; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + found = 0; + for (i = 0 ; i < ioc->request_depth; i++) { + if (ioc->scsi_lookup[i].scmd && + (ioc->scsi_lookup[i].scmd->device->id == id && + ioc->scsi_lookup[i].scmd->device->channel == channel && + ioc->scsi_lookup[i].scmd->device->lun == lun)) { + found = 1; + goto out; + } + } + out: + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return found; +} + /** * _scsih_get_chain_buffer_dma - obtain block of chains (dma address) * @ioc: per adapter object @@ -1889,7 +1924,6 @@ scsih_abort(scmd) return r; } - /** * scsih_dev_reset - eh threads main device reset routine * @sdev: scsi device struct * @@ -1906,7 +1940,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd) u16 handle; int r; - printk(MPT2SAS_INFO_FMT "attempting target reset! scmd(%p)\n", + printk(MPT2SAS_INFO_FMT "attempting device reset! scmd(%p)\n", ioc->name, scmd); scsi_print_command(scmd); @@ -1939,6 +1973,78 @@ scsih_dev_reset(struct scsi_cmnd *scmd) goto out; } + mutex_lock(&ioc->tm_cmds.mutex); + mpt2sas_scsih_issue_tm(ioc, handle, 0, + MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun, + 30); + + /* + * sanity check see whether all commands to this device been + * completed + */ + if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id, + scmd->device->lun, scmd->device->channel)) + r = FAILED; + else + r = SUCCESS; + ioc->tm_cmds.status = MPT2_CMD_NOT_USED; + mutex_unlock(&ioc->tm_cmds.mutex); + + out: + printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n", + ioc->name, ((r == SUCCESS) ?
"SUCCESS" : "FAILED"), scmd); + return r; +} + +/** + * scsih_target_reset - eh threads main target reset routine + * @sdev: scsi device struct + * + * Returns SUCCESS if command aborted else FAILED + */ +static int +scsih_target_reset(struct scsi_cmnd *scmd) +{ + struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT2SAS_DEVICE *sas_device_priv_data; + struct _sas_device *sas_device; + unsigned long flags; + u16 handle; + int r; + + printk(MPT2SAS_INFO_FMT "attempting target reset! scmd(%p)\n", + ioc->name, scmd); + scsi_print_command(scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + printk(MPT2SAS_INFO_FMT "target been deleted! scmd(%p)\n", + ioc->name, scmd); + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + r = SUCCESS; + goto out; + } + + /* for hidden raid components obtain the volume_handle */ + handle = 0; + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = _scsih_sas_device_find_by_handle(ioc, + sas_device_priv_data->sas_target->handle); + if (sas_device) + handle = sas_device->volume_handle; + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } else + handle = sas_device_priv_data->sas_target->handle; + + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + mutex_lock(&ioc->tm_cmds.mutex); mpt2sas_scsih_issue_tm(ioc, handle, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30); @@ -5255,6 +5361,7 @@ static struct scsi_host_template scsih_driver_template = { .change_queue_type = scsih_change_queue_type, .eh_abort_handler = scsih_abort, .eh_device_reset_handler = scsih_dev_reset, + .eh_target_reset_handler = scsih_target_reset, .eh_host_reset_handler = scsih_host_reset, .bios_param = scsih_bios_param, .can_queue = 1, -- cgit v1.2.3 From ddf59a35e98aa12255ed64c892271339504cc65c Mon Sep 17 00:00:00 2001 From: Eric Moore Date: Mon, 18 May 2009 13:01:29 -0600 Subject: [SCSI] mpt2sas: add query task support for MPT2COMMAND ioctl This patch finds an active mid for a query_task request via the ioctl path. This code is already there for task_abort, so this patch combines the two paths in the same function, _ctl_set_task_mid(), previously _ctl_do_task_abort(). Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_ctl.c | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index ba6ab170bdf0..14e473d1fa7b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -473,7 +473,7 @@ _ctl_poll(struct file *filep, poll_table *wait) } /** - * _ctl_do_task_abort - assign an active smid to the abort_task + * _ctl_set_task_mid - assign an active smid to tm request * @ioc: per adapter object * @karg - (struct mpt2_ioctl_command) * @tm_request - pointer to mf from user space * * during failure, the reply frame is filled.
*/ static int -_ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, +_ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, Mpi2SCSITaskManagementRequest_t *tm_request) { u8 found = 0; @@ -494,6 +494,14 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, Mpi2SCSITaskManagementReply_t *tm_reply; u32 sz; u32 lun; + char *desc = NULL; + + if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + desc = "abort_task"; + else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) + desc = "query_task"; + else + return 0; lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); @@ -517,13 +525,13 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); if (!found) { - dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " - "DevHandle(0x%04x), lun(%d), no active mid!!\n", ioc->name, - tm_request->DevHandle, lun)); + dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " + "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name, + desc, tm_request->DevHandle, lun)); tm_reply = ioc->ctl_cmds.reply; tm_reply->DevHandle = tm_request->DevHandle; tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; - tm_reply->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; + tm_reply->TaskType = tm_request->TaskType; tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4; tm_reply->VP_ID = tm_request->VP_ID; tm_reply->VF_ID = tm_request->VF_ID; @@ -535,9 +543,9 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, return 1; } - dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " - "DevHandle(0x%04x), lun(%d), smid(%d)\n", ioc->name, - tm_request->DevHandle, lun, tm_request->TaskMID)); + dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " - wait + "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name, + desc, tm_request->DevHandle, lun, tm_request->TaskMID)); return 0; } @@ -739,8 +747,10 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, (Mpi2SCSITaskManagementRequest_t *)mpi_request; if (tm_request->TaskType == - MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { - if (_ctl_do_task_abort(ioc, &karg, tm_request)) { + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || + tm_request->TaskType == + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) { + if (_ctl_set_task_mid(ioc, &karg, tm_request)) { mpt2sas_base_free_smid(ioc, smid); goto out; } -- cgit v1.2.3 From d5d135b3a76750df250e18956476b6b2c4ad97c2 Mon Sep 17 00:00:00 2001 From: Eric Moore Date: Mon, 18 May 2009 13:02:08 -0600 Subject: [SCSI] mpt2sas: using the same naming convention for all static functions This fix is for all local functions, so that their names have a "_" preceding the module name, then the function name. Most of the code is already using this naming convention. Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 142 +++++++++++++++---------------- drivers/scsi/mpt2sas/mpt2sas_transport.c | 36 ++++---- 2 files changed, 89 insertions(+), 89 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index f45837630b24..2a01a5f2a84d 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -197,12 +197,12 @@ static struct pci_device_id scsih_pci_table[] = { MODULE_DEVICE_TABLE(pci, scsih_pci_table); /** - * scsih_set_debug_level - global setting of ioc->logging_level.
+ * _scsih_set_debug_level - global setting of ioc->logging_level. * * Note: The logging levels are defined in mpt2sas_debug.h. */ static int -scsih_set_debug_level(const char *val, struct kernel_param *kp) +_scsih_set_debug_level(const char *val, struct kernel_param *kp) { int ret = param_set_int(val, kp); struct MPT2SAS_ADAPTER *ioc; @@ -215,7 +215,7 @@ scsih_set_debug_level(const char *val, struct kernel_param *kp) ioc->logging_level = logging_level; return 0; } -module_param_call(logging_level, scsih_set_debug_level, param_get_int, +module_param_call(logging_level, _scsih_set_debug_level, param_get_int, &logging_level, 0644); /** @@ -1082,14 +1082,14 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc, } /** - * scsih_change_queue_depth - setting device queue depth + * _scsih_change_queue_depth - setting device queue depth * @sdev: scsi device struct * @qdepth: requested queue depth * * Returns queue depth. */ static int -scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) +_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) { struct Scsi_Host *shost = sdev->host; int max_depth; @@ -1114,14 +1114,14 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) } /** - * scsih_change_queue_depth - changing device queue tag type + * _scsih_change_queue_depth - changing device queue tag type * @sdev: scsi device struct * @tag_type: requested tag type * * Returns queue tag type. */ static int -scsih_change_queue_type(struct scsi_device *sdev, int tag_type) +_scsih_change_queue_type(struct scsi_device *sdev, int tag_type) { if (sdev->tagged_supported) { scsi_set_tag_type(sdev, tag_type); @@ -1136,14 +1136,14 @@ scsih_change_queue_type(struct scsi_device *sdev, int tag_type) } /** - * scsih_target_alloc - target add routine + * _scsih_target_alloc - target add routine * @starget: scsi target struct * * Returns 0 if ok. Any other return is assumed to be an error and * the device is ignored. */ static int -scsih_target_alloc(struct scsi_target *starget) +_scsih_target_alloc(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(&starget->dev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -1198,13 +1198,13 @@ scsih_target_alloc(struct scsi_target *starget) } /** - * scsih_target_destroy - target destroy routine + * _scsih_target_destroy - target destroy routine * @starget: scsi target struct * * Returns nothing. */ static void -scsih_target_destroy(struct scsi_target *starget) +_scsih_target_destroy(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(&starget->dev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -1247,14 +1247,14 @@ scsih_target_destroy(struct scsi_target *starget) } /** - * scsih_slave_alloc - device add routine + * _scsih_slave_alloc - device add routine * @sdev: scsi device struct * * Returns 0 if ok. Any other return is assumed to be an error and * the device is ignored. */ static int -scsih_slave_alloc(struct scsi_device *sdev) +_scsih_slave_alloc(struct scsi_device *sdev) { struct Scsi_Host *shost; struct MPT2SAS_ADAPTER *ioc; @@ -1308,13 +1308,13 @@ scsih_slave_alloc(struct scsi_device *sdev) } /** - * scsih_slave_destroy - device destroy routine + * _scsih_slave_destroy - device destroy routine * @sdev: scsi device struct * * Returns nothing. 
*/ static void -scsih_slave_destroy(struct scsi_device *sdev) +_scsih_slave_destroy(struct scsi_device *sdev) { struct MPT2SAS_TARGET *sas_target_priv_data; struct scsi_target *starget; @@ -1330,13 +1330,13 @@ scsih_slave_destroy(struct scsi_device *sdev) } /** - * scsih_display_sata_capabilities - sata capabilities + * _scsih_display_sata_capabilities - sata capabilities * @ioc: per adapter object * @sas_device: the sas_device object * @sdev: scsi device struct */ static void -scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc, +_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc, struct _sas_device *sas_device, struct scsi_device *sdev) { Mpi2ConfigReply_t mpi_reply; @@ -1436,14 +1436,14 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc, } /** - * scsih_slave_configure - device configure routine. + * _scsih_slave_configure - device configure routine. * @sdev: scsi device struct * * Returns 0 if ok. Any other return is assumed to be an error and * the device is ignored. */ static int -scsih_slave_configure(struct scsi_device *sdev) +_scsih_slave_configure(struct scsi_device *sdev) { struct Scsi_Host *shost = sdev->host; struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -1524,7 +1524,7 @@ scsih_slave_configure(struct scsi_device *sdev) r_level, raid_device->handle, (unsigned long long)raid_device->wwid, raid_device->num_pds, ds); - scsih_change_queue_depth(sdev, qdepth); + _scsih_change_queue_depth(sdev, qdepth); return 0; } @@ -1567,10 +1567,10 @@ scsih_slave_configure(struct scsi_device *sdev) sas_device->slot); if (!ssp_target) - scsih_display_sata_capabilities(ioc, sas_device, sdev); + _scsih_display_sata_capabilities(ioc, sas_device, sdev); } - scsih_change_queue_depth(sdev, qdepth); + _scsih_change_queue_depth(sdev, qdepth); if (ssp_target) sas_read_port_mode_page(sdev); @@ -1578,7 +1578,7 @@ scsih_slave_configure(struct scsi_device *sdev) } /** - * scsih_bios_param - fetch head, sector, cylinder info for a disk + * _scsih_bios_param - fetch head, sector, cylinder info for a disk * @sdev: scsi device struct * @bdev: pointer to block device context * @capacity: device size (in 512 byte sectors) @@ -1590,7 +1590,7 @@ scsih_slave_configure(struct scsi_device *sdev) * Return nothing. */ static int -scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, +_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int params[]) { int heads; @@ -1671,7 +1671,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code) } /** - * scsih_tm_done - tm completion routine + * _scsih_tm_done - tm completion routine * @ioc: per adapter object * @smid: system request message index * @VF_ID: virtual function id @@ -1683,7 +1683,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code) * Return nothing. 
*/ static void -scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) +_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) { MPI2DefaultReply_t *mpi_reply; @@ -1858,13 +1858,13 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun, } /** - * scsih_abort - eh threads main abort routine + * _scsih_abort - eh threads main abort routine * @sdev: scsi device struct * * Returns SUCCESS if command aborted else FAILED */ static int -scsih_abort(struct scsi_cmnd *scmd) +_scsih_abort(struct scsi_cmnd *scmd) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; @@ -1925,13 +1925,13 @@ scsih_abort(struct scsi_cmnd *scmd) } /** - * scsih_dev_reset - eh threads main device reset routine + * _scsih_dev_reset - eh threads main device reset routine * @sdev: scsi device struct * * Returns SUCCESS if command aborted else FAILED */ static int -scsih_dev_reset(struct scsi_cmnd *scmd) +_scsih_dev_reset(struct scsi_cmnd *scmd) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; @@ -1997,13 +1997,13 @@ scsih_dev_reset(struct scsi_cmnd *scmd) } /** - * scsih_target_reset - eh threads main target reset routine + * _scsih_target_reset - eh threads main target reset routine * @sdev: scsi device struct * * Returns SUCCESS if command aborted else FAILED */ static int -scsih_target_reset(struct scsi_cmnd *scmd) +_scsih_target_reset(struct scsi_cmnd *scmd) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; @@ -2068,13 +2068,13 @@ scsih_target_reset(struct scsi_cmnd *scmd) } /** - * scsih_abort - eh threads main host reset routine + * _scsih_abort - eh threads main host reset routine * @sdev: scsi device struct * * Returns SUCCESS if command aborted else FAILED */ static int -scsih_host_reset(struct scsi_cmnd *scmd) +_scsih_host_reset(struct scsi_cmnd *scmd) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); int r, retval; @@ -2596,7 +2596,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) } /** - * scsih_qcmd - main scsi request entry point + * _scsih_qcmd - main scsi request entry point * @scmd: pointer to scsi command object * @done: function pointer to be invoked on completion * @@ -2607,7 +2607,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full */ static int -scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) +_scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; @@ -2999,7 +2999,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle) } /** - * scsih_io_done - scsi request callback + * _scsih_io_done - scsi request callback * @ioc: per adapter object * @smid: system request message index * @VF_ID: virtual function id @@ -3010,7 +3010,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle) * Return nothing. 
*/ static void -scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) +_scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) { Mpi2SCSIIORequest_t *mpi_request; Mpi2SCSIIOReply_t *mpi_reply; @@ -5351,19 +5351,19 @@ static struct scsi_host_template scsih_driver_template = { .module = THIS_MODULE, .name = "Fusion MPT SAS Host", .proc_name = MPT2SAS_DRIVER_NAME, - .queuecommand = scsih_qcmd, - .target_alloc = scsih_target_alloc, - .slave_alloc = scsih_slave_alloc, - .slave_configure = scsih_slave_configure, - .target_destroy = scsih_target_destroy, - .slave_destroy = scsih_slave_destroy, - .change_queue_depth = scsih_change_queue_depth, - .change_queue_type = scsih_change_queue_type, - .eh_abort_handler = scsih_abort, - .eh_device_reset_handler = scsih_dev_reset, - .eh_target_reset_handler = scsih_target_reset, - .eh_host_reset_handler = scsih_host_reset, - .bios_param = scsih_bios_param, + .queuecommand = _scsih_qcmd, + .target_alloc = _scsih_target_alloc, + .slave_alloc = _scsih_slave_alloc, + .slave_configure = _scsih_slave_configure, + .target_destroy = _scsih_target_destroy, + .slave_destroy = _scsih_slave_destroy, + .change_queue_depth = _scsih_change_queue_depth, + .change_queue_type = _scsih_change_queue_type, + .eh_abort_handler = _scsih_abort, + .eh_device_reset_handler = _scsih_dev_reset, + .eh_target_reset_handler = _scsih_target_reset, + .eh_host_reset_handler = _scsih_host_reset, + .bios_param = _scsih_bios_param, .can_queue = 1, .this_id = -1, .sg_tablesize = MPT2SAS_SG_DEPTH, @@ -5450,13 +5450,13 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, } /** - * scsih_remove - detach and remove add host + * _scsih_remove - detach and remove add host * @pdev: PCI device struct * * Return nothing. */ static void __devexit -scsih_remove(struct pci_dev *pdev) +_scsih_remove(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -5664,14 +5664,14 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc) } /** - * scsih_probe - attach and add scsi host + * _scsih_probe - attach and add scsi host * @pdev: PCI device struct * @id: pci device id * * Returns 0 success, anything else error. */ static int -scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) +_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct MPT2SAS_ADAPTER *ioc; struct Scsi_Host *shost; @@ -5761,14 +5761,14 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) #ifdef CONFIG_PM /** - * scsih_suspend - power management suspend main entry point + * _scsih_suspend - power management suspend main entry point * @pdev: PCI device struct * @state: PM state change to (usually PCI_D3) * * Returns 0 success, anything else error. */ static int -scsih_suspend(struct pci_dev *pdev, pm_message_t state) +_scsih_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -5789,13 +5789,13 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state) } /** - * scsih_resume - power management resume main entry point + * _scsih_resume - power management resume main entry point * @pdev: PCI device struct * * Returns 0 success, anything else error. 
*/ static int -scsih_resume(struct pci_dev *pdev) +_scsih_resume(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -5824,22 +5824,22 @@ scsih_resume(struct pci_dev *pdev) static struct pci_driver scsih_driver = { .name = MPT2SAS_DRIVER_NAME, .id_table = scsih_pci_table, - .probe = scsih_probe, - .remove = __devexit_p(scsih_remove), + .probe = _scsih_probe, + .remove = __devexit_p(_scsih_remove), #ifdef CONFIG_PM - .suspend = scsih_suspend, - .resume = scsih_resume, + .suspend = _scsih_suspend, + .resume = _scsih_resume, #endif }; /** - * scsih_init - main entry point for this driver. + * _scsih_init - main entry point for this driver. * * Returns 0 success, anything else error. */ static int __init -scsih_init(void) +_scsih_init(void) { int error; @@ -5855,10 +5855,10 @@ scsih_init(void) mpt2sas_base_initialize_callback_handler(); /* queuecommand callback hander */ - scsi_io_cb_idx = mpt2sas_base_register_callback_handler(scsih_io_done); + scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done); /* task managment callback handler */ - tm_cb_idx = mpt2sas_base_register_callback_handler(scsih_tm_done); + tm_cb_idx = mpt2sas_base_register_callback_handler(_scsih_tm_done); /* base internal commands callback handler */ base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done); @@ -5884,12 +5884,12 @@ scsih_init(void) } /** - * scsih_exit - exit point for this driver (when it is a module). + * _scsih_exit - exit point for this driver (when it is a module). * * Returns 0 success, anything else error. */ static void __exit -scsih_exit(void) +_scsih_exit(void) { printk(KERN_INFO "mpt2sas version %s unloading\n", MPT2SAS_DRIVER_VERSION); @@ -5907,5 +5907,5 @@ scsih_exit(void) mpt2sas_ctl_exit(); } -module_init(scsih_init); -module_exit(scsih_exit); +module_init(_scsih_init); +module_exit(_scsih_exit); diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index e03dc0b1e1a0..e4d858617c8d 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -264,7 +264,7 @@ struct rep_manu_reply{ }; /** - * transport_expander_report_manufacture - obtain SMP report_manufacture + * _transport_expander_report_manufacture - obtain SMP report_manufacture * @ioc: per adapter object * @sas_address: expander sas address * @edev: the sas_expander_device object @@ -274,7 +274,7 @@ struct rep_manu_reply{ * Returns 0 for success, non-zero for failure. */ static int -transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, +_transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, struct sas_expander_device *edev) { Mpi2SmpPassthroughRequest_t *mpi_request; @@ -578,7 +578,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle, MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || mpt2sas_port->remote_identify.device_type == MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) - transport_expander_report_manufacture(ioc, + _transport_expander_report_manufacture(ioc, mpt2sas_port->remote_identify.sas_address, rphy_to_expander_device(rphy)); @@ -852,7 +852,7 @@ rphy_to_ioc(struct sas_rphy *rphy) } /** - * transport_get_linkerrors - + * _transport_get_linkerrors - * @phy: The sas phy object * * Only support sas_host direct attached phys. 
@@ -860,7 +860,7 @@ rphy_to_ioc(struct sas_rphy *rphy) * */ static int -transport_get_linkerrors(struct sas_phy *phy) +_transport_get_linkerrors(struct sas_phy *phy) { struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); struct _sas_phy *mpt2sas_phy; @@ -903,14 +903,14 @@ transport_get_linkerrors(struct sas_phy *phy) } /** - * transport_get_enclosure_identifier - + * _transport_get_enclosure_identifier - * @phy: The sas phy object * * Obtain the enclosure logical id for an expander. * Returns 0 for success, non-zero for failure. */ static int -transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) +_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) { struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); struct _sas_node *sas_expander; @@ -929,13 +929,13 @@ transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) } /** - * transport_get_bay_identifier - + * _transport_get_bay_identifier - * @phy: The sas phy object * * Returns the slot id for a device that resides inside an enclosure. */ static int -transport_get_bay_identifier(struct sas_rphy *rphy) +_transport_get_bay_identifier(struct sas_rphy *rphy) { struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); struct _sas_device *sas_device; @@ -953,7 +953,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy) } /** - * transport_phy_reset - + * _transport_phy_reset - * @phy: The sas phy object * @hard_reset: * @@ -961,7 +961,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy) * Returns 0 for success, non-zero for failure. */ static int -transport_phy_reset(struct sas_phy *phy, int hard_reset) +_transport_phy_reset(struct sas_phy *phy, int hard_reset) { struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); struct _sas_phy *mpt2sas_phy; @@ -1002,7 +1002,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset) } /** - * transport_smp_handler - transport portal for smp passthru + * _transport_smp_handler - transport portal for smp passthru * @shost: shost object * @rphy: sas transport rphy object * @req: @@ -1012,7 +1012,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset) * smp_rep_general /sys/class/bsg/expander-5:0 */ static int -transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, +_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, struct request *req) { struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -1201,11 +1201,11 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, } struct sas_function_template mpt2sas_transport_functions = { - .get_linkerrors = transport_get_linkerrors, - .get_enclosure_identifier = transport_get_enclosure_identifier, - .get_bay_identifier = transport_get_bay_identifier, - .phy_reset = transport_phy_reset, - .smp_handler = transport_smp_handler, + .get_linkerrors = _transport_get_linkerrors, + .get_enclosure_identifier = _transport_get_enclosure_identifier, + .get_bay_identifier = _transport_get_bay_identifier, + .phy_reset = _transport_phy_reset, + .smp_handler = _transport_smp_handler, }; struct scsi_transport_template *mpt2sas_transport_template; -- cgit v1.2.3 From d17bf602fbfe4a5ab134a90f6c082fa161d398ee Mon Sep 17 00:00:00 2001 From: Eric Moore Date: Mon, 18 May 2009 13:02:49 -0600 Subject: [SCSI] mpt2sas: bump driver version to 01.100.03.00 Bump driver version. 
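As a side note on the convention the rename above enforces: file-local helpers stay static and carry a leading "_" plus the module prefix, while the only externally visible surface is the ops table that points at them. A compilable toy version of that structure (illustrative names only, not the driver's real API):

#include <stdio.h>

struct demo_ops {
        int (*get_linkerrors)(int phy);
        int (*phy_reset)(int phy, int hard);
};

/* file-local: static, "_" prefix, module name, then function name */
static int _demo_get_linkerrors(int phy)
{
        printf("link errors for phy %d\n", phy);
        return 0;
}

static int _demo_phy_reset(int phy, int hard)
{
        printf("%s reset of phy %d\n", hard ? "hard" : "link", phy);
        return 0;
}

/* the one non-static symbol; everything else stays file-local */
struct demo_ops demo_transport_functions = {
        .get_linkerrors = _demo_get_linkerrors,
        .phy_reset      = _demo_phy_reset,
};

int main(void)
{
        demo_transport_functions.get_linkerrors(0);
        demo_transport_functions.phy_reset(0, 1);
        return 0;
}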
Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index aba81fb320b7..286c185fa9e4 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -69,10 +69,10 @@ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_AUTHOR "LSI Corporation " #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "01.100.02.00" +#define MPT2SAS_DRIVER_VERSION "01.100.03.00" #define MPT2SAS_MAJOR_VERSION 01 #define MPT2SAS_MINOR_VERSION 100 -#define MPT2SAS_BUILD_VERSION 02 +#define MPT2SAS_BUILD_VERSION 03 #define MPT2SAS_RELEASE_VERSION 00 /* -- cgit v1.2.3 From a366695592ebc9151dd5a248681270f0925d8324 Mon Sep 17 00:00:00 2001 From: Abhijeet Joglekar Date: Fri, 1 May 2009 10:01:26 -0700 Subject: [SCSI] libfc,fcoe,fnic: Separate rport and lport max retry counts This allows fnic to configure the number of retries for lport and rport separately. Signed-off-by: Abhijeet Joglekar Acked-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 1 + drivers/scsi/fnic/fnic_main.c | 1 + drivers/scsi/libfc/fc_rport.c | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index d08121f246c3..6acb7778f557 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -146,6 +146,7 @@ static int fcoe_lport_config(struct fc_lport *lp) lp->link_up = 0; lp->qfull = 0; lp->max_retry_count = 3; + lp->max_rport_retry_count = 3; lp->e_d_tov = 2 * 1000; /* FC-FS default */ lp->r_a_tov = 2 * 2 * 1000; lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 32ef6b87d895..a84072865fc2 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -680,6 +680,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev, } lp->max_retry_count = fnic->config.flogi_retries; + lp->max_rport_retry_count = fnic->config.plogi_retries; lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | FCP_SPPF_CONF_COMPL); if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 3f5094ebc397..7bfbff7e0efb 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -478,7 +478,7 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp) if (PTR_ERR(fp) == -FC_EX_CLOSED) return fc_rport_error(rport, fp); - if (rdata->retries < rdata->local_port->max_retry_count) { + if (rdata->retries < rdata->local_port->max_rport_retry_count) { FC_DEBUG_RPORT("Error %ld in state %s, retrying\n", PTR_ERR(fp), fc_rport_state(rport)); rdata->retries++; -- cgit v1.2.3 From 6a68afe3a2971953e218e509b16eae0ece43f9ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Ha=C5=82asa?= Date: Sat, 23 May 2009 23:14:10 +0200 Subject: IXP4xx: Ethernet and WAN drivers now support "high" hardware queues.
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Krzysztof Hałasa --- drivers/net/arm/ixp4xx_eth.c | 15 +++++++++------ drivers/net/wan/ixp4xx_hss.c | 10 ++++++---- 2 files changed, 15 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index a740053d3af3..d304c731c470 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -456,7 +456,8 @@ static inline void queue_put_desc(unsigned int queue, u32 phys, debug_desc(phys, desc); BUG_ON(phys & 0x1F); qmgr_put_entry(queue, phys); - BUG_ON(qmgr_stat_overflow(queue)); + /* Don't check for queue overflow here, we've allocated sufficient + length and queues >= 32 don't support this check anyway. */ } @@ -512,8 +513,8 @@ static int eth_poll(struct napi_struct *napi, int budget) #endif napi_complete(napi); qmgr_enable_irq(rxq); - if (!qmgr_stat_empty(rxq) && - napi_reschedule(napi)) { + if (!qmgr_stat_nearly_empty(rxq) && + napi_reschedule(napi)) { /* really empty in fact */ #if DEBUG_RX printk(KERN_DEBUG "%s: eth_poll" " napi_reschedule successed\n", @@ -630,7 +631,8 @@ static void eth_txdone_irq(void *unused) port->tx_buff_tab[n_desc] = NULL; } - start = qmgr_stat_empty(port->plat->txreadyq); + /* really empty in fact */ + start = qmgr_stat_nearly_empty(port->plat->txreadyq); queue_put_desc(port->plat->txreadyq, phys, desc); if (start) { #if DEBUG_TX @@ -708,13 +710,14 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev) queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc); dev->trans_start = jiffies; - if (qmgr_stat_empty(txreadyq)) { + if (qmgr_stat_nearly_empty(txreadyq)) { /* really empty in fact */ #if DEBUG_TX printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name); #endif netif_stop_queue(dev); /* we could miss TX ready interrupt */ - if (!qmgr_stat_empty(txreadyq)) { + /* really empty in fact */ + if (!qmgr_stat_nearly_empty(txreadyq)) { #if DEBUG_TX printk(KERN_DEBUG "%s: eth_xmit ready again\n", dev->name); diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 765a7f5d6aa4..1e56e58c6605 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -579,7 +579,8 @@ static inline void queue_put_desc(unsigned int queue, u32 phys, debug_desc(phys, desc); BUG_ON(phys & 0x1F); qmgr_put_entry(queue, phys); - BUG_ON(qmgr_stat_overflow(queue)); + /* Don't check for queue overflow here, we've allocated sufficient + length and queues >= 32 don't support this check anyway. 
*/ } @@ -789,7 +790,8 @@ static void hss_hdlc_txdone_irq(void *pdev) free_buffer_irq(port->tx_buff_tab[n_desc]); port->tx_buff_tab[n_desc] = NULL; - start = qmgr_stat_empty(port->plat->txreadyq); + /* really empty in fact */ + start = qmgr_stat_nearly_empty(port->plat->txreadyq); queue_put_desc(port->plat->txreadyq, tx_desc_phys(port, n_desc), desc); if (start) { @@ -867,13 +869,13 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev) queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc); dev->trans_start = jiffies; - if (qmgr_stat_empty(txreadyq)) { + if (qmgr_stat_nearly_empty(txreadyq)) { /* really empty in fact */ #if DEBUG_TX printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name); #endif netif_stop_queue(dev); /* we could miss TX ready interrupt */ - if (!qmgr_stat_empty(txreadyq)) { + if (!qmgr_stat_nearly_empty(txreadyq)) { #if DEBUG_TX printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n", dev->name); -- cgit v1.2.3 From 2e418400728a9fcacb2ab75f0547584a56b8a584 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Ha=C5=82asa?= Date: Sat, 23 May 2009 23:14:59 +0200 Subject: IXP4xx: Whitespace fixes in the Ethernet driver. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Krzysztof Hałasa --- drivers/net/arm/ixp4xx_eth.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index d304c731c470..672c9626b9ca 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -817,29 +817,29 @@ static int request_queues(struct port *port) int err; err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0, - "%s:RX-free", port->netdev->name); + "%s:RX-free", port->netdev->name); if (err) return err; err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0, - "%s:RX", port->netdev->name); + "%s:RX", port->netdev->name); if (err) goto rel_rxfree; err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0, - "%s:TX", port->netdev->name); + "%s:TX", port->netdev->name); if (err) goto rel_rx; err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0, - "%s:TX-ready", port->netdev->name); + "%s:TX-ready", port->netdev->name); if (err) goto rel_tx; /* TX-done queue handles skbs sent out by the NPEs */ if (!ports_open) { err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0, - "%s:TX-done", DRV_NAME); + "%s:TX-done", DRV_NAME); if (err) goto rel_txready; } -- cgit v1.2.3 From 9733bb8e9ce0078f55405ce470a62ec0a551fe99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Ha=C5=82asa?= Date: Mon, 25 May 2009 13:25:34 +0200 Subject: IXP4xx: Change QMgr function names to qmgr_stat_*_watermark and clean the comments. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Krzysztof Hałasa --- drivers/net/arm/ixp4xx_eth.c | 13 ++++++------- drivers/net/wan/ixp4xx_hss.c | 9 ++++----- 2 files changed, 10 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index 672c9626b9ca..b6d188115caf 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -513,8 +513,8 @@ static int eth_poll(struct napi_struct *napi, int budget) #endif napi_complete(napi); qmgr_enable_irq(rxq); - if (!qmgr_stat_nearly_empty(rxq) && - napi_reschedule(napi)) { /* really empty in fact */ + if (!qmgr_stat_below_low_watermark(rxq) && + napi_reschedule(napi)) { /* not empty again */ #if DEBUG_RX printk(KERN_DEBUG "%s: eth_poll" " napi_reschedule successed\n", @@ -631,10 +631,9 @@ static void eth_txdone_irq(void *unused) port->tx_buff_tab[n_desc] = NULL; } - /* really empty in fact */ - start = qmgr_stat_nearly_empty(port->plat->txreadyq); + start = qmgr_stat_below_low_watermark(port->plat->txreadyq); queue_put_desc(port->plat->txreadyq, phys, desc); - if (start) { + if (start) { /* TX-ready queue was empty */ #if DEBUG_TX printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n", port->netdev->name); @@ -710,14 +709,14 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev) queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc); dev->trans_start = jiffies; - if (qmgr_stat_nearly_empty(txreadyq)) { /* really empty in fact */ + if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */ #if DEBUG_TX printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name); #endif netif_stop_queue(dev); /* we could miss TX ready interrupt */ /* really empty in fact */ - if (!qmgr_stat_nearly_empty(txreadyq)) { + if (!qmgr_stat_below_low_watermark(txreadyq)) { #if DEBUG_TX printk(KERN_DEBUG "%s: eth_xmit ready again\n", dev->name); diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 1e56e58c6605..a6dc317083d3 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -790,11 +790,10 @@ static void hss_hdlc_txdone_irq(void *pdev) free_buffer_irq(port->tx_buff_tab[n_desc]); port->tx_buff_tab[n_desc] = NULL; - /* really empty in fact */ - start = qmgr_stat_nearly_empty(port->plat->txreadyq); + start = qmgr_stat_below_low_watermark(port->plat->txreadyq); queue_put_desc(port->plat->txreadyq, tx_desc_phys(port, n_desc), desc); - if (start) { + if (start) { /* TX-ready queue was empty */ #if DEBUG_TX printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit" " ready\n", dev->name); @@ -869,13 +868,13 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev) queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc); dev->trans_start = jiffies; - if (qmgr_stat_nearly_empty(txreadyq)) { /* really empty in fact */ + if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */ #if DEBUG_TX printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name); #endif netif_stop_queue(dev); /* we could miss TX ready interrupt */ - if (!qmgr_stat_nearly_empty(txreadyq)) { + if (!qmgr_stat_below_low_watermark(txreadyq)) { #if DEBUG_TX printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n", dev->name); -- cgit v1.2.3 From c1f67a88bf62fac0f4151c007b361199c2cd1988 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 27 May 2009 14:36:16 -0700 Subject: IB/mthca: Add module parameter for number of MTTs per segment The current MTT allocator uses kmalloc() to allocate a 
buffer for its buddy allocator, and thus is limited in the amount of MTT segments that it can control. As a result, the size of memory that can be registered is limited too. This patch uses a module parameter to control the number of MTT entries that each segment represents, allowing more memory to be registered with the same number of segments. Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mthca/mthca_cmd.c | 2 +- drivers/infiniband/hw/mthca/mthca_dev.h | 1 + drivers/infiniband/hw/mthca/mthca_main.c | 17 ++++++++++++++--- drivers/infiniband/hw/mthca/mthca_mr.c | 16 ++++++++-------- drivers/infiniband/hw/mthca/mthca_profile.c | 4 ++-- 5 files changed, 26 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index 6d55f9d748f6..8c2ed994d540 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -1059,7 +1059,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET); if (mthca_is_memfree(dev)) dev_lim->reserved_mtts = ALIGN((1 << (field >> 4)) * sizeof(u64), - MTHCA_MTT_SEG_SIZE) / MTHCA_MTT_SEG_SIZE; + dev->limits.mtt_seg_size) / dev->limits.mtt_seg_size; else dev_lim->reserved_mtts = 1 << (field >> 4); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET); diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index 252590116df5..9ef611f6dd36 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -159,6 +159,7 @@ struct mthca_limits { int reserved_eqs; int num_mpts; int num_mtt_segs; + int mtt_seg_size; int fmr_reserved_mtts; int reserved_mtts; int reserved_mrws; diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 1d83cf7caf38..13da9f1d24c0 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -125,6 +125,10 @@ module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444); MODULE_PARM_DESC(fmr_reserved_mtts, "number of memory translation table segments reserved for FMR"); +static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8); +module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); +MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)"); + static char mthca_version[] __devinitdata = DRV_NAME ": Mellanox InfiniBand HCA driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; @@ -162,6 +166,7 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim) int err; u8 status; + mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8; err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status); if (err) { mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n"); @@ -460,11 +465,11 @@ static int mthca_init_icm(struct mthca_dev *mdev, } /* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */ - mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * MTHCA_MTT_SEG_SIZE, - dma_get_cache_alignment()) / MTHCA_MTT_SEG_SIZE; + mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size, + dma_get_cache_alignment()) / mdev->limits.mtt_seg_size; mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base, - MTHCA_MTT_SEG_SIZE, + mdev->limits.mtt_seg_size, mdev->limits.num_mtt_segs, mdev->limits.reserved_mtts, 1, 0); @@ -1315,6 +1320,12 @@ static void __init 
mthca_validate_profile(void) printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n", hca_profile.fmr_reserved_mtts); } + + if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) { + printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n", + log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8)); + log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8); + } } static int __init mthca_init(void) diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index 882e6b735915..d606edf10858 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c @@ -220,7 +220,7 @@ static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size, mtt->buddy = buddy; mtt->order = 0; - for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1) + for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1) ++mtt->order; mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy); @@ -267,7 +267,7 @@ static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, while (list_len > 0) { mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base + - mtt->first_seg * MTHCA_MTT_SEG_SIZE + + mtt->first_seg * dev->limits.mtt_seg_size + start_index * 8); mtt_entry[1] = 0; for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i) @@ -326,7 +326,7 @@ static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev, u64 __iomem *mtts; int i; - mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * MTHCA_MTT_SEG_SIZE + + mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size + start_index * sizeof (u64); for (i = 0; i < list_len; ++i) mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT), @@ -345,10 +345,10 @@ static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev, /* For Arbel, all MTTs must fit in the same page. 
*/ BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE); /* Require full segments */ - BUG_ON(s % MTHCA_MTT_SEG_SIZE); + BUG_ON(s % dev->limits.mtt_seg_size); mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg + - s / MTHCA_MTT_SEG_SIZE, &dma_handle); + s / dev->limits.mtt_seg_size, &dma_handle); BUG_ON(!mtts); @@ -479,7 +479,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, if (mr->mtt) mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base + - mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE); + mr->mtt->first_seg * dev->limits.mtt_seg_size); if (0) { mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey); @@ -626,7 +626,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, goto err_out_table; } - mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE; + mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size; if (mthca_is_memfree(dev)) { mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table, @@ -908,7 +908,7 @@ int mthca_init_mr_table(struct mthca_dev *dev) dev->mr_table.mtt_base); dev->mr_table.tavor_fmr.mtt_base = - ioremap(addr, mtts * MTHCA_MTT_SEG_SIZE); + ioremap(addr, mtts * dev->limits.mtt_seg_size); if (!dev->mr_table.tavor_fmr.mtt_base) { mthca_warn(dev, "MTT ioremap for FMR failed.\n"); err = -ENOMEM; diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c index d168c2540611..8edb28a9a0e7 100644 --- a/drivers/infiniband/hw/mthca/mthca_profile.c +++ b/drivers/infiniband/hw/mthca/mthca_profile.c @@ -94,7 +94,7 @@ s64 mthca_make_profile(struct mthca_dev *dev, profile[MTHCA_RES_RDB].size = MTHCA_RDB_ENTRY_SIZE; profile[MTHCA_RES_MCG].size = MTHCA_MGM_ENTRY_SIZE; profile[MTHCA_RES_MPT].size = dev_lim->mpt_entry_sz; - profile[MTHCA_RES_MTT].size = MTHCA_MTT_SEG_SIZE; + profile[MTHCA_RES_MTT].size = dev->limits.mtt_seg_size; profile[MTHCA_RES_UAR].size = dev_lim->uar_scratch_entry_sz; profile[MTHCA_RES_UDAV].size = MTHCA_AV_SIZE; profile[MTHCA_RES_UARC].size = request->uarc_size; @@ -232,7 +232,7 @@ s64 mthca_make_profile(struct mthca_dev *dev, dev->limits.num_mtt_segs = profile[i].num; dev->mr_table.mtt_base = profile[i].start; init_hca->mtt_base = profile[i].start; - init_hca->mtt_seg_sz = ffs(MTHCA_MTT_SEG_SIZE) - 7; + init_hca->mtt_seg_sz = ffs(dev->limits.mtt_seg_size) - 7; break; case MTHCA_RES_UAR: dev->limits.num_uars = profile[i].num; -- cgit v1.2.3 From ab6bf42e2339580b5d87746d0ff4da4b1578b03e Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 27 May 2009 14:38:34 -0700 Subject: mlx4_core: Add module parameter for number of MTTs per segment The current MTT allocator uses kmalloc() to allocate a buffer for its buddy allocator, and thus is limited in the amount of MTT segments that it can control. As a result, the size of memory that can be registered is limited too. This patch uses a module parameter to control the number of MTT entries that each segment represents, allowing more memory to be registered with the same number of segments. 
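As a rough back-of-the-envelope illustration of the trade-off described above (not taken from the patch itself): each MTT entry maps one page, so the registerable-memory ceiling grows linearly with the entries-per-segment count while the number of segments stays fixed. The following standalone C sketch makes that concrete; the segment count and the 4 KiB page size in it are assumed example values, not values read out of the driver.

/*
 * Hedged illustration only: shows how log_mtts_per_seg scales the
 * registration ceiling. num_mtt_segs and the 4 KiB page size are
 * assumed example values, not symbols from mthca/mlx4.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long long page_sz = 4096;            /* assumed page size */
	const unsigned long long num_mtt_segs = 1ULL << 20; /* example segment count */
	int log_mtts;

	for (log_mtts = 1; log_mtts <= 5; log_mtts++) {
		/* entries = segments * 2^log_mtts_per_seg */
		unsigned long long mtts = num_mtt_segs << log_mtts;
		printf("log_mtts_per_seg=%d: %llu MTT entries, up to %llu GiB\n",
		       log_mtts, mtts, (mtts * page_sz) >> 30);
	}
	return 0;
}

With a fixed 2^20 segments, raising log_mtts_per_seg from 1 to 5 raises the ceiling from 8 GiB to 128 GiB without growing the buddy-allocator buffer.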
Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier --- drivers/net/mlx4/main.c | 14 ++++++++++++-- drivers/net/mlx4/mr.c | 6 +++--- drivers/net/mlx4/profile.c | 2 +- 3 files changed, 16 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 30bea9689694..018348c01193 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -100,6 +100,10 @@ module_param_named(use_prio, use_prio, bool, 0444); MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " "(0/1, default 0)"); +static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); +module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); +MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)"); + int mlx4_check_port_params(struct mlx4_dev *dev, enum mlx4_port_type *port_type) { @@ -203,12 +207,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.max_cqes = dev_cap->max_cq_sz - 1; dev->caps.reserved_cqs = dev_cap->reserved_cqs; dev->caps.reserved_eqs = dev_cap->reserved_eqs; + dev->caps.mtts_per_seg = 1 << log_mtts_per_seg; dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts, - MLX4_MTT_ENTRY_PER_SEG); + dev->caps.mtts_per_seg); dev->caps.reserved_mrws = dev_cap->reserved_mrws; dev->caps.reserved_uars = dev_cap->reserved_uars; dev->caps.reserved_pds = dev_cap->reserved_pds; - dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz; + dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; dev->caps.max_msg_sz = dev_cap->max_msg_sz; dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); dev->caps.flags = dev_cap->flags; @@ -1304,6 +1309,11 @@ static int __init mlx4_verify_params(void) return -1; } + if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) { + printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); + return -1; + } + return 0; } diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index 0caf74cae8bc..3b8973d19933 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c @@ -209,7 +209,7 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, } else mtt->page_shift = page_shift; - for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1) + for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1) ++mtt->order; mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order); @@ -350,7 +350,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | MLX4_MPT_PD_FLAG_RAE); mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) * - MLX4_MTT_ENTRY_PER_SEG); + dev->caps.mtts_per_seg); } else { mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); } @@ -391,7 +391,7 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64))) return -EINVAL; - if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1)) + if (start_index & (dev->caps.mtts_per_seg - 1)) return -EINVAL; mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg + diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c index cebdf3243ca1..bd22df95adf9 100644 --- a/drivers/net/mlx4/profile.c +++ b/drivers/net/mlx4/profile.c @@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz; profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz; profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz; - 
profile[MLX4_RES_MTT].size = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz; + profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE; profile[MLX4_RES_QP].num = request->num_qp; -- cgit v1.2.3 From 7ab1a2b31d4a8b4f519ccff5a84c53a5b87fd1be Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Wed, 27 May 2009 14:42:36 -0700 Subject: RDMA/cxgb3: Report correct port state and MTU Signed-off-by: Steve Wise Signed-off-by: Roland Dreier --- drivers/infiniband/hw/cxgb3/iwch_provider.c | 32 +++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 160ef482712d..e2a63214008a 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -1152,12 +1153,39 @@ static int iwch_query_device(struct ib_device *ibdev, static int iwch_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { + struct iwch_dev *dev; + struct net_device *netdev; + struct in_device *inetdev; + PDBG("%s ibdev %p\n", __func__, ibdev); + dev = to_iwch_dev(ibdev); + netdev = dev->rdev.port_info.lldevs[port-1]; + memset(props, 0, sizeof(struct ib_port_attr)); props->max_mtu = IB_MTU_4096; - props->active_mtu = IB_MTU_2048; - props->state = IB_PORT_ACTIVE; + if (netdev->mtu >= 4096) + props->active_mtu = IB_MTU_4096; + else if (netdev->mtu >= 2048) + props->active_mtu = IB_MTU_2048; + else if (netdev->mtu >= 1024) + props->active_mtu = IB_MTU_1024; + else if (netdev->mtu >= 512) + props->active_mtu = IB_MTU_512; + else + props->active_mtu = IB_MTU_256; + + if (!netif_carrier_ok(netdev)) + props->state = IB_PORT_DOWN; + else { + inetdev = in_dev_get(netdev); + if (inetdev->ifa_list) + props->state = IB_PORT_ACTIVE; + else + props->state = IB_PORT_INIT; + in_dev_put(inetdev); + } + props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_SNMP_TUNNEL_SUP | -- cgit v1.2.3 From 3026c19a14ba71ccd4dc4925abab9395ea12839c Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Wed, 27 May 2009 14:43:39 -0700 Subject: RDMA/cxgb3: Limit fast register size based on T3 limitations T3 firmware only supports one WR's worth of page list for fast register work requests. The driver currently allows 2 WRs' worth, which doesn't work for T3, so reduce the limit in the driver. Signed-off-by: Steve Wise Signed-off-by: Roland Dreier --- drivers/infiniband/hw/cxgb3/cxio_wr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h index ff9be1a13106..32e3b1461d81 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_wr.h +++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h @@ -176,7 +176,7 @@ struct t3_send_wr { struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */ }; -#define T3_MAX_FASTREG_DEPTH 24 +#define T3_MAX_FASTREG_DEPTH 10 #define T3_MAX_FASTREG_FRAG 10 struct t3_fastreg_wr { -- cgit v1.2.3 From 8bd229492209c0c7d050e2f9a600c12f035d72f7 Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Thu, 28 May 2009 10:56:16 -0700 Subject: OMAP2/3: PM: push core PM code from linux-omap This patch syncs the core linux-omap PM code with mainline. This code has evolved and been used for a while in the linux-omap tree, but the attempt here is to finally get it into mainline.
Following this will be a series of patches from the 'PM branch' of the linux-omap tree to add full PM hardware support from the linux-omap tree. Much of this PM core code was written by Jouni Hogander with significant contributions from Paul Walmsley as well as many others from Nokia, Texas Instruments and the linux-omap community. Signed-off-by: Jouni Hogander Cc: Paul Walmsley Signed-off-by: Kevin Hilman --- drivers/mtd/onenand/omap2.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index f2e9de1414df..6391e3dc8002 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -39,7 +39,6 @@ #include #include #include -#include #include -- cgit v1.2.3 From 088962c243db42b9c608f30be3e3a05a5b696895 Mon Sep 17 00:00:00 2001 From: Andrew de Quincey Date: Thu, 28 May 2009 14:03:31 -0700 Subject: ARM: OMAP1: Make 770 LCD work Make 770 LCD work by adding clk_add_alias(). Also remove the old unused functions. Note that clk_add_alias() could probably be moved to arch/arm/clkdev.c later on. Cc: linux-fbdev-devel@lists.sourceforge.net Signed-off-by: Andrew de Quincey Signed-off-by: Imre Deak Signed-off-by: Tony Lindgren --- drivers/video/omap/hwa742.c (limited to 'drivers') diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c static void hwa742_suspend(void) { - if (hwa742.power_down != NULL) - hwa742.power_down(hwa742.fbdev->dev); + clk_disable(hwa742.sys_ck); } static void hwa742_resume(void) { - if (hwa742.power_up != NULL) - hwa742.power_up(hwa742.fbdev->dev); + clk_enable(hwa742.sys_ck); + /* Disable sleep mode */ hwa742_write_reg(HWA742_POWER_SAVE, 0); while (1) { @@ -955,14 +953,13 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode, omapfb_conf = fbdev->dev->platform_data; ctrl_conf = omapfb_conf->ctrl_platform_data; - if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) { + if (ctrl_conf == NULL) { dev_err(fbdev->dev, "HWA742: missing platform data\n"); r = -ENOENT; goto err1; } - hwa742.power_down = ctrl_conf->power_down; - hwa742.power_up = ctrl_conf->power_up; + hwa742.sys_ck = clk_get(NULL, "hwa_sys_ck"); spin_lock_init(&hwa742.req_lock); @@ -972,12 +969,11 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode, if ((r = hwa742.extif->init(fbdev)) < 0) goto err2; - ext_clk = ctrl_conf->get_clock_rate(fbdev->dev); + ext_clk = clk_get_rate(hwa742.sys_ck); if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0) goto err3; hwa742.extif->set_timings(&hwa742.reg_timings); - if (hwa742.power_up != NULL) - hwa742.power_up(fbdev->dev); + clk_enable(hwa742.sys_ck); calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk); if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0) @@ -1040,8 +1036,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode, return 0; err4: - if (hwa742.power_down != NULL) - hwa742.power_down(fbdev->dev); + clk_disable(hwa742.sys_ck); err3: hwa742.extif->cleanup(); err2: @@ -1055,8 +1050,7 @@ static void hwa742_cleanup(void) hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED); hwa742.extif->cleanup(); hwa742.int_ctrl->cleanup(); - if (hwa742.power_down != NULL) - hwa742.power_down(hwa742.fbdev->dev); + clk_disable(hwa742.sys_ck); } struct lcd_ctrl hwa742_ctrl = { -- cgit v1.2.3 From b583f26d510ee1aef7348f38f1d959212f66b5e5 Mon Sep 17 00:00:00 2001 From: David Brownell Date: Thu, 28 May 2009 14:04:03 -0700 Subject: ARM: OMAP3: mmc-twl4030 uses regulator framework Decouple the HSMMC glue from the twl4030 as the only regulator provider, using the regulator framework instead. This makes the glue's "mmc-twl4030" name a complete misnomer ... this code could probably all migrate into the HSMMC driver now.
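For readers unfamiliar with the consumer side of the regulator framework, a minimal sketch of the pattern follows; the "vmmc" supply name and the 2.8 V level are illustrative assumptions, not details taken from this patch.

#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Hypothetical consumer: power one MMC slot through the regulator API. */
static struct regulator *vcc;

static int example_slot_power_on(struct device *dev)
{
	int ret;

	/* look up the supply wired to this device; "vmmc" is an assumption */
	vcc = regulator_get(dev, "vmmc");
	if (IS_ERR(vcc))
		return PTR_ERR(vcc);

	/* 2.8 V is an example level for an SD slot */
	ret = regulator_set_voltage(vcc, 2800000, 2800000);
	if (!ret)
		ret = regulator_enable(vcc);
	if (ret)
		regulator_put(vcc);
	return ret;
}

static void example_slot_power_off(void)
{
	regulator_disable(vcc);
	regulator_put(vcc);
}

With this shape, any board can back a slot with whatever regulator it actually has, twl4030 or otherwise, which is exactly the decoupling the patch is after.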
Tested on 3430SDP (SD and low-voltage MMC) and Beagle (SD), plus some other boards (including Overo) after they were converted to set up MMC regulators properly. Eventually all boards should just associate a regulator with each MMC controller they use. In some cases (Overo MMC2 and Pandora MMC3, at least) that would be a fixed-voltage regulator with no real software control. As a temporary hack (pending regulator-next updates to make the "fixed.c" regulator usable) there's a new ocr_mask field for those boards. Patch updated with a fix for disabling vcc_aux by Adrian Hunter Cc: Pierre Ossman Signed-off-by: David Brownell Signed-off-by: Tony Lindgren --- drivers/mmc/host/omap_hsmmc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index e62a22a7f00c..2f19c635bc6e 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1073,7 +1073,6 @@ static int __init omap_mmc_probe(struct platform_device *pdev) mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_seg_size = mmc->max_req_size; - mmc->ocr_avail = mmc_slot(host).ocr_mask; mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; if (pdata->slots[host->slot_id].wires >= 8) @@ -1110,13 +1109,14 @@ static int __init omap_mmc_probe(struct platform_device *pdev) goto err_irq; } + /* initialize power supplies, gpios, etc */ if (pdata->init != NULL) { if (pdata->init(&pdev->dev) != 0) { - dev_dbg(mmc_dev(host->mmc), - "Unable to configure MMC IRQs\n"); + dev_dbg(mmc_dev(host->mmc), "late init error\n"); goto err_irq_cd_init; } } + mmc->ocr_avail = mmc_slot(host).ocr_mask; /* Request IRQ for card detect */ if ((mmc_slot(host).card_detect_irq)) { -- cgit v1.2.3 From e3a6d01932f343c1cc0218909262f0f68b6f7db4 Mon Sep 17 00:00:00 2001 From: Hartley Sweeten Date: Thu, 28 May 2009 19:56:11 +0100 Subject: [ARM] 5526/1: ep93xx: usb driver cleanup Clean up the ohci-ep93xx driver. 1) Use the usb.h dbg() macro instead of pr_debug() so that the source filename is prefixed to the message and it is terminated with a linefeed. 2) Add error handling for the clk_get() call. 3) Update clkdev support so that the usb clock is matched by the dev_id instead of the con_id (a short sketch of this matching follows).
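A hedged sketch of the dev_id matching mentioned in (3): a lookup keyed by dev_id lets the driver call clk_get(&pdev->dev, NULL), where a con_id-keyed entry would instead require clk_get(NULL, "usb_host"). The table entry below is purely illustrative; the real ep93xx clock definitions live in the platform clock code and differ in detail.

#include <linux/clk.h>
#include <asm/clkdev.h>

/* Assumed to sit alongside the platform's own struct clk definitions. */
extern struct clk clk_usb_host;

static struct clk_lookup usb_host_lookup = {
	.dev_id = "ep93xx-ohci",  /* match on the device name ... */
	.con_id = NULL,           /* ... not on a connection id */
	.clk    = &clk_usb_host,
};

static void example_register_usb_clock(void)
{
	clkdev_add(&usb_host_lookup);
}

/*
 * A driver bound to the "ep93xx-ohci" device can then fetch its clock
 * without naming it: clk = clk_get(&pdev->dev, NULL);
 * which is exactly what the patch below switches the probe routine to.
 */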
Signed-off-by: H Hartley Sweeten Signed-off-by: Russell King --- drivers/usb/host/ohci-ep93xx.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c index 7cf74f8c2db1..b0dbf4157d29 100644 --- a/drivers/usb/host/ohci-ep93xx.c +++ b/drivers/usb/host/ohci-ep93xx.c @@ -47,7 +47,7 @@ static int usb_hcd_ep93xx_probe(const struct hc_driver *driver, struct usb_hcd *hcd; if (pdev->resource[1].flags != IORESOURCE_IRQ) { - pr_debug("resource[1] is not IORESOURCE_IRQ"); + dbg("resource[1] is not IORESOURCE_IRQ"); return -ENOMEM; } @@ -65,12 +65,18 @@ static int usb_hcd_ep93xx_probe(const struct hc_driver *driver, hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (hcd->regs == NULL) { - pr_debug("ioremap failed"); + dbg("ioremap failed"); retval = -ENOMEM; goto err2; } - usb_host_clock = clk_get(&pdev->dev, "usb_host"); + usb_host_clock = clk_get(&pdev->dev, NULL); + if (IS_ERR(usb_host_clock)) { + dbg("clk_get failed"); + retval = PTR_ERR(usb_host_clock); + goto err3; + } + ep93xx_start_hc(&pdev->dev); ohci_hcd_init(hcd_to_ohci(hcd)); @@ -80,6 +86,7 @@ static int usb_hcd_ep93xx_probe(const struct hc_driver *driver, return retval; ep93xx_stop_hc(&pdev->dev); +err3: iounmap(hcd->regs); err2: release_mem_region(hcd->rsrc_start, hcd->rsrc_len); -- cgit v1.2.3 From 5a9d25150c01bd140ca647b5e7ee75ae18a369a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Va=C5=A1ut?= Date: Thu, 21 May 2009 13:11:05 +0100 Subject: [ARM] 5522/1: PalmLD: IDE support Support for Palm LifeDrive's internal harddrive. Signed-off-by: Marek Vasut Acked-by: Jeff Garzik Signed-off-by: Russell King --- drivers/ata/Kconfig | 9 +++ drivers/ata/Makefile | 1 + drivers/ata/pata_palmld.c | 150 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 160 insertions(+) create mode 100644 drivers/ata/pata_palmld.c (limited to 'drivers') diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 9120717c0701..2aa1908e5ce0 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -535,6 +535,15 @@ config PATA_OPTIDMA If unsure, say N. +config PATA_PALMLD + tristate "Palm LifeDrive PATA support" + depends on MACH_PALMLD + help + This option enables support for Palm LifeDrive's internal ATA + port via the new ATA layer. + + If unsure, say N. 
+ config PATA_PCMCIA tristate "PCMCIA PATA support" depends on PCMCIA diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 7f1ecf99528c..1558059874f0 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -50,6 +50,7 @@ obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o +obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c new file mode 100644 index 000000000000..11fb4ccc74b4 --- /dev/null +++ b/drivers/ata/pata_palmld.c @@ -0,0 +1,150 @@ +/* + * drivers/ata/pata_palmld.c + * + * Driver for IDE channel in Palm LifeDrive + * + * Based on research of: + * Alex Osborne + * + * Rewrite for mainline: + * Marek Vasut + * + * Rewritten version based on pata_ixp4xx_cf.c: + * ixp4xx PATA/Compact Flash driver + * Copyright (C) 2006-07 Tower Technologies + * Author: Alessandro Zummo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define DRV_NAME "pata_palmld" + +static struct scsi_host_template palmld_sht = { + ATA_PIO_SHT(DRV_NAME), +}; + +static struct ata_port_operations palmld_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = ata_sff_data_xfer_noirq, + .cable_detect = ata_cable_40wire, +}; + +static __devinit int palmld_pata_probe(struct platform_device *pdev) +{ + struct ata_host *host; + struct ata_port *ap; + void __iomem *mem; + int ret; + + /* allocate host */ + host = ata_host_alloc(&pdev->dev, 1); + if (!host) + return -ENOMEM; + + /* remap drive's physical memory address */ + mem = devm_ioremap(&pdev->dev, PALMLD_IDE_PHYS, 0x1000); + if (!mem) + return -ENOMEM; + + /* request and activate power GPIO, IRQ GPIO */ + ret = gpio_request(GPIO_NR_PALMLD_IDE_PWEN, "HDD PWR"); + if (ret) + goto err1; + ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_PWEN, 1); + if (ret) + goto err2; + + ret = gpio_request(GPIO_NR_PALMLD_IDE_RESET, "HDD RST"); + if (ret) + goto err2; + ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_RESET, 0); + if (ret) + goto err3; + + /* reset the drive */ + gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 0); + msleep(30); + gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 1); + msleep(30); + + /* setup the ata port */ + ap = host->ports[0]; + ap->ops = &palmld_port_ops; + ap->pio_mask = ATA_PIO4; + ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_PIO_POLLING; + + /* memory mapping voodoo */ + ap->ioaddr.cmd_addr = mem + 0x10; + ap->ioaddr.altstatus_addr = mem + 0xe; + ap->ioaddr.ctl_addr = mem + 0xe; + + /* start the port */ + ata_sff_std_ports(&ap->ioaddr); + + /* activate host */ + return ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING, + &palmld_sht); + +err3: + gpio_free(GPIO_NR_PALMLD_IDE_RESET); +err2: + gpio_free(GPIO_NR_PALMLD_IDE_PWEN); +err1: + return ret; +} + +static __devexit int palmld_pata_remove(struct platform_device *dev) +{ + struct ata_host *host = platform_get_drvdata(dev); + + ata_host_detach(host); + + /* power down the HDD */ + gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0); + + gpio_free(GPIO_NR_PALMLD_IDE_RESET); + gpio_free(GPIO_NR_PALMLD_IDE_PWEN); + + 
return 0; +} + +static struct platform_driver palmld_pata_platform_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = palmld_pata_probe, + .remove = __devexit_p(palmld_pata_remove), +}; + +static int __init palmld_pata_init(void) +{ + return platform_driver_register(&palmld_pata_platform_driver); +} + +static void __exit palmld_pata_exit(void) +{ + platform_driver_unregister(&palmld_pata_platform_driver); +} + +MODULE_AUTHOR("Marek Vasut "); +MODULE_DESCRIPTION("PalmLD PATA driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRV_NAME); + +module_init(palmld_pata_init); +module_exit(palmld_pata_exit); -- cgit v1.2.3 From 4dd9e742df98f8f600b4302d3adbb087a68237f7 Mon Sep 17 00:00:00 2001 From: Alessandro Rubini Date: Tue, 5 May 2009 05:54:13 +0100 Subject: [ARM] 5505/1: serial amba-pl011: move to arch_initcall for earlier console Signed-off-by: Alessandro Rubini Signed-off-by: Russell King --- drivers/serial/amba-pl011.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c index 88fdac51b6c5..4cfa1eb26892 100644 --- a/drivers/serial/amba-pl011.c +++ b/drivers/serial/amba-pl011.c @@ -845,7 +845,11 @@ static void __exit pl011_exit(void) uart_unregister_driver(&amba_reg); } -module_init(pl011_init); +/* + * While this can be a module, if builtin it's most likely the console + * So let's leave module_exit but move module_init to an earlier place + */ +arch_initcall(pl011_init); module_exit(pl011_exit); MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd"); -- cgit v1.2.3 From 9f171adc192fc3c8ffbb691cfdcc70259d75c6ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Sun, 29 Mar 2009 15:47:06 +0800 Subject: hwrng: omap - Move probe function to .devinit.text MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A pointer to omap_rng_probe is passed to the core via platform_driver_register and so the function must not disappear when the .init sections are discarded. Otherwise (if HOTPLUG=y is also set) unbinding and binding a device to the driver via sysfs will result in an oops, as does a device being registered late. An alternative to this patch is using platform_driver_probe instead of platform_driver_register, plus removing the pointer to the probe function from the struct platform_driver. Signed-off-by: Uwe Kleine-König Cc: Russell King Cc: David Brownell Cc: Patrick McHardy Cc: Jan Engelhardt Cc: Michael Buesch Cc: Andrew Morton Signed-off-by: Herbert Xu --- drivers/char/hw_random/omap-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 538313f9e7ac..00dd3de1be51 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -89,7 +89,7 @@ static struct hwrng omap_rng_ops = { .data_read = omap_rng_data_read, }; -static int __init omap_rng_probe(struct platform_device *pdev) +static int __devinit omap_rng_probe(struct platform_device *pdev) { struct resource *res, *mem; int ret; -- cgit v1.2.3 From 56af8cd44b05bd9649103b76a6e1e575682990e4 Mon Sep 17 00:00:00 2001 From: Lee Nipper Date: Sun, 29 Mar 2009 15:50:50 +0800 Subject: crypto: talitos - scaffolding for new algorithm types This patch is preparation for adding new algorithm types. Some elements which are AEAD-specific were renamed.
The algorithm template structure was changed to use crypto_alg, and talitos_alg_alloc was made more general with respect to algorithm types. ipsec_esp_edesc is renamed to talitos_edesc to use it in the upcoming ablkcipher routines. Signed-off-by: Lee Nipper Signed-off-by: Kim Phillips Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 245 +++++++++++++++++++++++++---------------------- 1 file changed, 129 insertions(+), 116 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index a3918c16b3db..9833961a247e 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -684,8 +684,8 @@ struct talitos_ctx { unsigned int authsize; }; -static int aead_authenc_setauthsize(struct crypto_aead *authenc, - unsigned int authsize) +static int aead_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) { struct talitos_ctx *ctx = crypto_aead_ctx(authenc); @@ -694,8 +694,8 @@ static int aead_authenc_setauthsize(struct crypto_aead *authenc, return 0; } -static int aead_authenc_setkey(struct crypto_aead *authenc, - const u8 *key, unsigned int keylen) +static int aead_setkey(struct crypto_aead *authenc, + const u8 *key, unsigned int keylen) { struct talitos_ctx *ctx = crypto_aead_ctx(authenc); struct rtattr *rta = (void *)key; @@ -740,7 +740,7 @@ badkey: } /* - * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor + * talitos_edesc - s/w-extended descriptor * @src_nents: number of segments in input scatterlist * @dst_nents: number of segments in output scatterlist * @dma_len: length of dma mapped link_tbl space @@ -752,7 +752,7 @@ badkey: * is greater than 1, an integrity check value is concatenated to the end * of link_tbl data */ -struct ipsec_esp_edesc { +struct talitos_edesc { int src_nents; int dst_nents; int dma_len; @@ -762,7 +762,7 @@ struct ipsec_esp_edesc { }; static void ipsec_esp_unmap(struct device *dev, - struct ipsec_esp_edesc *edesc, + struct talitos_edesc *edesc, struct aead_request *areq) { unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE); @@ -795,8 +795,8 @@ static void ipsec_esp_encrypt_done(struct device *dev, int err) { struct aead_request *areq = context; - struct ipsec_esp_edesc *edesc = - container_of(desc, struct ipsec_esp_edesc, desc); + struct talitos_edesc *edesc = + container_of(desc, struct talitos_edesc, desc); struct crypto_aead *authenc = crypto_aead_reqtfm(areq); struct talitos_ctx *ctx = crypto_aead_ctx(authenc); struct scatterlist *sg; @@ -823,8 +823,8 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, int err) { struct aead_request *req = context; - struct ipsec_esp_edesc *edesc = - container_of(desc, struct ipsec_esp_edesc, desc); + struct talitos_edesc *edesc = + container_of(desc, struct talitos_edesc, desc); struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct talitos_ctx *ctx = crypto_aead_ctx(authenc); struct scatterlist *sg; @@ -855,8 +855,8 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev, int err) { struct aead_request *req = context; - struct ipsec_esp_edesc *edesc = - container_of(desc, struct ipsec_esp_edesc, desc); + struct talitos_edesc *edesc = + container_of(desc, struct talitos_edesc, desc); ipsec_esp_unmap(dev, edesc, req); @@ -910,7 +910,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, /* * fill in and submit ipsec_esp descriptor */ -static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, +static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, u8 *giv, 
u64 seq, void (*callback) (struct device *dev, struct talitos_desc *desc, @@ -1052,14 +1052,14 @@ static int sg_count(struct scatterlist *sg_list, int nbytes) } /* - * allocate and map the ipsec_esp extended descriptor + * allocate and map the extended descriptor */ -static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, +static struct talitos_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, int icv_stashing) { struct crypto_aead *authenc = crypto_aead_reqtfm(areq); struct talitos_ctx *ctx = crypto_aead_ctx(authenc); - struct ipsec_esp_edesc *edesc; + struct talitos_edesc *edesc; int src_nents, dst_nents, alloc_len, dma_len; gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; @@ -1084,7 +1084,7 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, * allowing for two separate entries for ICV and generated ICV (+ 2), * and the ICV data itself */ - alloc_len = sizeof(struct ipsec_esp_edesc); + alloc_len = sizeof(struct talitos_edesc); if (src_nents || dst_nents) { dma_len = (src_nents + dst_nents + 2) * sizeof(struct talitos_ptr) + ctx->authsize; @@ -1109,11 +1109,11 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, return edesc; } -static int aead_authenc_encrypt(struct aead_request *req) +static int aead_encrypt(struct aead_request *req) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct talitos_ctx *ctx = crypto_aead_ctx(authenc); - struct ipsec_esp_edesc *edesc; + struct talitos_edesc *edesc; /* allocate extended descriptor */ edesc = ipsec_esp_edesc_alloc(req, 0); @@ -1128,13 +1128,13 @@ static int aead_authenc_encrypt(struct aead_request *req) -static int aead_authenc_decrypt(struct aead_request *req) +static int aead_decrypt(struct aead_request *req) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct talitos_ctx *ctx = crypto_aead_ctx(authenc); unsigned int authsize = ctx->authsize; struct talitos_private *priv = dev_get_drvdata(ctx->dev); - struct ipsec_esp_edesc *edesc; + struct talitos_edesc *edesc; struct scatterlist *sg; void *icvdata; @@ -1180,13 +1180,12 @@ static int aead_authenc_decrypt(struct aead_request *req) } } -static int aead_authenc_givencrypt( - struct aead_givcrypt_request *req) +static int aead_givencrypt(struct aead_givcrypt_request *req) { struct aead_request *areq = &req->areq; struct crypto_aead *authenc = crypto_aead_reqtfm(areq); struct talitos_ctx *ctx = crypto_aead_ctx(authenc); - struct ipsec_esp_edesc *edesc; + struct talitos_edesc *edesc; /* allocate extended descriptor */ edesc = ipsec_esp_edesc_alloc(areq, 0); @@ -1205,30 +1204,30 @@ static int aead_authenc_givencrypt( } struct talitos_alg_template { - char name[CRYPTO_MAX_ALG_NAME]; - char driver_name[CRYPTO_MAX_ALG_NAME]; - unsigned int blocksize; - struct aead_alg aead; - struct device *dev; + struct crypto_alg alg; __be32 desc_hdr_template; }; static struct talitos_alg_template driver_algs[] = { - /* single-pass ipsec_esp descriptor */ + /* AEAD algorithms. 
These use a single-pass ipsec_esp descriptor */ { - .name = "authenc(hmac(sha1),cbc(aes))", - .driver_name = "authenc-hmac-sha1-cbc-aes-talitos", - .blocksize = AES_BLOCK_SIZE, - .aead = { - .setkey = aead_authenc_setkey, - .setauthsize = aead_authenc_setauthsize, - .encrypt = aead_authenc_encrypt, - .decrypt = aead_authenc_decrypt, - .givencrypt = aead_authenc_givencrypt, - .geniv = "", - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = SHA1_DIGEST_SIZE, - }, + .alg = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_type = &crypto_aead_type, + .cra_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "", + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + } + }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_AESU | DESC_HDR_MODE0_AESU_CBC | @@ -1238,19 +1237,23 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_SHA1_HMAC, }, { - .name = "authenc(hmac(sha1),cbc(des3_ede))", - .driver_name = "authenc-hmac-sha1-cbc-3des-talitos", - .blocksize = DES3_EDE_BLOCK_SIZE, - .aead = { - .setkey = aead_authenc_setkey, - .setauthsize = aead_authenc_setauthsize, - .encrypt = aead_authenc_encrypt, - .decrypt = aead_authenc_decrypt, - .givencrypt = aead_authenc_givencrypt, - .geniv = "", - .ivsize = DES3_EDE_BLOCK_SIZE, - .maxauthsize = SHA1_DIGEST_SIZE, - }, + .alg = { + .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_type = &crypto_aead_type, + .cra_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "", + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + } + }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_DEU | DESC_HDR_MODE0_DEU_CBC | @@ -1261,19 +1264,23 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_SHA1_HMAC, }, { - .name = "authenc(hmac(sha256),cbc(aes))", - .driver_name = "authenc-hmac-sha256-cbc-aes-talitos", - .blocksize = AES_BLOCK_SIZE, - .aead = { - .setkey = aead_authenc_setkey, - .setauthsize = aead_authenc_setauthsize, - .encrypt = aead_authenc_encrypt, - .decrypt = aead_authenc_decrypt, - .givencrypt = aead_authenc_givencrypt, - .geniv = "", - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = SHA256_DIGEST_SIZE, - }, + .alg = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_type = &crypto_aead_type, + .cra_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "", + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + } + }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_AESU | DESC_HDR_MODE0_AESU_CBC | @@ -1283,19 +1290,23 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_SHA256_HMAC, }, { - .name = "authenc(hmac(sha256),cbc(des3_ede))", - .driver_name = "authenc-hmac-sha256-cbc-3des-talitos", - .blocksize = DES3_EDE_BLOCK_SIZE, - 
.aead = { - .setkey = aead_authenc_setkey, - .setauthsize = aead_authenc_setauthsize, - .encrypt = aead_authenc_encrypt, - .decrypt = aead_authenc_decrypt, - .givencrypt = aead_authenc_givencrypt, - .geniv = "", - .ivsize = DES3_EDE_BLOCK_SIZE, - .maxauthsize = SHA256_DIGEST_SIZE, - }, + .alg = { + .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_type = &crypto_aead_type, + .cra_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "", + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + } + }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_DEU | DESC_HDR_MODE0_DEU_CBC | @@ -1306,19 +1317,23 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_SHA256_HMAC, }, { - .name = "authenc(hmac(md5),cbc(aes))", - .driver_name = "authenc-hmac-md5-cbc-aes-talitos", - .blocksize = AES_BLOCK_SIZE, - .aead = { - .setkey = aead_authenc_setkey, - .setauthsize = aead_authenc_setauthsize, - .encrypt = aead_authenc_encrypt, - .decrypt = aead_authenc_decrypt, - .givencrypt = aead_authenc_givencrypt, - .geniv = "", - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = MD5_DIGEST_SIZE, - }, + .alg = { + .cra_name = "authenc(hmac(md5),cbc(aes))", + .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_type = &crypto_aead_type, + .cra_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "", + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + } + }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_AESU | DESC_HDR_MODE0_AESU_CBC | @@ -1328,19 +1343,23 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_MD5_HMAC, }, { - .name = "authenc(hmac(md5),cbc(des3_ede))", - .driver_name = "authenc-hmac-md5-cbc-3des-talitos", - .blocksize = DES3_EDE_BLOCK_SIZE, - .aead = { - .setkey = aead_authenc_setkey, - .setauthsize = aead_authenc_setauthsize, - .encrypt = aead_authenc_encrypt, - .decrypt = aead_authenc_decrypt, - .givencrypt = aead_authenc_givencrypt, - .geniv = "", - .ivsize = DES3_EDE_BLOCK_SIZE, - .maxauthsize = MD5_DIGEST_SIZE, - }, + .alg = { + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_type = &crypto_aead_type, + .cra_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "", + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + } + }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_DEU | DESC_HDR_MODE0_DEU_CBC | @@ -1453,19 +1472,13 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, return ERR_PTR(-ENOMEM); alg = &t_alg->crypto_alg; + *alg = template->alg; - snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); - snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - template->driver_name); alg->cra_module = THIS_MODULE; alg->cra_init = talitos_cra_init; alg->cra_priority = TALITOS_CRA_PRIORITY; - 
alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; - alg->cra_blocksize = template->blocksize; alg->cra_alignmask = 0; - alg->cra_type = &crypto_aead_type; alg->cra_ctxsize = sizeof(struct talitos_ctx); - alg->cra_u.aead = template->aead; t_alg->desc_hdr_template = template->desc_hdr_template; t_alg->dev = dev; -- cgit v1.2.3 From 4de9d0b547b97e40c93a885ac6246c2c5fef05cb Mon Sep 17 00:00:00 2001 From: Lee Nipper Date: Sun, 29 Mar 2009 15:52:32 +0800 Subject: crypto: talitos - Add ablkcipher algorithms Add these ablkcipher algorithms: cbc(aes), cbc(des3_ede). Added handling of chained scatterlists with zero length entry because eseqiv uses it. Added new map and unmap routines. Signed-off-by: Lee Nipper Signed-off-by: Kim Phillips Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 380 ++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 342 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 9833961a247e..a0b0a6319088 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -44,6 +44,8 @@ #include #include #include +#include +#include #include "talitos.h" @@ -755,12 +757,62 @@ badkey: struct talitos_edesc { int src_nents; int dst_nents; + int src_is_chained; + int dst_is_chained; int dma_len; dma_addr_t dma_link_tbl; struct talitos_desc desc; struct talitos_ptr link_tbl[0]; }; +static int talitos_map_sg(struct device *dev, struct scatterlist *sg, + unsigned int nents, enum dma_data_direction dir, + int chained) +{ + if (unlikely(chained)) + while (sg) { + dma_map_sg(dev, sg, 1, dir); + sg = scatterwalk_sg_next(sg); + } + else + dma_map_sg(dev, sg, nents, dir); + return nents; +} + +static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg, + enum dma_data_direction dir) +{ + while (sg) { + dma_unmap_sg(dev, sg, 1, dir); + sg = scatterwalk_sg_next(sg); + } +} + +static void talitos_sg_unmap(struct device *dev, + struct talitos_edesc *edesc, + struct scatterlist *src, + struct scatterlist *dst) +{ + unsigned int src_nents = edesc->src_nents ? : 1; + unsigned int dst_nents = edesc->dst_nents ? : 1; + + if (src != dst) { + if (edesc->src_is_chained) + talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE); + else + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); + + if (edesc->dst_is_chained) + talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE); + else + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); + } else + if (edesc->src_is_chained) + talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL); + else + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); +} + static void ipsec_esp_unmap(struct device *dev, struct talitos_edesc *edesc, struct aead_request *areq) @@ -772,15 +824,7 @@ static void ipsec_esp_unmap(struct device *dev, dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE); - if (areq->src != areq->dst) { - dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1, - DMA_TO_DEVICE); - dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1, - DMA_FROM_DEVICE); - } else { - dma_unmap_sg(dev, areq->src, edesc->src_nents ? 
: 1, - DMA_BIDIRECTIONAL); - } + talitos_sg_unmap(dev, edesc, areq->src, areq->dst); if (edesc->dma_len) dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, @@ -886,7 +930,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, link_tbl_ptr->j_extent = 0; link_tbl_ptr++; cryptlen -= sg_dma_len(sg); - sg = sg_next(sg); + sg = scatterwalk_sg_next(sg); } /* adjust (decrease) last one (or two) entry's len to cryptlen */ @@ -952,12 +996,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, desc->ptr[4].len = cpu_to_be16(cryptlen); desc->ptr[4].j_extent = authsize; - if (areq->src == areq->dst) - sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1, - DMA_BIDIRECTIONAL); - else - sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1, - DMA_TO_DEVICE); + sg_count = talitos_map_sg(dev, areq->src, + edesc->src_nents ? : 1, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : + DMA_TO_DEVICE, + edesc->src_is_chained); if (sg_count == 1) { desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); @@ -986,8 +1029,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, desc->ptr[5].j_extent = authsize; if (areq->src != areq->dst) { - sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1, - DMA_FROM_DEVICE); + sg_count = talitos_map_sg(dev, areq->dst, + edesc->dst_nents ? : 1, + DMA_FROM_DEVICE, + edesc->dst_is_chained); } if (sg_count == 1) { @@ -1037,15 +1082,18 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, /* * derive number of elements in scatterlist */ -static int sg_count(struct scatterlist *sg_list, int nbytes) +static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained) { struct scatterlist *sg = sg_list; int sg_nents = 0; - while (nbytes) { + *chained = 0; + while (nbytes > 0) { sg_nents++; nbytes -= sg->length; - sg = sg_next(sg); + if (!sg_is_last(sg) && (sg + 1)->length == 0) + *chained = 1; + sg = scatterwalk_sg_next(sg); } return sg_nents; @@ -1054,28 +1102,32 @@ static int sg_count(struct scatterlist *sg_list, int nbytes) /* * allocate and map the extended descriptor */ -static struct talitos_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, - int icv_stashing) +static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int cryptlen, + unsigned int authsize, + int icv_stashing, + u32 cryptoflags) { - struct crypto_aead *authenc = crypto_aead_reqtfm(areq); - struct talitos_ctx *ctx = crypto_aead_ctx(authenc); struct talitos_edesc *edesc; int src_nents, dst_nents, alloc_len, dma_len; - gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : + int src_chained, dst_chained = 0; + gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; - if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) { - dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n"); + if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) { + dev_err(dev, "length exceeds h/w max limit\n"); return ERR_PTR(-EINVAL); } - src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize); + src_nents = sg_count(src, cryptlen + authsize, &src_chained); src_nents = (src_nents == 1) ? 0 : src_nents; - if (areq->dst == areq->src) { + if (dst == src) { dst_nents = src_nents; } else { - dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize); + dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained); dst_nents = (dst_nents == 1) ? 
0 : dst_nents; } @@ -1087,28 +1139,41 @@ static struct talitos_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, alloc_len = sizeof(struct talitos_edesc); if (src_nents || dst_nents) { dma_len = (src_nents + dst_nents + 2) * - sizeof(struct talitos_ptr) + ctx->authsize; + sizeof(struct talitos_ptr) + authsize; alloc_len += dma_len; } else { dma_len = 0; - alloc_len += icv_stashing ? ctx->authsize : 0; + alloc_len += icv_stashing ? authsize : 0; } edesc = kmalloc(alloc_len, GFP_DMA | flags); if (!edesc) { - dev_err(ctx->dev, "could not allocate edescriptor\n"); + dev_err(dev, "could not allocate edescriptor\n"); return ERR_PTR(-ENOMEM); } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; + edesc->src_is_chained = src_chained; + edesc->dst_is_chained = dst_chained; edesc->dma_len = dma_len; - edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0], + edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], edesc->dma_len, DMA_BIDIRECTIONAL); return edesc; } +static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, + int icv_stashing) +{ + struct crypto_aead *authenc = crypto_aead_reqtfm(areq); + struct talitos_ctx *ctx = crypto_aead_ctx(authenc); + + return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, + areq->cryptlen, ctx->authsize, icv_stashing, + areq->base.flags); +} + static int aead_encrypt(struct aead_request *req) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); @@ -1116,7 +1181,7 @@ static int aead_encrypt(struct aead_request *req) struct talitos_edesc *edesc; /* allocate extended descriptor */ - edesc = ipsec_esp_edesc_alloc(req, 0); + edesc = aead_edesc_alloc(req, 0); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -1141,7 +1206,7 @@ static int aead_decrypt(struct aead_request *req) req->cryptlen -= authsize; /* allocate extended descriptor */ - edesc = ipsec_esp_edesc_alloc(req, 1); + edesc = aead_edesc_alloc(req, 1); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -1188,7 +1253,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req) struct talitos_edesc *edesc; /* allocate extended descriptor */ - edesc = ipsec_esp_edesc_alloc(areq, 0); + edesc = aead_edesc_alloc(areq, 0); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -1203,6 +1268,199 @@ static int aead_givencrypt(struct aead_givcrypt_request *req) ipsec_esp_encrypt_done); } +static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher); + + if (keylen > TALITOS_MAX_KEY_SIZE) + goto badkey; + + if (keylen < alg->min_keysize || keylen > alg->max_keysize) + goto badkey; + + memcpy(&ctx->key, key, keylen); + ctx->keylen = keylen; + + return 0; + +badkey: + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; +} + +static void common_nonsnoop_unmap(struct device *dev, + struct talitos_edesc *edesc, + struct ablkcipher_request *areq) +{ + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); + + talitos_sg_unmap(dev, edesc, areq->src, areq->dst); + + if (edesc->dma_len) + dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, + DMA_BIDIRECTIONAL); +} + +static void ablkcipher_done(struct device *dev, + struct talitos_desc *desc, void *context, + int err) +{ + struct ablkcipher_request *areq = context; + struct talitos_edesc 
*edesc = + container_of(desc, struct talitos_edesc, desc); + + common_nonsnoop_unmap(dev, edesc, areq); + + kfree(edesc); + + areq->base.complete(&areq->base, err); +} + +static int common_nonsnoop(struct talitos_edesc *edesc, + struct ablkcipher_request *areq, + u8 *giv, + void (*callback) (struct device *dev, + struct talitos_desc *desc, + void *context, int error)) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + struct device *dev = ctx->dev; + struct talitos_desc *desc = &edesc->desc; + unsigned int cryptlen = areq->nbytes; + unsigned int ivsize; + int sg_count, ret; + + /* first DWORD empty */ + desc->ptr[0].len = 0; + desc->ptr[0].ptr = 0; + desc->ptr[0].j_extent = 0; + + /* cipher iv */ + ivsize = crypto_ablkcipher_ivsize(cipher); + map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0, + DMA_TO_DEVICE); + + /* cipher key */ + map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, + (char *)&ctx->key, 0, DMA_TO_DEVICE); + + /* + * cipher in + */ + desc->ptr[3].len = cpu_to_be16(cryptlen); + desc->ptr[3].j_extent = 0; + + sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL + : DMA_TO_DEVICE, + edesc->src_is_chained); + + if (sg_count == 1) { + desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src)); + } else { + sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, + &edesc->link_tbl[0]); + if (sg_count > 1) { + desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; + desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl); + dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, + edesc->dma_len, DMA_BIDIRECTIONAL); + } else { + /* Only one segment now, so no link tbl needed */ + desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src)); + } + } + + /* cipher out */ + desc->ptr[4].len = cpu_to_be16(cryptlen); + desc->ptr[4].j_extent = 0; + + if (areq->src != areq->dst) + sg_count = talitos_map_sg(dev, areq->dst, + edesc->dst_nents ? 
: 1, + DMA_FROM_DEVICE, + edesc->dst_is_chained); + + if (sg_count == 1) { + desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst)); + } else { + struct talitos_ptr *link_tbl_ptr = + &edesc->link_tbl[edesc->src_nents + 1]; + + desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; + desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *) + edesc->dma_link_tbl + + edesc->src_nents + 1); + sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, + link_tbl_ptr); + dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, + edesc->dma_len, DMA_BIDIRECTIONAL); + } + + /* iv out */ + map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0, + DMA_FROM_DEVICE); + + /* last DWORD empty */ + desc->ptr[6].len = 0; + desc->ptr[6].ptr = 0; + desc->ptr[6].j_extent = 0; + + ret = talitos_submit(dev, desc, callback, areq); + if (ret != -EINPROGRESS) { + common_nonsnoop_unmap(dev, edesc, areq); + kfree(edesc); + } + return ret; +} + +static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes, + 0, 0, areq->base.flags); +} + +static int ablkcipher_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + struct talitos_edesc *edesc; + + /* allocate extended descriptor */ + edesc = ablkcipher_edesc_alloc(areq); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* set encrypt */ + edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; + + return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); +} + +static int ablkcipher_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + struct talitos_edesc *edesc; + + /* allocate extended descriptor */ + edesc = ablkcipher_edesc_alloc(areq); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; + + return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); +} + struct talitos_alg_template { struct crypto_alg alg; __be32 desc_hdr_template; @@ -1368,6 +1626,52 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_INIT | DESC_HDR_MODE1_MDEU_PAD | DESC_HDR_MODE1_MDEU_MD5_HMAC, + }, + /* ABLKCIPHER algorithms. 
*/ + { + .alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_type = &crypto_ablkcipher_type, + .cra_ablkcipher = { + .setkey = ablkcipher_setkey, + .encrypt = ablkcipher_encrypt, + .decrypt = ablkcipher_decrypt, + .geniv = "eseqiv", + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_AESU | + DESC_HDR_MODE0_AESU_CBC, + }, + { + .alg = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_type = &crypto_ablkcipher_type, + .cra_ablkcipher = { + .setkey = ablkcipher_setkey, + .encrypt = ablkcipher_encrypt, + .decrypt = ablkcipher_decrypt, + .geniv = "eseqiv", + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_CBC | + DESC_HDR_MODE0_DEU_3DES, } }; -- cgit v1.2.3 From e938e4656b3ee32e046ee8293411a07be9d72eb8 Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Sun, 29 Mar 2009 15:53:23 +0800 Subject: crypto: talitos - Whitespace/codingstyle/overrun lines cleanup no functional changes. Signed-off-by: Kim Phillips Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 95 ++++++++++++++++++++++++------------------------ 1 file changed, 48 insertions(+), 47 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index a0b0a6319088..a073e6b6a3c8 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -341,7 +341,8 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) status = error; dma_unmap_single(dev, request->dma_desc, - sizeof(struct talitos_desc), DMA_BIDIRECTIONAL); + sizeof(struct talitos_desc), + DMA_BIDIRECTIONAL); /* copy entries so we can call callback outside lock */ saved_req.desc = request->desc; @@ -415,7 +416,8 @@ static struct talitos_desc *current_desc(struct device *dev, int ch) /* * user diagnostics; report root cause of error based on execution unit status */ -static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc) +static void report_eu_error(struct device *dev, int ch, + struct talitos_desc *desc) { struct talitos_private *priv = dev_get_drvdata(dev); int i; @@ -863,8 +865,8 @@ static void ipsec_esp_encrypt_done(struct device *dev, } static void ipsec_esp_decrypt_swauth_done(struct device *dev, - struct talitos_desc *desc, void *context, - int err) + struct talitos_desc *desc, + void *context, int err) { struct aead_request *req = context; struct talitos_edesc *edesc = @@ -895,8 +897,8 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, } static void ipsec_esp_decrypt_hwauth_done(struct device *dev, - struct talitos_desc *desc, void *context, - int err) + struct talitos_desc *desc, + void *context, int err) { struct aead_request *req = context; struct talitos_edesc *edesc = @@ -905,10 +907,9 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev, ipsec_esp_unmap(dev, edesc, req); /* check ICV auth status */ - if (!err) - if ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != - DESC_HDR_LO_ICCR1_PASS) - err = -EBADMSG; + if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != + 
DESC_HDR_LO_ICCR1_PASS)) + err = -EBADMSG; kfree(edesc); @@ -996,10 +997,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, desc->ptr[4].len = cpu_to_be16(cryptlen); desc->ptr[4].j_extent = authsize; - sg_count = talitos_map_sg(dev, areq->src, - edesc->src_nents ? : 1, - (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : - DMA_TO_DEVICE, + sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL + : DMA_TO_DEVICE, edesc->src_is_chained); if (sg_count == 1) { @@ -1008,19 +1008,21 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, sg_link_tbl_len = cryptlen; if ((edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) && - (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) { + (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) sg_link_tbl_len = cryptlen + authsize; - } + sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len, &edesc->link_tbl[0]); if (sg_count > 1) { desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); - dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, - edesc->dma_len, DMA_BIDIRECTIONAL); + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, + DMA_BIDIRECTIONAL); } else { /* Only one segment now, so no link tbl needed */ - desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); + desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq-> + src)); } } @@ -1028,12 +1030,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, desc->ptr[5].len = cpu_to_be16(cryptlen); desc->ptr[5].j_extent = authsize; - if (areq->src != areq->dst) { + if (areq->src != areq->dst) sg_count = talitos_map_sg(dev, areq->dst, edesc->dst_nents ? : 1, DMA_FROM_DEVICE, edesc->dst_is_chained); - } if (sg_count == 1) { desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); @@ -1078,7 +1079,6 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, return ret; } - /* * derive number of elements in scatterlist */ @@ -1191,8 +1191,6 @@ static int aead_encrypt(struct aead_request *req) return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done); } - - static int aead_decrypt(struct aead_request *req) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); @@ -1211,38 +1209,38 @@ static int aead_decrypt(struct aead_request *req) return PTR_ERR(edesc); if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) && - (((!edesc->src_nents && !edesc->dst_nents) || - priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT))) { + ((!edesc->src_nents && !edesc->dst_nents) || + priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) { /* decrypt and check the ICV */ - edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND | + edesc->desc.hdr = ctx->desc_hdr_template | + DESC_HDR_DIR_INBOUND | DESC_HDR_MODE1_MDEU_CICV; /* reset integrity check result bits */ edesc->desc.hdr_lo = 0; - return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_hwauth_done); + return ipsec_esp(edesc, req, NULL, 0, + ipsec_esp_decrypt_hwauth_done); - } else { - - /* Have to check the ICV with software */ + } - edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; + /* Have to check the ICV with software */ + edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; - /* stash incoming ICV for later cmp with ICV generated by the h/w */ - if (edesc->dma_len) - icvdata = &edesc->link_tbl[edesc->src_nents + - edesc->dst_nents + 2]; - else - icvdata = &edesc->link_tbl[0]; + /* stash 
incoming ICV for later cmp with ICV generated by the h/w */ + if (edesc->dma_len) + icvdata = &edesc->link_tbl[edesc->src_nents + + edesc->dst_nents + 2]; + else + icvdata = &edesc->link_tbl[0]; - sg = sg_last(req->src, edesc->src_nents ? : 1); + sg = sg_last(req->src, edesc->src_nents ? : 1); - memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, - ctx->authsize); + memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, + ctx->authsize); - return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done); - } + return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done); } static int aead_givencrypt(struct aead_givcrypt_request *req) @@ -1368,11 +1366,13 @@ static int common_nonsnoop(struct talitos_edesc *edesc, if (sg_count > 1) { desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl); - dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, - edesc->dma_len, DMA_BIDIRECTIONAL); + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, + DMA_BIDIRECTIONAL); } else { /* Only one segment now, so no link tbl needed */ - desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src)); + desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq-> + src)); } } @@ -1419,7 +1419,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc, return ret; } -static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *areq) +static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * + areq) { struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); -- cgit v1.2.3 From 19bbbc635523703ece28409e59694d5b512b819e Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Sun, 29 Mar 2009 15:53:59 +0800 Subject: crypto: talitos - containerof related codingstyle no functional changes. 
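For readers skimming the diff that follows, the whole change reduces to one pattern (shown here as an illustrative fragment using the driver's own talitos_edesc type): move container_of() out of the declarator so the declaration fits the kernel's 80-column limit.

	/* before: initializer pushes the declaration past 80 columns */
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);

	/* after: same semantics, declaration and assignment split */
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);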
Signed-off-by: Kim Phillips Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index a073e6b6a3c8..1cc1c411e551 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -841,13 +841,14 @@ static void ipsec_esp_encrypt_done(struct device *dev, int err) { struct aead_request *areq = context; - struct talitos_edesc *edesc = - container_of(desc, struct talitos_edesc, desc); struct crypto_aead *authenc = crypto_aead_reqtfm(areq); struct talitos_ctx *ctx = crypto_aead_ctx(authenc); + struct talitos_edesc *edesc; struct scatterlist *sg; void *icvdata; + edesc = container_of(desc, struct talitos_edesc, desc); + ipsec_esp_unmap(dev, edesc, areq); /* copy the generated ICV to dst */ @@ -869,13 +870,14 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, void *context, int err) { struct aead_request *req = context; - struct talitos_edesc *edesc = - container_of(desc, struct talitos_edesc, desc); struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct talitos_ctx *ctx = crypto_aead_ctx(authenc); + struct talitos_edesc *edesc; struct scatterlist *sg; void *icvdata; + edesc = container_of(desc, struct talitos_edesc, desc); + ipsec_esp_unmap(dev, edesc, req); if (!err) { @@ -901,8 +903,9 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev, void *context, int err) { struct aead_request *req = context; - struct talitos_edesc *edesc = - container_of(desc, struct talitos_edesc, desc); + struct talitos_edesc *edesc; + + edesc = container_of(desc, struct talitos_edesc, desc); ipsec_esp_unmap(dev, edesc, req); @@ -1308,8 +1311,9 @@ static void ablkcipher_done(struct device *dev, int err) { struct ablkcipher_request *areq = context; - struct talitos_edesc *edesc = - container_of(desc, struct talitos_edesc, desc); + struct talitos_edesc *edesc; + + edesc = container_of(desc, struct talitos_edesc, desc); common_nonsnoop_unmap(dev, edesc, areq); @@ -1686,12 +1690,14 @@ struct talitos_crypto_alg { static int talitos_cra_init(struct crypto_tfm *tfm) { struct crypto_alg *alg = tfm->__crt_alg; - struct talitos_crypto_alg *talitos_alg = - container_of(alg, struct talitos_crypto_alg, crypto_alg); + struct talitos_crypto_alg *talitos_alg; struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); + talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg); + /* update context with ptr to dev */ ctx->dev = talitos_alg->dev; + /* copy descriptor header template value */ ctx->desc_hdr_template = talitos_alg->desc_hdr_template; -- cgit v1.2.3 From 962a9c99496f98041d14d64a9fdcf58050fefb4d Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Sun, 29 Mar 2009 15:54:30 +0800 Subject: crypto: talitos - Avoid unnecessary decrypt check the ICV check bit only gets set in decrypt entry points Signed-off-by: Kim Phillips Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 1cc1c411e551..c70775fd3ce2 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1010,8 +1010,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, } else { sg_link_tbl_len = cryptlen; - if ((edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) && - (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) + if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) sg_link_tbl_len = 
cryptlen + authsize; sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len, -- cgit v1.2.3 From d1c8b0a7692e81b46550bcc493465ed10510cd33 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 21 Apr 2009 14:14:37 +0800 Subject: crypto: padlock - Enable on x86_64 Almost everything stays the same; we just need to use the extended registers on the 64-bit variant. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 2 +- drivers/crypto/padlock-aes.c | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 01afd758072f..39eedd431413 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -12,7 +12,7 @@ if CRYPTO_HW config CRYPTO_DEV_PADLOCK tristate "Support for VIA PadLock ACE" - depends on X86_32 && !UML + depends on !UML select CRYPTO_ALGAPI help Some VIA processors come with an integrated crypto engine diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 856b3cc25583..87f92c39b5f0 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -154,7 +154,11 @@ static inline void padlock_reset_key(struct cword *cword) int cpu = raw_smp_processor_id(); if (cword != per_cpu(last_cword, cpu)) +#ifndef CONFIG_X86_64 asm volatile ("pushfl; popfl"); +#else + asm volatile ("pushfq; popfq"); +#endif } static inline void padlock_store_cword(struct cword *cword) @@ -208,10 +212,19 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, asm volatile ("test $1, %%cl;" "je 1f;" +#ifndef CONFIG_X86_64 "lea -1(%%ecx), %%eax;" "mov $1, %%ecx;" +#else + "lea -1(%%rcx), %%rax;" + "mov $1, %%rcx;" +#endif ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */ +#ifndef CONFIG_X86_64 "mov %%eax, %%ecx;" +#else + "mov %%rax, %%rcx;" +#endif "1:" ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ : "+S"(input), "+D"(output) -- cgit v1.2.3 From 2f8174187f409213e63c3589af163c627e8a182a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2009 13:00:15 +0800 Subject: crypto: padlock - Restore dependency on x86 When we added 64-bit support to padlock the dependency on x86 was lost. This causes build failures on non-x86 architectures.
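Stepping back to the x86_64 enablement above: the recurring #ifndef CONFIG_X86_64 split can be read as one pattern. A minimal sketch, assuming a kernel-style build where CONFIG_X86_64 distinguishes the variants; the macro and helper names here are illustrative, not driver API. The flags round-trip is what the driver's padlock_reset_key() relies on to make the engine pick up fresh key state before the next xcrypt.

	/*
	 * Minimal sketch (names illustrative): the only 64-bit
	 * difference is the width of the flags push/pop.
	 */
	#ifdef CONFIG_X86_64
	#define PADLOCK_FLAGS_RELOAD	"pushfq; popfq"
	#else
	#define PADLOCK_FLAGS_RELOAD	"pushfl; popfl"
	#endif

	static inline void padlock_flags_reload(void)
	{
		asm volatile(PADLOCK_FLAGS_RELOAD);
	}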
Reported-by: Stephen Rothwell Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 39eedd431413..e748e55bd86b 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -12,7 +12,7 @@ if CRYPTO_HW config CRYPTO_DEV_PADLOCK tristate "Support for VIA PadLock ACE" - depends on !UML + depends on X86 && !UML select CRYPTO_ALGAPI help Some VIA processors come with an integrated crypto engine -- cgit v1.2.3 From 608d1cd5d375580a49d01b5ed1f9944f5141ae19 Mon Sep 17 00:00:00 2001 From: Harald Welte Date: Fri, 15 May 2009 15:57:35 +1000 Subject: hwrng: via_rng - The VIA Hardware RNG driver is for the CPU, not Chipset This is a cosmetic change, fixing the MODULE_DESCRIPTION() of via-rng.c Signed-off-by: Harald Welte Signed-off-by: Herbert Xu --- drivers/char/hw_random/via-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index 4e9573c1d39e..02ee63906713 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -205,5 +205,5 @@ static void __exit mod_exit(void) module_init(mod_init); module_exit(mod_exit); -MODULE_DESCRIPTION("H/W RNG driver for VIA chipsets"); +MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock"); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 858576bdc5d65edf1fffd2e65b2165ec1dc68486 Mon Sep 17 00:00:00 2001 From: Harald Welte Date: Fri, 15 May 2009 16:00:32 +1000 Subject: hwrng: via_rng - Support VIA Nano hardware RNG The VIA Nano CPU supports the same XSTORE instruction based RNG, but it lacks the MSR present in earlier CPUs. Signed-off-by: Harald Welte Signed-off-by: Herbert Xu --- drivers/char/hw_random/via-rng.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers') diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index 02ee63906713..794aacb715c1 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -132,6 +132,19 @@ static int via_rng_init(struct hwrng *rng) struct cpuinfo_x86 *c = &cpu_data(0); u32 lo, hi, old_lo; + /* VIA Nano CPUs don't have the MSR_VIA_RNG anymore. The RNG + * is always enabled if CPUID rng_en is set. There is no + * RNG configuration like it used to be the case in this + * register */ + if ((c->x86 == 6) && (c->x86_model >= 0x0f)) { + if (!cpu_has_xstore_enabled) { + printk(KERN_ERR PFX "can't enable hardware RNG " + "if XSTORE is not enabled\n"); + return -ENODEV; + } + return 0; + } + /* Control the RNG via MSR. Tread lightly and pay very close * close attention to values written, as the reserved fields * are documented to be "undefined and unpredictable"; but it -- cgit v1.2.3 From e9736c16da9077728802f42393d18258e6685428 Mon Sep 17 00:00:00 2001 From: Harald Welte Date: Fri, 15 May 2009 16:01:52 +1000 Subject: hwrng: via_rng - Support VIA Nano hardware RNG on X86_64 builds Fix Kconfig to build via-rng.ko on X86_64 builds, as the VIA Nano CPU supports x86_64, too. 
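Taken together, the Nano support above and the Kconfig change that follows reduce the init path to the following shape. This is a sketch, not the driver verbatim: cpu_data() and cpu_has_xstore_enabled are the tests used in the patch, while via_rng_msr_setup() is a hypothetical stand-in for the existing MSR_VIA_RNG code path.

	/* Sketch of the init decision: Nano (family 6, model >= 0x0f)
	 * has no MSR_VIA_RNG, so the CPUID XSTORE-enabled flag alone
	 * decides whether the RNG is usable. */
	static int via_rng_init_sketch(void)
	{
		struct cpuinfo_x86 *c = &cpu_data(0);

		if (c->x86 == 6 && c->x86_model >= 0x0f)
			return cpu_has_xstore_enabled ? 0 : -ENODEV;

		return via_rng_msr_setup(c); /* hypothetical: legacy MSR path */
	}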
Signed-off-by: Harald Welte Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 5fab6470f4b2..9c00440dcf86 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -88,7 +88,7 @@ config HW_RANDOM_N2RNG config HW_RANDOM_VIA tristate "VIA HW Random Number Generator support" - depends on HW_RANDOM && X86_32 + depends on HW_RANDOM && X86 default HW_RANDOM ---help--- This driver provides kernel-side support for the Random Number -- cgit v1.2.3 From f3d8fe40498eea9f45be260bdf6ccada845411f3 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Wed, 27 May 2009 15:16:21 +1000 Subject: crypto: hifn_795x - fix __dev{init,exit} markings The remove member of the pci_driver hifn_pci_driver uses __devexit_p(), so the remove function itself should be marked with __devexit. And where there be __devexit on the remove, so is there __devinit on the probe. Similarly, the module_init/module_exit functions should be declared with plain __init/__exit markings, not the hotplug __dev{init,exit} ones. Signed-off-by: Mike Frysinger Acked-by: Evgeniy Polyakov CC: Patrick McHardy Signed-off-by: Herbert Xu --- drivers/crypto/hifn_795x.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 2bef086fb342..5f753fc08730 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -2564,7 +2564,7 @@ static void hifn_tasklet_callback(unsigned long data) hifn_process_queue(dev); } -static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) +static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int err, i; struct hifn_device *dev; @@ -2696,7 +2696,7 @@ err_out_disable_pci_device: return err; } -static void hifn_remove(struct pci_dev *pdev) +static void __devexit hifn_remove(struct pci_dev *pdev) { int i; struct hifn_device *dev; @@ -2744,7 +2744,7 @@ static struct pci_driver hifn_pci_driver = { .remove = __devexit_p(hifn_remove), }; -static int __devinit hifn_init(void) +static int __init hifn_init(void) { unsigned int freq; int err; @@ -2789,7 +2789,7 @@ static int __devinit hifn_init(void) return 0; } -static void __devexit hifn_fini(void) +static void __exit hifn_fini(void) { pci_unregister_driver(&hifn_pci_driver); -- cgit v1.2.3 From 08ced854fc4a979d9e59ba01000bf96e7057cfbc Mon Sep 17 00:00:00 2001 From: Alexander Clouter Date: Wed, 3 Jun 2009 19:28:03 +1000 Subject: hwrng: timeriomem - Fix potential oops (request_mem_region/__devinit) Fixed oops when calling device_unregister followed by device_register (changing __init to __devinit) and removed request_mem_region() as platform_device_register already does this which can result in EBUSY Signed-off-by: Alexander Clouter Signed-off-by: Herbert Xu --- drivers/char/hw_random/timeriomem-rng.c | 26 ++++++-------------------- 1 file changed, 6 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c index dcd352ad0e7f..a94e930575f2 100644 --- a/drivers/char/hw_random/timeriomem-rng.c +++ b/drivers/char/hw_random/timeriomem-rng.c @@ -88,9 +88,9 @@ static struct hwrng timeriomem_rng_ops = { .priv = 0, }; -static int __init timeriomem_rng_probe(struct platform_device *pdev) +static int __devinit timeriomem_rng_probe(struct 
platform_device *pdev) { - struct resource *res, *mem; + struct resource *res; int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -98,21 +98,12 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev) if (!res) return -ENOENT; - mem = request_mem_region(res->start, res->end - res->start + 1, - pdev->name); - if (mem == NULL) - return -EBUSY; - - dev_set_drvdata(&pdev->dev, mem); - timeriomem_rng_data = pdev->dev.platform_data; timeriomem_rng_data->address = ioremap(res->start, res->end - res->start + 1); - if (!timeriomem_rng_data->address) { - ret = -ENOMEM; - goto err_ioremap; - } + if (!timeriomem_rng_data->address) + return -EIO; if (timeriomem_rng_data->period != 0 && usecs_to_jiffies(timeriomem_rng_data->period) > 0) { @@ -125,7 +116,7 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev) ret = hwrng_register(&timeriomem_rng_ops); if (ret) - goto err_register; + goto failed; dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n", timeriomem_rng_data->address, @@ -133,24 +124,19 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev) return 0; -err_register: +failed: dev_err(&pdev->dev, "problem registering\n"); iounmap(timeriomem_rng_data->address); -err_ioremap: - release_resource(mem); return ret; } static int __devexit timeriomem_rng_remove(struct platform_device *pdev) { - struct resource *mem = dev_get_drvdata(&pdev->dev); - del_timer_sync(&timeriomem_rng_timer); hwrng_unregister(&timeriomem_rng_ops); iounmap(timeriomem_rng_data->address); - release_resource(mem); return 0; } -- cgit v1.2.3 From 25a52393270ca48c7d0848672ad4423313033c3d Mon Sep 17 00:00:00 2001 From: Joachim Fenkes Date: Wed, 3 Jun 2009 13:25:42 -0700 Subject: IB/ehca: Remove superfluous bitmasks from QP control block All the fields in the control block are nicely right-aligned, so no masking is necessary. 
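To make the "no masking is necessary" argument concrete, here is a simplified model of the accessors. These toy stand-ins are an assumption for illustration only; the real EHCA_BMASK macros live in ehca_tools.h and use IBM bit numbering.

	/* Toy stand-ins: a mask word encodes field width and shift. */
	#define FMASK(width, shift)	(((unsigned long)(width) << 16) | (shift))
	#define FGET(m, data)		(((data) >> ((m) & 0xffffUL)) & \
					 ((1UL << ((m) >> 16)) - 1))

	/*
	 * A right-aligned field has shift 0, so FGET() is just
	 * "data & width_mask"; because the control block only ever
	 * holds in-range values there, the mask is a no-op and
	 *	qp_attr->timeout = FGET(TIMEOUT, qpcb->timeout);
	 * collapses to
	 *	qp_attr->timeout = qpcb->timeout;
	 * which is exactly the simplification the hunks below apply.
	 */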
Signed-off-by: Joachim Fenkes Signed-off-by: Roland Dreier --- drivers/infiniband/hw/ehca/ehca_classes_pSeries.h | 28 ----------------------- drivers/infiniband/hw/ehca/ehca_qp.c | 18 ++++----------- 2 files changed, 5 insertions(+), 41 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h index 1798e6466bd0..689c35786dd2 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h +++ b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h @@ -165,7 +165,6 @@ struct hcp_modify_qp_control_block { #define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7) #define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8) #define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9) -#define MQPCB_QP_STATE EHCA_BMASK_IBM(24, 31) #define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11) #define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12) #define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13) @@ -176,60 +175,33 @@ struct hcp_modify_qp_control_block { #define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18) #define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19) #define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20) -#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24, 31) #define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21) -#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24, 31) #define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22) -#define MQPCB_DLID EHCA_BMASK_IBM(16, 31) #define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23) -#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29, 31) #define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24) -#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25, 31) #define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25) -#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24, 31) #define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26) -#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24, 31) #define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27) -#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24, 31) #define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28) -#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12, 31) #define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30) #define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31) -#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28, 31) #define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32) -#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31, 31) #define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33) -#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31) #define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34) -#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27, 31) #define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35) -#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24, 31) #define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36) -#define MQPCB_DLID_AL EHCA_BMASK_IBM(16, 31) #define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37) -#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31) #define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38) -#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25, 31) #define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39) -#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24, 31) #define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40) -#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24, 31) #define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41) -#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24, 31) #define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42) -#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12, 31) #define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44) 
#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45) -#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31) #define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46) -#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31) #define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47) -#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31, 31) -#define MQPCB_QP_NUMBER EHCA_BMASK_IBM( 8, 31) #define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48) -#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31, 31) #define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49) -#define MQPCB_CURR_SRQ_LIMIT EHCA_BMASK_IBM(16, 31) #define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50) #define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51) diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index ead4e718c082..0338f1fabe8a 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c @@ -1962,19 +1962,13 @@ int ehca_query_qp(struct ib_qp *qp, qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size; qp_attr->dest_qp_num = qpcb->dest_qp_nr; - qp_attr->pkey_index = - EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx); - - qp_attr->port_num = - EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port); - + qp_attr->pkey_index = qpcb->prim_p_key_idx; + qp_attr->port_num = qpcb->prim_phys_port; qp_attr->timeout = qpcb->timeout; qp_attr->retry_cnt = qpcb->retry_count; qp_attr->rnr_retry = qpcb->rnr_retry_count; - qp_attr->alt_pkey_index = - EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx); - + qp_attr->alt_pkey_index = qpcb->alt_p_key_idx; qp_attr->alt_port_num = qpcb->alt_phys_port; qp_attr->alt_timeout = qpcb->timeout_al; @@ -2061,8 +2055,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, update_mask |= EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1) | EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1); - mqpcb->curr_srq_limit = - EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit); + mqpcb->curr_srq_limit = attr->srq_limit; mqpcb->qp_aff_asyn_ev_log_reg = EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1); } @@ -2125,8 +2118,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr) srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1; srq_attr->max_sge = 3; - srq_attr->srq_limit = EHCA_BMASK_GET( - MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit); + srq_attr->srq_limit = qpcb->curr_srq_limit; if (ehca_debug_level >= 2) ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num); -- cgit v1.2.3 From 1fd1c624362819ecc36db2458c6a972c48ae92d6 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 3 Jun 2009 07:26:58 +0000 Subject: drm/i915: Save/restore cursor state on suspend/resume. This may fix cursor corruption in X on resume, which would persist until the cursor was hidden and then shown again. V2: Also include the cursor control regs. 
Signed-off-by: Eric Anholt Reviewed-by: Jesse Barnes --- drivers/gpu/drm/i915/i915_drv.h | 8 ++++++++ drivers/gpu/drm/i915/i915_suspend.c | 20 ++++++++++++++++++++ 2 files changed, 28 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c431fa54bbb5..fcaa5444daa0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -285,6 +285,13 @@ typedef struct drm_i915_private { u8 saveDACMASK; u8 saveCR[37]; uint64_t saveFENCE[16]; + u32 saveCURACNTR; + u32 saveCURAPOS; + u32 saveCURABASE; + u32 saveCURBCNTR; + u32 saveCURBPOS; + u32 saveCURBBASE; + u32 saveCURSIZE; struct { struct drm_mm gtt_space; @@ -642,6 +649,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, void i915_gem_free_all_phys_object(struct drm_device *dev); int i915_gem_object_get_pages(struct drm_gem_object *obj); void i915_gem_object_put_pages(struct drm_gem_object *obj); +void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); /* i915_gem_tiling.c */ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index ce8a21344a71..a98e2831ed31 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -295,6 +295,16 @@ int i915_save_state(struct drm_device *dev) i915_save_palette(dev, PIPE_B); dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); + /* Cursor state */ + dev_priv->saveCURACNTR = I915_READ(CURACNTR); + dev_priv->saveCURAPOS = I915_READ(CURAPOS); + dev_priv->saveCURABASE = I915_READ(CURABASE); + dev_priv->saveCURBCNTR = I915_READ(CURBCNTR); + dev_priv->saveCURBPOS = I915_READ(CURBPOS); + dev_priv->saveCURBBASE = I915_READ(CURBBASE); + if (!IS_I9XX(dev)) + dev_priv->saveCURSIZE = I915_READ(CURSIZE); + /* CRT state */ dev_priv->saveADPA = I915_READ(ADPA); @@ -480,6 +490,16 @@ int i915_restore_state(struct drm_device *dev) I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); + /* Cursor state */ + I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); + I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); + I915_WRITE(CURABASE, dev_priv->saveCURABASE); + I915_WRITE(CURBPOS, dev_priv->saveCURBPOS); + I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR); + I915_WRITE(CURBBASE, dev_priv->saveCURBBASE); + if (!IS_I9XX(dev)) + I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); + /* CRT state */ I915_WRITE(ADPA, dev_priv->saveADPA); -- cgit v1.2.3 From b962442e46a9340bdbc6711982c59ff0cc2b5afb Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 3 Jun 2009 07:27:35 +0000 Subject: drm/i915: Change GEM throttling to be 20ms like the comment says. keithp didn't like the original 20ms plan because a cooperative client could be starved by an uncooperative client. There may even have been problems with cooperative clients versus cooperative clients. So keithp changed throttle to just wait for the second to last seqno emitted by that client. It worked well, until we started getting more round-trips to the server due to DRI2 -- the server throttles in BlockHandler, and so if you did more than one round trip after finishing your frame, you'd end up unintentionally syncing to the swap. Fix this by keeping track of the client's requests, so the client can wait when it has an outstanding request over 20ms old. This should have non-starving behavior, good behavior in the presence of restarts, and less waiting. Improves high-settings openarena performance on my GM45 by 50%. 
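The core of the new scheme, condensed from the i915_gem_ring_throttle() hunk that follows (locking elided; not a drop-in): the 20 ms cutoff is computed once, before the loop, so a client whose frames take longer than 20 ms still drains its queue instead of looping forever.

	/* Condensed from the patch: wait only on this client's requests
	 * that were emitted more than 20 ms before the throttle call. */
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);

	while (!list_empty(&i915_file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request =
			list_first_entry(&i915_file_priv->mm.request_list,
					 struct drm_i915_gem_request,
					 client_list);

		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;	/* everything remaining is younger than 20 ms */

		if (i915_wait_request(dev, request->seqno) != 0)
			break;
	}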
Signed-off-by: Eric Anholt Reviewed-by: Jesse Barnes --- drivers/gpu/drm/i915/i915_dma.c | 4 +-- drivers/gpu/drm/i915/i915_drv.h | 7 +++-- drivers/gpu/drm/i915/i915_gem.c | 69 ++++++++++++++++++++++++++++++++--------- 3 files changed, 61 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 53d544552625..0c222c28b8c1 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1273,8 +1273,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) file_priv->driver_priv = i915_file_priv; - i915_file_priv->mm.last_gem_seqno = 0; - i915_file_priv->mm.last_gem_throttle_seqno = 0; + INIT_LIST_HEAD(&i915_file_priv->mm.request_list); return 0; } @@ -1311,6 +1310,7 @@ void i915_driver_lastclose(struct drm_device * dev) void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; + i915_gem_release(dev, file_priv); if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_mem_release(dev, file_priv, dev_priv->agp_heap); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index fcaa5444daa0..e0fac5f62c69 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -498,13 +498,16 @@ struct drm_i915_gem_request { /** Time at which this request was emitted, in jiffies. */ unsigned long emitted_jiffies; + /** global list entry for this request */ struct list_head list; + + /** file_priv list entry for this request */ + struct list_head client_list; }; struct drm_i915_file_private { struct { - uint32_t last_gem_seqno; - uint32_t last_gem_throttle_seqno; + struct list_head request_list; } mm; }; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 39f5c658ef5e..3fbd8a0c40d1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1481,14 +1481,19 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) * Returned sequence numbers are nonzero on success. */ static uint32_t -i915_add_request(struct drm_device *dev, uint32_t flush_domains) +i915_add_request(struct drm_device *dev, struct drm_file *file_priv, + uint32_t flush_domains) { drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_file_private *i915_file_priv = NULL; struct drm_i915_gem_request *request; uint32_t seqno; int was_empty; RING_LOCALS; + if (file_priv != NULL) + i915_file_priv = file_priv->driver_priv; + request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER); if (request == NULL) return 0; @@ -1515,6 +1520,12 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains) request->emitted_jiffies = jiffies; was_empty = list_empty(&dev_priv->mm.request_list); list_add_tail(&request->list, &dev_priv->mm.request_list); + if (i915_file_priv) { + list_add_tail(&request->client_list, + &i915_file_priv->mm.request_list); + } else { + INIT_LIST_HEAD(&request->client_list); + } /* Associate any objects on the flushing list matching the write * domain we're flushing with our flush. 
@@ -1664,6 +1675,7 @@ i915_gem_retire_requests(struct drm_device *dev) i915_gem_retire_request(dev, request); list_del(&request->list); + list_del(&request->client_list); drm_free(request, sizeof(*request), DRM_MEM_DRIVER); } else break; @@ -1977,7 +1989,7 @@ i915_gem_evict_something(struct drm_device *dev) i915_gem_flush(dev, obj->write_domain, obj->write_domain); - i915_add_request(dev, obj->write_domain); + i915_add_request(dev, NULL, obj->write_domain); obj = NULL; continue; @@ -2248,7 +2260,7 @@ try_again: i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); - seqno = i915_add_request(dev, + seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); if (seqno == 0) return -ENOMEM; @@ -2452,7 +2464,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) /* Queue the GPU write cache flushing we need. */ i915_gem_flush(dev, 0, obj->write_domain); - seqno = i915_add_request(dev, obj->write_domain); + seqno = i915_add_request(dev, NULL, obj->write_domain); obj->write_domain = 0; i915_gem_object_move_to_active(obj, seqno); } @@ -3089,6 +3101,10 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev, /* Throttle our rendering by waiting until the ring has completed our requests * emitted over 20 msec ago. * + * Note that if we were to use the current jiffies each time around the loop, + * we wouldn't escape the function with any frames outstanding if the time to + * render a frame was over 20ms. + * * This should get us reasonable parallelism between CPU and GPU but also * relatively low latency when blocking on a particular request to finish. */ @@ -3097,15 +3113,25 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) { struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; int ret = 0; - uint32_t seqno; + unsigned long recent_enough = jiffies - msecs_to_jiffies(20); mutex_lock(&dev->struct_mutex); - seqno = i915_file_priv->mm.last_gem_throttle_seqno; - i915_file_priv->mm.last_gem_throttle_seqno = - i915_file_priv->mm.last_gem_seqno; - if (seqno) - ret = i915_wait_request(dev, seqno); + while (!list_empty(&i915_file_priv->mm.request_list)) { + struct drm_i915_gem_request *request; + + request = list_first_entry(&i915_file_priv->mm.request_list, + struct drm_i915_gem_request, + client_list); + + if (time_after_eq(request->emitted_jiffies, recent_enough)) + break; + + ret = i915_wait_request(dev, request->seqno); + if (ret != 0) + break; + } mutex_unlock(&dev->struct_mutex); + return ret; } @@ -3187,7 +3213,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; struct drm_i915_gem_execbuffer *args = data; struct drm_i915_gem_exec_object *exec_list = NULL; struct drm_gem_object **object_list = NULL; @@ -3363,7 +3388,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, dev->invalidate_domains, dev->flush_domains); if (dev->flush_domains) - (void)i915_add_request(dev, dev->flush_domains); + (void)i915_add_request(dev, file_priv, + dev->flush_domains); } for (i = 0; i < args->buffer_count; i++) { @@ -3412,9 +3438,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, * *some* interrupts representing completion of buffers that we can * wait on when trying to clear up gtt space). 
*/ - seqno = i915_add_request(dev, flush_domains); + seqno = i915_add_request(dev, file_priv, flush_domains); BUG_ON(seqno == 0); - i915_file_priv->mm.last_gem_seqno = seqno; for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; @@ -3802,7 +3827,7 @@ i915_gem_idle(struct drm_device *dev) */ i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); - seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU); + seqno = i915_add_request(dev, NULL, ~I915_GEM_DOMAIN_CPU); if (seqno == 0) { mutex_unlock(&dev->struct_mutex); @@ -4352,3 +4377,17 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, drm_agp_chipset_flush(dev); return 0; } + +void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv) +{ + struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; + + /* Clean up our request list when the client is going away, so that + * later retire_requests won't dereference our soon-to-be-gone + * file_priv. + */ + mutex_lock(&dev->struct_mutex); + while (!list_empty(&i915_file_priv->mm.request_list)) + list_del_init(i915_file_priv->mm.request_list.next); + mutex_unlock(&dev->struct_mutex); +} -- cgit v1.2.3 From bbe281fad65f2eda1792d70763663a1b729fc03c Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Thu, 4 Jun 2009 15:44:25 +0200 Subject: HID: hidraw -- fix comment about accepted devices hidraw accepts any devices, no matter if the device has already been claimed by other HID driver (hid-input, hidraw), and this is intended to stay. Fix up the comment to reflect reality. Signed-off-by: Jiri Kosina --- drivers/hid/hidraw.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 00ccf4b1985d..0c6639ea03dd 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -349,10 +349,7 @@ int hidraw_connect(struct hid_device *hid) int minor, result; struct hidraw *dev; - /* TODO currently we accept any HID device. This should later - * probably be fixed to accept only those devices which provide - * non-input applications - */ + /* we accept any HID device, no matter the applications */ dev = kzalloc(sizeof(struct hidraw), GFP_KERNEL); if (!dev) -- cgit v1.2.3 From 70fa9f2eadea7ca35bdcd89b20d555934593b40c Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Thu, 4 Jun 2009 15:48:38 +0200 Subject: HID: no more reinitializtion is needed in post_reset No more reinitialization is needed in the post reset hook, remove the FIXME comment. While at it, clean up whitespaces in the immediate surrounding. Signed-off-by: Jiri Kosina --- drivers/hid/usbhid/hid-core.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index ac8049b5f1e9..76c4bbe9dccb 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -1234,12 +1234,11 @@ static int hid_post_reset(struct usb_interface *intf) struct hid_device *hid = usb_get_intfdata(intf); struct usbhid_device *usbhid = hid->driver_data; int status; - + spin_lock_irq(&usbhid->lock); clear_bit(HID_RESET_PENDING, &usbhid->iofl); spin_unlock_irq(&usbhid->lock); hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0); - /* FIXME: Any more reinitialization needed? 
*/ status = hid_start_in(hid); if (status < 0) hid_io_error(hid); @@ -1251,14 +1250,14 @@ static int hid_post_reset(struct usb_interface *intf) int usbhid_get_power(struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; - + return usb_autopm_get_interface(usbhid->intf); } void usbhid_put_power(struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; - + usb_autopm_put_interface(usbhid->intf); } -- cgit v1.2.3 From 5926a295bb78272b3f648f62febecd19a1b6a6ca Mon Sep 17 00:00:00 2001 From: Alessandro Rubini Date: Thu, 4 Jun 2009 17:43:04 +0100 Subject: [ARM] 5541/1: serial/amba-pl011.c: add support for the modified port found in Nomadik The Nomadik 8815 SoC has a slightly modified version of the PL011 block. The patch uses the different ID value as a key to select a vendor structure that is used to keep track of the differences, as suggested by Russell King. Signed-off-by: Alessandro Rubini Acked-by: Andrea Gallo Acked-by: Linus Walleij Signed-off-by: Russell King --- drivers/serial/amba-pl011.c | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c index 4cfa1eb26892..8c5bda27736c 100644 --- a/drivers/serial/amba-pl011.c +++ b/drivers/serial/amba-pl011.c @@ -70,6 +70,23 @@ struct uart_amba_port { struct clk *clk; unsigned int im; /* interrupt mask */ unsigned int old_status; + unsigned int ifls; /* vendor-specific */ +}; + +/* There is by now at least one vendor with differing details, so handle it */ +struct vendor_data { + unsigned int ifls; + unsigned int fifosize; +}; + +static struct vendor_data vendor_arm = { + .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8, + .fifosize = 16, +}; + +static struct vendor_data vendor_st = { + .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF, + .fifosize = 64, }; static void pl011_stop_tx(struct uart_port *port) @@ -360,8 +377,7 @@ static int pl011_startup(struct uart_port *port) if (retval) goto clk_dis; - writew(UART011_IFLS_RX4_8|UART011_IFLS_TX4_8, - uap->port.membase + UART011_IFLS); + writew(uap->ifls, uap->port.membase + UART011_IFLS); /* * Provoke TX FIFO interrupt into asserting. 
@@ -732,6 +748,7 @@ static struct uart_driver amba_reg = { static int pl011_probe(struct amba_device *dev, struct amba_id *id) { struct uart_amba_port *uap; + struct vendor_data *vendor = id->data; void __iomem *base; int i, ret; @@ -762,12 +779,13 @@ static int pl011_probe(struct amba_device *dev, struct amba_id *id) goto unmap; } + uap->ifls = vendor->ifls; uap->port.dev = &dev->dev; uap->port.mapbase = dev->res.start; uap->port.membase = base; uap->port.iotype = UPIO_MEM; uap->port.irq = dev->irq[0]; - uap->port.fifosize = 16; + uap->port.fifosize = vendor->fifosize; uap->port.ops = &amba_pl011_pops; uap->port.flags = UPF_BOOT_AUTOCONF; uap->port.line = i; @@ -812,6 +830,12 @@ static struct amba_id pl011_ids[] __initdata = { { .id = 0x00041011, .mask = 0x000fffff, + .data = &vendor_arm, + }, + { + .id = 0x00380802, + .mask = 0x00ffffff, + .data = &vendor_st, }, { 0, 0 }, }; -- cgit v1.2.3 From f23d4911319fdebffd0529b31bb66d324ef287e6 Mon Sep 17 00:00:00 2001 From: Eric Miao Date: Mon, 13 Apr 2009 14:43:25 +0800 Subject: [ARM] pxa: add platform device ID table to pxa i2c driver Signed-off-by: Eric Miao --- drivers/i2c/busses/i2c-pxa.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index acc7143d9655..e4ee8835b97f 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -34,11 +34,25 @@ #include #include -#include #include #include #include +/* + * I2C register offsets will be shifted 0 or 1 bit left, depending on + * different SoCs + */ +#define REG_SHIFT_0 (0 << 0) +#define REG_SHIFT_1 (1 << 0) +#define REG_SHIFT(d) ((d) & 0x1) + +static const struct platform_device_id i2c_pxa_id_table[] = { + { "pxa2xx-i2c", REG_SHIFT_1 }, + { "pxa3xx-pwri2c", REG_SHIFT_0 }, + { }, +}; +MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table); + /* * I2C registers and bit definitions */ @@ -985,6 +999,7 @@ static int i2c_pxa_probe(struct platform_device *dev) struct pxa_i2c *i2c; struct resource *res; struct i2c_pxa_platform_data *plat = dev->dev.platform_data; + struct platform_device_id *id = platform_get_device_id(dev); int ret; int irq; @@ -1028,7 +1043,7 @@ static int i2c_pxa_probe(struct platform_device *dev) ret = -EIO; goto eremap; } - i2c->reg_shift = (cpu_is_pxa3xx() && (dev->id == 1)) ? 
0 : 1; + i2c->reg_shift = REG_SHIFT(id->driver_data); i2c->iobase = res->start; i2c->iosize = res_len(res); @@ -1150,6 +1165,7 @@ static struct platform_driver i2c_pxa_driver = { .name = "pxa2xx-i2c", .owner = THIS_MODULE, }, + .id_table = i2c_pxa_id_table, }; static int __init i2c_adap_pxa_init(void) -- cgit v1.2.3 From f0a83701399123b0e95cc4d949fcccf9941fd190 Mon Sep 17 00:00:00 2001 From: Eric Miao Date: Mon, 13 Apr 2009 15:03:11 +0800 Subject: [ARM] pxa: move mach/i2c.h to plat/i2c.h Signed-off-by: Paul Shen Signed-off-by: Eric Miao --- drivers/i2c/busses/i2c-pxa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index e4ee8835b97f..035a6c7e59df 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -36,7 +36,7 @@ #include #include -#include +#include /* * I2C register offsets will be shifted 0 or 1 bit left, depending on -- cgit v1.2.3 From 80153d1bcc6a20361d5974f37d3729583ba99154 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Tue, 12 May 2009 19:37:20 +0000 Subject: [ARM] pxa/stargate2: Add board specific elements to the smc91x driver Signed-off-by: Jonathan Cameron Signed-off-by: Eric Miao --- drivers/net/smc91x.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 329f890e2903..f1f773b17fe1 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -45,7 +45,8 @@ defined(CONFIG_MACH_ZYLONITE) ||\ defined(CONFIG_MACH_LITTLETON) ||\ defined(CONFIG_MACH_ZYLONITE2) ||\ - defined(CONFIG_ARCH_VIPER) + defined(CONFIG_ARCH_VIPER) ||\ + defined(CONFIG_MACH_STARGATE2) #include @@ -73,7 +74,7 @@ /* We actually can't write halfwords properly if not word aligned */ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) { - if (machine_is_mainstone() && reg & 2) { + if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) { unsigned int v = val << 16; v |= readl(ioaddr + (reg & ~2)) & 0xffff; writel(v, ioaddr + (reg & ~2)); -- cgit v1.2.3 From 5aeb1a5e9f2eced482805eeb154baf77ea53c8ce Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Tue, 12 May 2009 19:37:21 +0000 Subject: [ARM] pxa/stargate2: add support for Compact Flash/PCMCIA Signed-off-by: Jonathan Cameron Signed-off-by: Eric Miao --- drivers/pcmcia/Kconfig | 2 +- drivers/pcmcia/Makefile | 1 + drivers/pcmcia/pxa2xx_stargate2.c | 174 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 176 insertions(+), 1 deletion(-) create mode 100644 drivers/pcmcia/pxa2xx_stargate2.c (limited to 'drivers') diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index 276473543982..fbf965b31c14 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig @@ -217,7 +217,7 @@ config PCMCIA_PXA2XX depends on ARM && ARCH_PXA && PCMCIA depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \ || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \ - || ARCH_VIPER || ARCH_PXA_ESERIES) + || ARCH_VIPER || ARCH_PXA_ESERIES || MACH_STARGATE2) help Say Y here to include support for the PXA2xx PCMCIA controller diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile index bbac46327227..047394d98ac2 100644 --- a/drivers/pcmcia/Makefile +++ b/drivers/pcmcia/Makefile @@ -73,5 +73,6 @@ pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o 
+pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_core.o $(pxa2xx-obj-y) diff --git a/drivers/pcmcia/pxa2xx_stargate2.c b/drivers/pcmcia/pxa2xx_stargate2.c new file mode 100644 index 000000000000..490749ea677f --- /dev/null +++ b/drivers/pcmcia/pxa2xx_stargate2.c @@ -0,0 +1,174 @@ +/* + * linux/drivers/pcmcia/pxa2xx_stargate2.c + * + * Stargate 2 PCMCIA specific routines. + * + * Created: December 6, 2005 + * Author: Ed C. Epp + * Copyright: Intel Corp 2005 + * Jonathan Cameron 2009 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "soc_common.h" + +#define SG2_S0_BUFF_CTL 120 +#define SG2_S0_POWER_CTL 108 +#define SG2_S0_GPIO_RESET 82 +#define SG2_S0_GPIO_DETECT 53 +#define SG2_S0_GPIO_READY 81 + +static struct pcmcia_irqs irqs[] = { + { 0, IRQ_GPIO(SG2_S0_GPIO_DETECT), "PCMCIA0 CD" }, +}; + +static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt) +{ + skt->irq = IRQ_GPIO(SG2_S0_GPIO_READY); + return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); +} + +static void sg2_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) +{ + soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); +} + +static void sg2_pcmcia_socket_state(struct soc_pcmcia_socket *skt, + struct pcmcia_state *state) +{ + state->detect = !gpio_get_value(SG2_S0_GPIO_DETECT); + state->ready = !!gpio_get_value(SG2_S0_GPIO_READY); + state->bvd1 = 0; /* not available - battery detect on card */ + state->bvd2 = 0; /* not available */ + state->vs_3v = 1; /* not available - voltage detect for card */ + state->vs_Xv = 0; /* not available */ + state->wrprot = 0; /* not available - write protect */ +} + +static int sg2_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, + const socket_state_t *state) +{ + /* Enable card power */ + switch (state->Vcc) { + case 0: + /* sets power ctl register high */ + gpio_set_value(SG2_S0_POWER_CTL, 1); + break; + case 33: + case 50: + /* sets power control register low (clear) */ + gpio_set_value(SG2_S0_POWER_CTL, 0); + msleep(100); + break; + default: + pr_err("%s(): bad Vcc %u\n", + __func__, state->Vcc); + return -1; + } + + /* reset */ + gpio_set_value(SG2_S0_GPIO_RESET, !!(state->flags & SS_RESET)); + + return 0; +} + +static void sg2_pcmcia_socket_init(struct soc_pcmcia_socket *skt) +{ + soc_pcmcia_enable_irqs(skt, irqs, ARRAY_SIZE(irqs)); +} + +static void sg2_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) +{ + soc_pcmcia_disable_irqs(skt, irqs, ARRAY_SIZE(irqs)); +} + +static struct pcmcia_low_level sg2_pcmcia_ops __initdata = { + .owner = THIS_MODULE, + .hw_init = sg2_pcmcia_hw_init, + .hw_shutdown = sg2_pcmcia_hw_shutdown, + .socket_state = sg2_pcmcia_socket_state, + .configure_socket = sg2_pcmcia_configure_socket, + .socket_init = sg2_pcmcia_socket_init, + .socket_suspend = sg2_pcmcia_socket_suspend, + .nr = 1, +}; + +static struct platform_device *sg2_pcmcia_device; + +static int __init sg2_pcmcia_init(void) +{ + int ret; + + if (!machine_is_stargate2()) + return -ENODEV; + + sg2_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); + if (!sg2_pcmcia_device) + return -ENOMEM; + + ret = gpio_request(SG2_S0_BUFF_CTL, "SG2 CF buff ctl"); + if (ret) + goto error_put_platform_device; + ret = gpio_request(SG2_S0_POWER_CTL, "SG2 CF power ctl"); + if (ret) + goto 
error_free_gpio_buff_ctl; + ret = gpio_request(SG2_S0_GPIO_RESET, "SG2 CF reset"); + if (ret) + goto error_free_gpio_power_ctl; + /* Set gpio directions */ + gpio_direction_output(SG2_S0_BUFF_CTL, 0); + gpio_direction_output(SG2_S0_POWER_CTL, 1); + gpio_direction_output(SG2_S0_GPIO_RESET, 1); + + ret = platform_device_add_data(sg2_pcmcia_device, + &sg2_pcmcia_ops, + sizeof(sg2_pcmcia_ops)); + if (ret) + goto error_free_gpio_reset; + + ret = platform_device_add(sg2_pcmcia_device); + if (ret) + goto error_free_gpio_reset; + + return 0; +error_free_gpio_reset: + gpio_free(SG2_S0_GPIO_RESET); +error_free_gpio_power_ctl: + gpio_free(SG2_S0_POWER_CTL); +error_free_gpio_buff_ctl: + gpio_free(SG2_S0_BUFF_CTL); +error_put_platform_device: + platform_device_put(sg2_pcmcia_device); + + return ret; +} + +static void __exit sg2_pcmcia_exit(void) +{ + platform_device_unregister(sg2_pcmcia_device); + gpio_free(SG2_S0_BUFF_CTL); + gpio_free(SG2_S0_POWER_CTL); + gpio_free(SG2_S0_GPIO_RESET); +} + +fs_initcall(sg2_pcmcia_init); +module_exit(sg2_pcmcia_exit); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:pxa2xx-pcmcia"); -- cgit v1.2.3 From 32cb055b57eab803ea82b76dc913b0378e5af145 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 5 Jun 2009 15:38:36 +0800 Subject: agp/intel: Add support for new chipsets Both desktop and mobile versions are added. Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt --- drivers/char/agp/intel-agp.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 3686912427ba..7a748fa0dfce 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -46,6 +46,10 @@ #define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 +#define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040 +#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042 +#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044 +#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046 /* cover 915 and 945 variants */ #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ @@ -75,7 +79,9 @@ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB) + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB) extern int agp_memory_reserved; @@ -1211,6 +1217,8 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) case PCI_DEVICE_ID_INTEL_Q45_HB: case PCI_DEVICE_ID_INTEL_G45_HB: case PCI_DEVICE_ID_INTEL_G41_HB: + case PCI_DEVICE_ID_INTEL_IGDNG_D_HB: + case PCI_DEVICE_ID_INTEL_IGDNG_M_HB: *gtt_offset = *gtt_size = MB(2); break; default: @@ -2186,6 +2194,10 @@ static const struct intel_driver_description { "G45/G43", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, "G41", NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0, + "IGDNG/D", NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0, + "IGDNG/M", NULL, &intel_i965_driver }, { 0, 0, 0, NULL, NULL, NULL } }; @@ -2387,6 +2399,8 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_Q45_HB), 
ID(PCI_DEVICE_ID_INTEL_G45_HB), ID(PCI_DEVICE_ID_INTEL_G41_HB), + ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB), + ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB), { } }; -- cgit v1.2.3 From 280da227c870a50f669de0c8d46bfb2c62da9995 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 5 Jun 2009 15:38:37 +0800 Subject: drm/i915: Add chipset/feature defines for for new chipsets Signed-off-by: Zhenyu Wang [anholt: dropped drm_pciids.h hunk to avoid loading an incomplete driver] Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_drv.h | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e0fac5f62c69..ded9e786883e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -796,7 +796,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); (dev)->pci_device == 0x2E02 || \ (dev)->pci_device == 0x2E12 || \ (dev)->pci_device == 0x2E22 || \ - (dev)->pci_device == 0x2E32) + (dev)->pci_device == 0x2E32 || \ + (dev)->pci_device == 0x0042 || \ + (dev)->pci_device == 0x0046) #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \ (dev)->pci_device == 0x2A12) @@ -818,20 +820,26 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); (dev)->pci_device == 0x29D2 || \ (IS_IGD(dev))) +#define IS_IGDNG_D(dev) ((dev)->pci_device == 0x0042) +#define IS_IGDNG_M(dev) ((dev)->pci_device == 0x0046) +#define IS_IGDNG(dev) (IS_IGDNG_D(dev) || IS_IGDNG_M(dev)) + #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ - IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) + IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \ + IS_IGDNG(dev)) #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \ - IS_IGD(dev)) + IS_IGD(dev) || IS_IGDNG_M(dev)) -#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) +#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \ + IS_IGDNG(dev)) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. 
*/ #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ IS_I915GM(dev))) -#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev)) +#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) #define PRIMARY_RINGBUFFER_SIZE (128*1024) -- cgit v1.2.3 From b9055052d3e0388b4a5e8c3e0bbab665c5996f50 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 5 Jun 2009 15:38:38 +0800 Subject: drm/i915: Add new chipset register definitions Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_reg.h | 447 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 447 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 375569d01d01..99681cfb7ab9 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -450,6 +450,13 @@ #define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) #define PLL_REF_INPUT_MASK (3 << 13) #define PLL_LOAD_PULSE_PHASE_SHIFT 9 +/* IGDNG */ +# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9 +# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9) +# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9) +# define DPLL_FPA1_P1_POST_DIV_SHIFT 0 +# define DPLL_FPA1_P1_POST_DIV_MASK 0xff + /* * Parallel to Serial Load Pulse phase selection. * Selects the phase for the 10X DPLL clock for the PCIe @@ -1517,4 +1524,444 @@ # define VGA_2X_MODE (1 << 30) # define VGA_PIPE_B_SELECT (1 << 29) +/* IGDNG */ + +#define CPU_VGACNTRL 0x41000 + +#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030 +#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4) +#define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2) +#define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2) +#define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2) +#define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2) +#define DIGITAL_PORTA_NO_DETECT (0 << 0) +#define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1) +#define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0) + +/* refresh rate hardware control */ +#define RR_HW_CTL 0x45300 +#define RR_HW_LOW_POWER_FRAMES_MASK 0xff +#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00 + +#define FDI_PLL_BIOS_0 0x46000 +#define FDI_PLL_BIOS_1 0x46004 +#define FDI_PLL_BIOS_2 0x46008 +#define DISPLAY_PORT_PLL_BIOS_0 0x4600c +#define DISPLAY_PORT_PLL_BIOS_1 0x46010 +#define DISPLAY_PORT_PLL_BIOS_2 0x46014 + +#define FDI_PLL_FREQ_CTL 0x46030 +#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) +#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 +#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff + + +#define PIPEA_DATA_M1 0x60030 +#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ +#define TU_SIZE_MASK 0x7e000000 +#define PIPEA_DATA_M1_OFFSET 0 +#define PIPEA_DATA_N1 0x60034 +#define PIPEA_DATA_N1_OFFSET 0 + +#define PIPEA_DATA_M2 0x60038 +#define PIPEA_DATA_M2_OFFSET 0 +#define PIPEA_DATA_N2 0x6003c +#define PIPEA_DATA_N2_OFFSET 0 + +#define PIPEA_LINK_M1 0x60040 +#define PIPEA_LINK_M1_OFFSET 0 +#define PIPEA_LINK_N1 0x60044 +#define PIPEA_LINK_N1_OFFSET 0 + +#define PIPEA_LINK_M2 0x60048 +#define PIPEA_LINK_M2_OFFSET 0 +#define PIPEA_LINK_N2 0x6004c +#define PIPEA_LINK_N2_OFFSET 0 + +/* PIPEB timing regs are same start from 0x61000 */ + +#define PIPEB_DATA_M1 0x61030 +#define PIPEB_DATA_M1_OFFSET 0 +#define PIPEB_DATA_N1 0x61034 +#define PIPEB_DATA_N1_OFFSET 0 + +#define PIPEB_DATA_M2 0x61038 +#define PIPEB_DATA_M2_OFFSET 0 +#define PIPEB_DATA_N2 0x6103c +#define PIPEB_DATA_N2_OFFSET 0 + +#define PIPEB_LINK_M1 0x61040 +#define PIPEB_LINK_M1_OFFSET 0 +#define 
PIPEB_LINK_N1 0x61044 +#define PIPEB_LINK_N1_OFFSET 0 + +#define PIPEB_LINK_M2 0x61048 +#define PIPEB_LINK_M2_OFFSET 0 +#define PIPEB_LINK_N2 0x6104c +#define PIPEB_LINK_N2_OFFSET 0 + +/* CPU panel fitter */ +#define PFA_CTL_1 0x68080 +#define PFB_CTL_1 0x68880 +#define PF_ENABLE (1<<31) + +/* legacy palette */ +#define LGC_PALETTE_A 0x4a000 +#define LGC_PALETTE_B 0x4a800 + +/* interrupts */ +#define DE_MASTER_IRQ_CONTROL (1 << 31) +#define DE_SPRITEB_FLIP_DONE (1 << 29) +#define DE_SPRITEA_FLIP_DONE (1 << 28) +#define DE_PLANEB_FLIP_DONE (1 << 27) +#define DE_PLANEA_FLIP_DONE (1 << 26) +#define DE_PCU_EVENT (1 << 25) +#define DE_GTT_FAULT (1 << 24) +#define DE_POISON (1 << 23) +#define DE_PERFORM_COUNTER (1 << 22) +#define DE_PCH_EVENT (1 << 21) +#define DE_AUX_CHANNEL_A (1 << 20) +#define DE_DP_A_HOTPLUG (1 << 19) +#define DE_GSE (1 << 18) +#define DE_PIPEB_VBLANK (1 << 15) +#define DE_PIPEB_EVEN_FIELD (1 << 14) +#define DE_PIPEB_ODD_FIELD (1 << 13) +#define DE_PIPEB_LINE_COMPARE (1 << 12) +#define DE_PIPEB_VSYNC (1 << 11) +#define DE_PIPEB_FIFO_UNDERRUN (1 << 8) +#define DE_PIPEA_VBLANK (1 << 7) +#define DE_PIPEA_EVEN_FIELD (1 << 6) +#define DE_PIPEA_ODD_FIELD (1 << 5) +#define DE_PIPEA_LINE_COMPARE (1 << 4) +#define DE_PIPEA_VSYNC (1 << 3) +#define DE_PIPEA_FIFO_UNDERRUN (1 << 0) + +#define DEISR 0x44000 +#define DEIMR 0x44004 +#define DEIIR 0x44008 +#define DEIER 0x4400c + +/* GT interrupt */ +#define GT_SYNC_STATUS (1 << 2) +#define GT_USER_INTERRUPT (1 << 0) + +#define GTISR 0x44010 +#define GTIMR 0x44014 +#define GTIIR 0x44018 +#define GTIER 0x4401c + +/* PCH */ + +/* south display engine interrupt */ +#define SDE_CRT_HOTPLUG (1 << 11) +#define SDE_PORTD_HOTPLUG (1 << 10) +#define SDE_PORTC_HOTPLUG (1 << 9) +#define SDE_PORTB_HOTPLUG (1 << 8) +#define SDE_SDVOB_HOTPLUG (1 << 6) + +#define SDEISR 0xc4000 +#define SDEIMR 0xc4004 +#define SDEIIR 0xc4008 +#define SDEIER 0xc400c + +/* digital port hotplug */ +#define PCH_PORT_HOTPLUG 0xc4030 +#define PORTD_HOTPLUG_ENABLE (1 << 20) +#define PORTD_PULSE_DURATION_2ms (0) +#define PORTD_PULSE_DURATION_4_5ms (1 << 18) +#define PORTD_PULSE_DURATION_6ms (2 << 18) +#define PORTD_PULSE_DURATION_100ms (3 << 18) +#define PORTD_HOTPLUG_NO_DETECT (0) +#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) +#define PORTD_HOTPLUG_LONG_DETECT (1 << 17) +#define PORTC_HOTPLUG_ENABLE (1 << 12) +#define PORTC_PULSE_DURATION_2ms (0) +#define PORTC_PULSE_DURATION_4_5ms (1 << 10) +#define PORTC_PULSE_DURATION_6ms (2 << 10) +#define PORTC_PULSE_DURATION_100ms (3 << 10) +#define PORTC_HOTPLUG_NO_DETECT (0) +#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) +#define PORTC_HOTPLUG_LONG_DETECT (1 << 9) +#define PORTB_HOTPLUG_ENABLE (1 << 4) +#define PORTB_PULSE_DURATION_2ms (0) +#define PORTB_PULSE_DURATION_4_5ms (1 << 2) +#define PORTB_PULSE_DURATION_6ms (2 << 2) +#define PORTB_PULSE_DURATION_100ms (3 << 2) +#define PORTB_HOTPLUG_NO_DETECT (0) +#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) +#define PORTB_HOTPLUG_LONG_DETECT (1 << 1) + +#define PCH_GPIOA 0xc5010 +#define PCH_GPIOB 0xc5014 +#define PCH_GPIOC 0xc5018 +#define PCH_GPIOD 0xc501c +#define PCH_GPIOE 0xc5020 +#define PCH_GPIOF 0xc5024 + +#define PCH_DPLL_A 0xc6014 +#define PCH_DPLL_B 0xc6018 + +#define PCH_FPA0 0xc6040 +#define PCH_FPA1 0xc6044 +#define PCH_FPB0 0xc6048 +#define PCH_FPB1 0xc604c + +#define PCH_DPLL_TEST 0xc606c + +#define PCH_DREF_CONTROL 0xC6200 +#define DREF_CONTROL_MASK 0x7fc3 +#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13) +#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13) +#define 
DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3<<13) +#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13) +#define DREF_SSC_SOURCE_DISABLE (0<<11) +#define DREF_SSC_SOURCE_ENABLE (2<<11) +#define DREF_SSC_SOURCE_MASK (2<<11) +#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9) +#define DREF_NONSPREAD_CK505_ENABLE (1<<9) +#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9) +#define DREF_NONSPREAD_SOURCE_MASK (2<<9) +#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7) +#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7) +#define DREF_SSC4_DOWNSPREAD (0<<6) +#define DREF_SSC4_CENTERSPREAD (1<<6) +#define DREF_SSC1_DISABLE (0<<1) +#define DREF_SSC1_ENABLE (1<<1) +#define DREF_SSC4_DISABLE (0) +#define DREF_SSC4_ENABLE (1) + +#define PCH_RAWCLK_FREQ 0xc6204 +#define FDL_TP1_TIMER_SHIFT 12 +#define FDL_TP1_TIMER_MASK (3<<12) +#define FDL_TP2_TIMER_SHIFT 10 +#define FDL_TP2_TIMER_MASK (3<<10) +#define RAWCLK_FREQ_MASK 0x3ff + +#define PCH_DPLL_TMR_CFG 0xc6208 + +#define PCH_SSC4_PARMS 0xc6210 +#define PCH_SSC4_AUX_PARMS 0xc6214 + +/* transcoder */ + +#define TRANS_HTOTAL_A 0xe0000 +#define TRANS_HTOTAL_SHIFT 16 +#define TRANS_HACTIVE_SHIFT 0 +#define TRANS_HBLANK_A 0xe0004 +#define TRANS_HBLANK_END_SHIFT 16 +#define TRANS_HBLANK_START_SHIFT 0 +#define TRANS_HSYNC_A 0xe0008 +#define TRANS_HSYNC_END_SHIFT 16 +#define TRANS_HSYNC_START_SHIFT 0 +#define TRANS_VTOTAL_A 0xe000c +#define TRANS_VTOTAL_SHIFT 16 +#define TRANS_VACTIVE_SHIFT 0 +#define TRANS_VBLANK_A 0xe0010 +#define TRANS_VBLANK_END_SHIFT 16 +#define TRANS_VBLANK_START_SHIFT 0 +#define TRANS_VSYNC_A 0xe0014 +#define TRANS_VSYNC_END_SHIFT 16 +#define TRANS_VSYNC_START_SHIFT 0 + +#define TRANSA_DATA_M1 0xe0030 +#define TRANSA_DATA_N1 0xe0034 +#define TRANSA_DATA_M2 0xe0038 +#define TRANSA_DATA_N2 0xe003c +#define TRANSA_DP_LINK_M1 0xe0040 +#define TRANSA_DP_LINK_N1 0xe0044 +#define TRANSA_DP_LINK_M2 0xe0048 +#define TRANSA_DP_LINK_N2 0xe004c + +#define TRANS_HTOTAL_B 0xe1000 +#define TRANS_HBLANK_B 0xe1004 +#define TRANS_HSYNC_B 0xe1008 +#define TRANS_VTOTAL_B 0xe100c +#define TRANS_VBLANK_B 0xe1010 +#define TRANS_VSYNC_B 0xe1014 + +#define TRANSB_DATA_M1 0xe1030 +#define TRANSB_DATA_N1 0xe1034 +#define TRANSB_DATA_M2 0xe1038 +#define TRANSB_DATA_N2 0xe103c +#define TRANSB_DP_LINK_M1 0xe1040 +#define TRANSB_DP_LINK_N1 0xe1044 +#define TRANSB_DP_LINK_M2 0xe1048 +#define TRANSB_DP_LINK_N2 0xe104c + +#define TRANSACONF 0xf0008 +#define TRANSBCONF 0xf1008 +#define TRANS_DISABLE (0<<31) +#define TRANS_ENABLE (1<<31) +#define TRANS_STATE_MASK (1<<30) +#define TRANS_STATE_DISABLE (0<<30) +#define TRANS_STATE_ENABLE (1<<30) +#define TRANS_FSYNC_DELAY_HB1 (0<<27) +#define TRANS_FSYNC_DELAY_HB2 (1<<27) +#define TRANS_FSYNC_DELAY_HB3 (2<<27) +#define TRANS_FSYNC_DELAY_HB4 (3<<27) +#define TRANS_DP_AUDIO_ONLY (1<<26) +#define TRANS_DP_VIDEO_AUDIO (0<<26) +#define TRANS_PROGRESSIVE (0<<21) +#define TRANS_8BPC (0<<5) +#define TRANS_10BPC (1<<5) +#define TRANS_6BPC (2<<5) +#define TRANS_12BPC (3<<5) + +#define FDI_RXA_CHICKEN 0xc200c +#define FDI_RXB_CHICKEN 0xc2010 +#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1) + +/* CPU: FDI_TX */ +#define FDI_TXA_CTL 0x60100 +#define FDI_TXB_CTL 0x61100 +#define FDI_TX_DISABLE (0<<31) +#define FDI_TX_ENABLE (1<<31) +#define FDI_LINK_TRAIN_PATTERN_1 (0<<28) +#define FDI_LINK_TRAIN_PATTERN_2 (1<<28) +#define FDI_LINK_TRAIN_PATTERN_IDLE (2<<28) +#define FDI_LINK_TRAIN_NONE (3<<28) +#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0<<25) +#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1<<25) +#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2<<25) +#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3<<25) +#define 
FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22) +#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22) +#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22) +#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22) +#define FDI_DP_PORT_WIDTH_X1 (0<<19) +#define FDI_DP_PORT_WIDTH_X2 (1<<19) +#define FDI_DP_PORT_WIDTH_X3 (2<<19) +#define FDI_DP_PORT_WIDTH_X4 (3<<19) +#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18) +/* IGDNG: hardwired to 1 */ +#define FDI_TX_PLL_ENABLE (1<<14) +/* both Tx and Rx */ +#define FDI_SCRAMBLING_ENABLE (0<<7) +#define FDI_SCRAMBLING_DISABLE (1<<7) + +/* FDI_RX, FDI_X is hard-wired to Transcoder_X */ +#define FDI_RXA_CTL 0xf000c +#define FDI_RXB_CTL 0xf100c +#define FDI_RX_ENABLE (1<<31) +#define FDI_RX_DISABLE (0<<31) +/* train, dp width same as FDI_TX */ +#define FDI_DP_PORT_WIDTH_X8 (7<<19) +#define FDI_8BPC (0<<16) +#define FDI_10BPC (1<<16) +#define FDI_6BPC (2<<16) +#define FDI_12BPC (3<<16) +#define FDI_LINK_REVERSE_OVERWRITE (1<<15) +#define FDI_DMI_LINK_REVERSE_MASK (1<<14) +#define FDI_RX_PLL_ENABLE (1<<13) +#define FDI_FS_ERR_CORRECT_ENABLE (1<<11) +#define FDI_FE_ERR_CORRECT_ENABLE (1<<10) +#define FDI_FS_ERR_REPORT_ENABLE (1<<9) +#define FDI_FE_ERR_REPORT_ENABLE (1<<8) +#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) +#define FDI_SEL_RAWCLK (0<<4) +#define FDI_SEL_PCDCLK (1<<4) + +#define FDI_RXA_MISC 0xf0010 +#define FDI_RXB_MISC 0xf1010 +#define FDI_RXA_TUSIZE1 0xf0030 +#define FDI_RXA_TUSIZE2 0xf0038 +#define FDI_RXB_TUSIZE1 0xf1030 +#define FDI_RXB_TUSIZE2 0xf1038 + +/* FDI_RX interrupt register format */ +#define FDI_RX_INTER_LANE_ALIGN (1<<10) +#define FDI_RX_SYMBOL_LOCK (1<<9) /* train 2 */ +#define FDI_RX_BIT_LOCK (1<<8) /* train 1 */ +#define FDI_RX_TRAIN_PATTERN_2_FAIL (1<<7) +#define FDI_RX_FS_CODE_ERR (1<<6) +#define FDI_RX_FE_CODE_ERR (1<<5) +#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1<<4) +#define FDI_RX_HDCP_LINK_FAIL (1<<3) +#define FDI_RX_PIXEL_FIFO_OVERFLOW (1<<2) +#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1) +#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0) + +#define FDI_RXA_IIR 0xf0014 +#define FDI_RXA_IMR 0xf0018 +#define FDI_RXB_IIR 0xf1014 +#define FDI_RXB_IMR 0xf1018 + +#define FDI_PLL_CTL_1 0xfe000 +#define FDI_PLL_CTL_2 0xfe004 + +/* CRT */ +#define PCH_ADPA 0xe1100 +#define ADPA_TRANS_SELECT_MASK (1<<30) +#define ADPA_TRANS_A_SELECT 0 +#define ADPA_TRANS_B_SELECT (1<<30) +#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */ +#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24) +#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24) +#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24) +#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24) +#define ADPA_CRT_HOTPLUG_ENABLE (1<<23) +#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22) +#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22) +#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21) +#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21) +#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20) +#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20) +#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18) +#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18) +#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18) +#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18) +#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17) +#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) +#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) + +/* or SDVOB */ +#define HDMIB 0xe1140 +#define PORT_ENABLE (1 << 31) +#define TRANSCODER_A (0) +#define TRANSCODER_B (1 << 30) +#define COLOR_FORMAT_8bpc (0) +#define COLOR_FORMAT_12bpc (3 << 26) +#define SDVOB_HOTPLUG_ENABLE (1 << 23) +#define SDVO_ENCODING (0) +#define TMDS_ENCODING (2 << 10) +#define NULL_PACKET_VSYNC_ENABLE (1 << 9) 
+#define SDVOB_BORDER_ENABLE (1 << 7) +#define AUDIO_ENABLE (1 << 6) +#define VSYNC_ACTIVE_HIGH (1 << 4) +#define HSYNC_ACTIVE_HIGH (1 << 3) +#define PORT_DETECTED (1 << 2) + +#define HDMIC 0xe1150 +#define HDMID 0xe1160 + +#define PCH_LVDS 0xe1180 +#define LVDS_DETECTED (1 << 1) + +#define BLC_PWM_CPU_CTL2 0x48250 +#define PWM_ENABLE (1 << 31) +#define PWM_PIPE_A (0 << 29) +#define PWM_PIPE_B (1 << 29) +#define BLC_PWM_CPU_CTL 0x48254 + +#define BLC_PWM_PCH_CTL1 0xc8250 +#define PWM_PCH_ENABLE (1 << 31) +#define PWM_POLARITY_ACTIVE_LOW (1 << 29) +#define PWM_POLARITY_ACTIVE_HIGH (0 << 29) +#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28) +#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28) + +#define BLC_PWM_PCH_CTL2 0xc8254 + +#define PCH_PP_STATUS 0xc7200 +#define PCH_PP_CONTROL 0xc7204 +#define EDP_FORCE_VDD (1 << 3) +#define EDP_BLC_ENABLE (1 << 2) +#define PANEL_POWER_RESET (1 << 1) +#define PANEL_POWER_OFF (0 << 0) +#define PANEL_POWER_ON (1 << 0) +#define PCH_PP_ON_DELAYS 0xc7208 +#define EDP_PANEL (1 << 30) +#define PCH_PP_OFF_DELAYS 0xc720c +#define PCH_PP_DIVISOR 0xc7210 + #endif /* _I915_REG_H_ */ -- cgit v1.2.3 From e170b030dcd6aed11dde2d124c09991ec771f529 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 5 Jun 2009 15:38:40 +0800 Subject: drm/i915: Disable opregion on IGDNG for now Disable OpRegion support for now until verified on new chipsets. Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_dma.c | 7 +++++-- drivers/gpu/drm/i915/i915_irq.c | 4 +++- 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 0c222c28b8c1..fa105bd119a6 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1206,7 +1206,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) } /* Must be done after probing outputs */ - intel_opregion_init(dev, 0); + /* FIXME: verify on IGDNG */ + if (!IS_IGDNG(dev)) + intel_opregion_init(dev, 0); return 0; @@ -1240,7 +1242,8 @@ int i915_driver_unload(struct drm_device *dev) if (dev_priv->regs != NULL) iounmap(dev_priv->regs); - intel_opregion_free(dev, 0); + if (!IS_IGDNG(dev)) + intel_opregion_free(dev, 0); if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_modeset_cleanup(dev); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 98bb4c878c4e..4b0bcbd58b8f 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -484,7 +484,9 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) void i915_enable_interrupt (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - opregion_enable_asle(dev); + + if (!IS_IGDNG(dev)) + opregion_enable_asle(dev); dev_priv->irq_enabled = 1; } -- cgit v1.2.3 From 2cce0d8740f0d1454d012401257d96c513ce358f Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 5 Jun 2009 15:38:41 +0800 Subject: drm/i915: Disable tiling on IGDNG for now Swizzle bit detection not working right on it. 
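For background: bit 6 swizzling means the memory controller XORs physical address bit 6 with higher address bits when interleaving memory channels, so software must apply the same XOR when it accesses tiled buffers through the CPU. A minimal sketch of the common 9/10 variant (hypothetical helper, illustrative only, not part of this patch):

        static unsigned long swizzle_bit_6_9_10(unsigned long offset)
        {
                /* For the I915_BIT_6_SWIZZLE_9_10 mode, bit 6 of a page
                 * offset is XORed with address bits 9 and 10.
                 */
                unsigned long bit9 = (offset >> 9) & 1;
                unsigned long bit10 = (offset >> 10) & 1;

                return offset ^ ((bit9 ^ bit10) << 6);
        }

Reporting I915_BIT_6_SWIZZLE_UNKNOWN keeps userspace from applying any such fixup until detection is verified on the new chipset.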
Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem_tiling.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 540dd336e6ec..07d976bf4931 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -170,6 +170,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) } } + /* FIXME: check with memory config on IGDNG */ + if (IS_IGDNG(dev)) { + DRM_ERROR("disable tiling on IGDNG...\n"); + swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; + swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + } + dev_priv->mm.bit_6_swizzle_x = swizzle_x; dev_priv->mm.bit_6_swizzle_y = swizzle_y; } -- cgit v1.2.3 From 2c07245fb8f7f0a282282e5a9747e46defdb2cc7 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 5 Jun 2009 15:38:42 +0800 Subject: drm/i915: enable kernel modesetting on IGDNG This adds kernel mode setting on IGDNG with VGA output support. Note that suspend/resume doesn't work yet. Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/i915/intel_crt.c | 76 ++++- drivers/gpu/drm/i915/intel_display.c | 603 +++++++++++++++++++++++++++++++++-- 3 files changed, 643 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index fa105bd119a6..5d36059d6f4e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -922,7 +922,7 @@ static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size, * Some of the preallocated space is taken by the GTT * and popup. GTT is 1K per MB of aperture size, and popup is 4K. */ - if (IS_G4X(dev) || IS_IGD(dev)) + if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev)) overhead = 4096; else overhead = (*aperture_size / 1024) + 4096; diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 640f5158effc..ff9bccad3871 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -37,9 +37,14 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 temp; + u32 temp, reg; - temp = I915_READ(ADPA); + if (IS_IGDNG(dev)) + reg = PCH_ADPA; + else + reg = ADPA; + + temp = I915_READ(reg); temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); temp |= ADPA_DAC_ENABLE; @@ -58,7 +63,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) break; } - I915_WRITE(ADPA, temp); + I915_WRITE(reg, temp); } static int intel_crt_mode_valid(struct drm_connector *connector, @@ -101,17 +106,23 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; int dpll_md_reg; u32 adpa, dpll_md; + u32 adpa_reg; if (intel_crtc->pipe == 0) dpll_md_reg = DPLL_A_MD; else dpll_md_reg = DPLL_B_MD; + if (IS_IGDNG(dev)) + adpa_reg = PCH_ADPA; + else + adpa_reg = ADPA; + /* * Disable separate mode multiplier used when cloning SDVO to CRT * XXX this needs to be adjusted when we really are cloning */ - if (IS_I965G(dev)) { + if (IS_I965G(dev) && !IS_IGDNG(dev)) { dpll_md = I915_READ(dpll_md_reg); I915_WRITE(dpll_md_reg, dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); @@ -125,13 +136,53 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, if (intel_crtc->pipe == 0) { adpa |= ADPA_PIPE_A_SELECT; - I915_WRITE(BCLRPAT_A, 0); + if 
(!IS_IGDNG(dev)) + I915_WRITE(BCLRPAT_A, 0); } else { adpa |= ADPA_PIPE_B_SELECT; - I915_WRITE(BCLRPAT_B, 0); + if (!IS_IGDNG(dev)) + I915_WRITE(BCLRPAT_B, 0); } - I915_WRITE(ADPA, adpa); + I915_WRITE(adpa_reg, adpa); +} + +static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 adpa, temp; + bool ret; + + temp = adpa = I915_READ(PCH_ADPA); + + adpa &= ~ADPA_CRT_HOTPLUG_MASK; + + adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | + ADPA_CRT_HOTPLUG_WARMUP_10MS | + ADPA_CRT_HOTPLUG_SAMPLE_4S | + ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */ + ADPA_CRT_HOTPLUG_VOLREF_325MV | + ADPA_CRT_HOTPLUG_ENABLE | + ADPA_CRT_HOTPLUG_FORCE_TRIGGER); + + DRM_DEBUG("pch crt adpa 0x%x", adpa); + I915_WRITE(PCH_ADPA, adpa); + + /* This might not be needed as not specified in spec...*/ + udelay(1000); + + /* Check the status to see if both blue and green are on now */ + adpa = I915_READ(PCH_ADPA); + if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) == + ADPA_CRT_HOTPLUG_MONITOR_COLOR) + ret = true; + else + ret = false; + + /* restore origin register */ + I915_WRITE(PCH_ADPA, temp); + return ret; } /** @@ -148,6 +199,10 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_en; int i, tries = 0; + + if (IS_IGDNG(dev)) + return intel_igdng_crt_detect_hotplug(connector); + /* * On 4 series desktop, CRT detect sequence need to be done twice * to get a reliable result. @@ -427,6 +482,7 @@ void intel_crt_init(struct drm_device *dev) { struct drm_connector *connector; struct intel_output *intel_output; + u32 i2c_reg; intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); if (!intel_output) @@ -443,7 +499,11 @@ void intel_crt_init(struct drm_device *dev) &intel_output->enc); /* Set up the DDC bus. */ - intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A"); + if (IS_IGDNG(dev)) + i2c_reg = PCH_GPIOA; + else + i2c_reg = GPIOA; + intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); if (!intel_output->ddc_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " "failed.\n"); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c9d6f10ba92e..2cd6ba6523d8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -137,6 +137,8 @@ struct intel_limit { #define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7 #define INTEL_LIMIT_IGD_SDVO_DAC 8 #define INTEL_LIMIT_IGD_LVDS 9 +#define INTEL_LIMIT_IGDNG_SDVO_DAC 10 +#define INTEL_LIMIT_IGDNG_LVDS 11 /*The parameter is for SDVO on G4x platform*/ #define G4X_DOT_SDVO_MIN 25000 @@ -216,12 +218,43 @@ struct intel_limit { #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 +/* IGDNG */ +/* as we calculate clock using (register_value + 2) for + N/M1/M2, so here the range value for them is (actual_value-2). 
+ */ +#define IGDNG_DOT_MIN 25000 +#define IGDNG_DOT_MAX 350000 +#define IGDNG_VCO_MIN 1760000 +#define IGDNG_VCO_MAX 3510000 +#define IGDNG_N_MIN 1 +#define IGDNG_N_MAX 5 +#define IGDNG_M_MIN 79 +#define IGDNG_M_MAX 118 +#define IGDNG_M1_MIN 12 +#define IGDNG_M1_MAX 23 +#define IGDNG_M2_MIN 5 +#define IGDNG_M2_MAX 9 +#define IGDNG_P_SDVO_DAC_MIN 5 +#define IGDNG_P_SDVO_DAC_MAX 80 +#define IGDNG_P_LVDS_MIN 28 +#define IGDNG_P_LVDS_MAX 112 +#define IGDNG_P1_MIN 1 +#define IGDNG_P1_MAX 8 +#define IGDNG_P2_SDVO_DAC_SLOW 10 +#define IGDNG_P2_SDVO_DAC_FAST 5 +#define IGDNG_P2_LVDS_SLOW 14 /* single channel */ +#define IGDNG_P2_LVDS_FAST 7 /* double channel */ +#define IGDNG_P2_DOT_LIMIT 225000 /* 225Mhz */ + static bool intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock); static bool intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock); +static bool +intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock); static const intel_limit_t intel_limits[] = { { /* INTEL_LIMIT_I8XX_DVO_DAC */ @@ -383,9 +416,47 @@ static const intel_limit_t intel_limits[] = { .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, .find_pll = intel_find_best_PLL, }, - + { /* INTEL_LIMIT_IGDNG_SDVO_DAC */ + .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, + .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, + .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, + .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX }, + .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX }, + .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX }, + .p = { .min = IGDNG_P_SDVO_DAC_MIN, .max = IGDNG_P_SDVO_DAC_MAX }, + .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX }, + .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT, + .p2_slow = IGDNG_P2_SDVO_DAC_SLOW, + .p2_fast = IGDNG_P2_SDVO_DAC_FAST }, + .find_pll = intel_igdng_find_best_PLL, + }, + { /* INTEL_LIMIT_IGDNG_LVDS */ + .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, + .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, + .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, + .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX }, + .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX }, + .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX }, + .p = { .min = IGDNG_P_LVDS_MIN, .max = IGDNG_P_LVDS_MAX }, + .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX }, + .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT, + .p2_slow = IGDNG_P2_LVDS_SLOW, + .p2_fast = IGDNG_P2_LVDS_FAST }, + .find_pll = intel_igdng_find_best_PLL, + }, }; +static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc) +{ + const intel_limit_t *limit; + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + limit = &intel_limits[INTEL_LIMIT_IGDNG_LVDS]; + else + limit = &intel_limits[INTEL_LIMIT_IGDNG_SDVO_DAC]; + + return limit; +} + static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -418,7 +489,9 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; const intel_limit_t *limit; - if (IS_G4X(dev)) { + if (IS_IGDNG(dev)) + limit = intel_igdng_limit(crtc); + else if (IS_G4X(dev)) { limit = intel_g4x_limit(crtc); } else if (IS_I9XX(dev) && !IS_IGD(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) @@ -630,7 +703,64 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, } } } + return found; +} + 
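+ /* Note for the function below: this_err = abs(10000 - target*10000/clock.dot) is the relative dot clock error in hundredths of a percent, so err_most = 47 accepts at most roughly 0.47% deviation; the search returns the first candidate within that bound, and the loop order (smallest n first, largest m1/m2/p1 first) encodes the hardware preferences named in the comments. */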
+static bool +intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + intel_clock_t clock; + int max_n; + bool found; + int err_most = 47; + found = false; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == + LVDS_CLKB_POWER_UP) + clock.p2 = limit->p2.p2_fast; + else + clock.p2 = limit->p2.p2_slow; + } else { + if (target < limit->p2.dot_limit) + clock.p2 = limit->p2.p2_slow; + else + clock.p2 = limit->p2.p2_fast; + } + + memset(best_clock, 0, sizeof(*best_clock)); + max_n = limit->n.max; + /* based on hardware requirement, prefer smaller n for precision */ + for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { + /* based on hardware requirement, prefer larger m1, m2, p1 */ + for (clock.m1 = limit->m1.max; + clock.m1 >= limit->m1.min; clock.m1--) { + for (clock.m2 = limit->m2.max; + clock.m2 >= limit->m2.min; clock.m2--) { + for (clock.p1 = limit->p1.max; + clock.p1 >= limit->p1.min; clock.p1--) { + int this_err; + intel_clock(dev, refclk, &clock); + if (!intel_PLL_is_valid(crtc, &clock)) + continue; + this_err = abs((10000 - (target*10000/clock.dot))); + if (this_err < err_most) { + *best_clock = clock; + err_most = this_err; + max_n = clock.n; + found = true; + /* found on first matching */ + goto out; + } + } + } + } + } +out: return found; } @@ -785,18 +915,292 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return 0; } +static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int plane = intel_crtc->pipe; + int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; + int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; + int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; + int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; + int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; + int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; + int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; + int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; + int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; + int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; + int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; + int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; + int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; + int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; + int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; + int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; + int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B; + int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B; + int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B; + int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B; + int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; + int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; + u32 temp; + int tries = 5, j; + /* XXX: When our outputs are all unaware of DPMS modes other than off + * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */ + switch (mode) { + case DRM_MODE_DPMS_ON: + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + DRM_DEBUG("crtc %d dpms on\n", pipe); + /* enable PCH DPLL */ + temp = I915_READ(pch_dpll_reg); + if ((temp & DPLL_VCO_ENABLE) == 0) { + I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); + I915_READ(pch_dpll_reg); + } -/** - * Sets the power management mode of the pipe and plane. - * - * This code should probably grow support for turning the cursor off and back - * on appropriately at the same time as we're turning the pipe off/on. - */ -static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) + /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ + temp = I915_READ(fdi_rx_reg); + I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | + FDI_SEL_PCDCLK | + FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ + I915_READ(fdi_rx_reg); + udelay(200); + + /* Enable CPU FDI TX PLL, always on for IGDNG */ + temp = I915_READ(fdi_tx_reg); + if ((temp & FDI_TX_PLL_ENABLE) == 0) { + I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); + I915_READ(fdi_tx_reg); + udelay(100); + } + + /* Enable CPU pipe */ + temp = I915_READ(pipeconf_reg); + if ((temp & PIPEACONF_ENABLE) == 0) { + I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); + I915_READ(pipeconf_reg); + udelay(100); + } + + /* configure and enable CPU plane */ + temp = I915_READ(dspcntr_reg); + if ((temp & DISPLAY_PLANE_ENABLE) == 0) { + I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); + /* Flush the plane changes */ + I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); + } + + /* enable CPU FDI TX and PCH FDI RX */ + temp = I915_READ(fdi_tx_reg); + temp |= FDI_TX_ENABLE; + temp |= FDI_DP_PORT_WIDTH_X4; /* default */ + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(fdi_tx_reg, temp); + I915_READ(fdi_tx_reg); + + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); + I915_READ(fdi_rx_reg); + + udelay(150); + + /* Train FDI. 
*/ + /* unmask FDI RX Interrupt symbol_lock and bit_lock bit + for train result */ + temp = I915_READ(fdi_rx_imr_reg); + temp &= ~FDI_RX_SYMBOL_LOCK; + temp &= ~FDI_RX_BIT_LOCK; + I915_WRITE(fdi_rx_imr_reg, temp); + I915_READ(fdi_rx_imr_reg); + udelay(150); + + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); + + if ((temp & FDI_RX_BIT_LOCK) == 0) { + for (j = 0; j < tries; j++) { + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); + if (temp & FDI_RX_BIT_LOCK) + break; + udelay(200); + } + if (j != tries) + I915_WRITE(fdi_rx_iir_reg, + temp | FDI_RX_BIT_LOCK); + else + DRM_DEBUG("train 1 fail\n"); + } else { + I915_WRITE(fdi_rx_iir_reg, + temp | FDI_RX_BIT_LOCK); + DRM_DEBUG("train 1 ok 2!\n"); + } + temp = I915_READ(fdi_tx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_2; + I915_WRITE(fdi_tx_reg, temp); + + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_2; + I915_WRITE(fdi_rx_reg, temp); + + udelay(150); + + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); + + if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { + for (j = 0; j < tries; j++) { + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); + if (temp & FDI_RX_SYMBOL_LOCK) + break; + udelay(200); + } + if (j != tries) { + I915_WRITE(fdi_rx_iir_reg, + temp | FDI_RX_SYMBOL_LOCK); + DRM_DEBUG("train 2 ok 1!\n"); + } else + DRM_DEBUG("train 2 fail\n"); + } else { + I915_WRITE(fdi_rx_iir_reg, temp | FDI_RX_SYMBOL_LOCK); + DRM_DEBUG("train 2 ok 2!\n"); + } + DRM_DEBUG("train done\n"); + + /* set transcoder timing */ + I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); + I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); + I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); + + I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); + I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); + I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); + + /* enable PCH transcoder */ + temp = I915_READ(transconf_reg); + I915_WRITE(transconf_reg, temp | TRANS_ENABLE); + I915_READ(transconf_reg); + + while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) + ; + + /* enable normal */ + + temp = I915_READ(fdi_tx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | + FDI_TX_ENHANCE_FRAME_ENABLE); + I915_READ(fdi_tx_reg); + + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE | + FDI_RX_ENHANCE_FRAME_ENABLE); + I915_READ(fdi_rx_reg); + + /* wait one idle pattern time */ + udelay(100); + + intel_crtc_load_lut(crtc); + + break; + case DRM_MODE_DPMS_OFF: + DRM_DEBUG("crtc %d dpms off\n", pipe); + + /* Disable the VGA plane that we never use */ + I915_WRITE(CPU_VGACNTRL, VGA_DISP_DISABLE); + + /* Disable display plane */ + temp = I915_READ(dspcntr_reg); + if ((temp & DISPLAY_PLANE_ENABLE) != 0) { + I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); + /* Flush the plane changes */ + I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); + I915_READ(dspbase_reg); + } + + /* disable cpu pipe, disable after all planes disabled */ + temp = I915_READ(pipeconf_reg); + if ((temp & PIPEACONF_ENABLE) != 0) { + I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); + I915_READ(pipeconf_reg); + /* wait for cpu pipe off, pipe state */ + while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) + ; + } else + DRM_DEBUG("crtc %d is disabled\n", pipe); + + /* IGDNG-A : disable cpu panel fitter ?
*/ + temp = I915_READ(pf_ctl_reg); + if ((temp & PF_ENABLE) != 0) { + I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); + I915_READ(pf_ctl_reg); + } + + /* disable CPU FDI tx and PCH FDI rx */ + temp = I915_READ(fdi_tx_reg); + I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE); + I915_READ(fdi_tx_reg); + + temp = I915_READ(fdi_rx_reg); + I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); + I915_READ(fdi_rx_reg); + + /* still set train pattern 1 */ + temp = I915_READ(fdi_tx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(fdi_tx_reg, temp); + + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(fdi_rx_reg, temp); + + /* disable PCH transcoder */ + temp = I915_READ(transconf_reg); + if ((temp & TRANS_ENABLE) != 0) { + I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); + I915_READ(transconf_reg); + /* wait for PCH transcoder off, transcoder state */ + while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) + ; + } + + /* disable PCH DPLL */ + temp = I915_READ(pch_dpll_reg); + if ((temp & DPLL_VCO_ENABLE) != 0) { + I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); + I915_READ(pch_dpll_reg); + } + + temp = I915_READ(fdi_rx_reg); + if ((temp & FDI_RX_PLL_ENABLE) != 0) { + temp &= ~FDI_SEL_PCDCLK; + temp &= ~FDI_RX_PLL_ENABLE; + I915_WRITE(fdi_rx_reg, temp); + I915_READ(fdi_rx_reg); + } + + /* Wait for the clocks to turn off. */ + udelay(150); + break; + } +} + +static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; - struct drm_i915_master_private *master_priv; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; @@ -805,7 +1209,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR; int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; u32 temp; - bool enabled; /* XXX: When our outputs are all unaware of DPMS modes other than off * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. @@ -890,6 +1293,26 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) udelay(150); break; } +} + +/** + * Sets the power management mode of the pipe and plane. + * + * This code should probably grow support for turning the cursor off and back + * on appropriately at the same time as we're turning the pipe off/on. 
+ */ +static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_master_private *master_priv; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + bool enabled; + + if (IS_IGDNG(dev)) + igdng_crtc_dpms(crtc, mode); + else + i9xx_crtc_dpms(crtc, mode); if (!dev->primary->master) return; @@ -947,6 +1370,12 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + struct drm_device *dev = crtc->dev; + if (IS_IGDNG(dev)) { + /* FDI link clock is fixed at 2.7G */ + if (mode->clock * 3 > 27000 * 4) + return MODE_CLOCK_HIGH; + } return true; } @@ -1030,6 +1459,48 @@ static int intel_panel_fitter_pipe (struct drm_device *dev) return 1; } +struct fdi_m_n { + u32 tu; + u32 gmch_m; + u32 gmch_n; + u32 link_m; + u32 link_n; +}; + +static void +fdi_reduce_ratio(u32 *num, u32 *den) +{ + while (*num > 0xffffff || *den > 0xffffff) { + *num >>= 1; + *den >>= 1; + } +} + +#define DATA_N 0x800000 +#define LINK_N 0x80000 + +static void +igdng_compute_m_n(int bytes_per_pixel, int nlanes, + int pixel_clock, int link_clock, + struct fdi_m_n *m_n) +{ + u64 temp; + + m_n->tu = 64; /* default size */ + + temp = (u64) DATA_N * pixel_clock; + temp = div_u64(temp, link_clock); + m_n->gmch_m = (temp * bytes_per_pixel) / nlanes; + m_n->gmch_n = DATA_N; + fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); + + temp = (u64) LINK_N * pixel_clock; + m_n->link_m = div_u64(temp, link_clock); + m_n->link_n = LINK_N; + fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); +} + + static int intel_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -1063,6 +1534,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, struct drm_connector *connector; const intel_limit_t *limit; int ret; + struct fdi_m_n m_n = {0}; + int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1; + int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1; + int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1; + int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1; + int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0; + int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; + int fdi_rx_reg = (pipe == 0) ? 
FDI_RXA_CTL : FDI_RXB_CTL; + u32 temp; + int sdvo_pixel_multiply; drm_vblank_pre_modeset(dev, pipe); @@ -1101,6 +1582,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000); } else if (IS_I9XX(dev)) { refclk = 96000; + if (IS_IGDNG(dev)) + refclk = 120000; /* 120Mhz refclk */ } else { refclk = 48000; } @@ -1137,12 +1620,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } } + /* FDI link */ + if (IS_IGDNG(dev)) + igdng_compute_m_n(3, 4, /* lane num 4 */ + adjusted_mode->clock, + 270000, /* lane clock */ + &m_n); + if (IS_IGD(dev)) fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; else fp = clock.n << 16 | clock.m1 << 8 | clock.m2; - dpll = DPLL_VGA_MODE_DIS; + if (!IS_IGDNG(dev)) + dpll = DPLL_VGA_MODE_DIS; + if (IS_I9XX(dev)) { if (is_lvds) dpll |= DPLLB_MODE_LVDS; @@ -1150,17 +1642,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, dpll |= DPLLB_MODE_DAC_SERIAL; if (is_sdvo) { dpll |= DPLL_DVO_HIGH_SPEED; - if (IS_I945G(dev) || IS_I945GM(dev)) { - int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; + sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; + if (IS_I945G(dev) || IS_I945GM(dev)) dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; - } + else if (IS_IGDNG(dev)) + dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; } /* compute bitmask from p1 value */ if (IS_IGD(dev)) dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD; - else + else { dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; + /* also FPA1 */ + if (IS_IGDNG(dev)) + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; + } switch (clock.p2) { case 5: dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; @@ -1175,7 +1672,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; break; } - if (IS_I965G(dev)) + if (IS_I965G(dev) && !IS_IGDNG(dev)) dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); } else { if (is_lvds) { @@ -1207,10 +1704,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Set up the display plane register */ dspcntr = DISPPLANE_GAMMA_ENABLE; - if (pipe == 0) - dspcntr |= DISPPLANE_SEL_PIPE_A; - else - dspcntr |= DISPPLANE_SEL_PIPE_B; + /* IGDNG's plane is forced to pipe, bit 24 is to + enable color space conversion */ + if (!IS_IGDNG(dev)) { + if (pipe == 0) + dspcntr |= DISPPLANE_SEL_PIPE_A; + else + dspcntr |= DISPPLANE_SEL_PIPE_B; + } if (pipe == 0 && !IS_I965G(dev)) { /* Enable pixel doubling when the dot clock is > 90% of the (display) @@ -1231,12 +1732,17 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Disable the panel fitter if it was on our pipe */ - if (intel_panel_fitter_pipe(dev) == pipe) + if (!IS_IGDNG(dev) && intel_panel_fitter_pipe(dev) == pipe) I915_WRITE(PFIT_CONTROL, 0); DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 
'A' : 'B'); drm_mode_debug_printmodeline(mode); + /* assign to IGDNG registers */ + if (IS_IGDNG(dev)) { + fp_reg = pch_fp_reg; + dpll_reg = pch_dpll_reg; + } if (dpll & DPLL_VCO_ENABLE) { I915_WRITE(fp_reg, fp); @@ -1245,6 +1751,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, udelay(150); } + if (IS_IGDNG(dev)) { + /* enable PCH clock reference source */ + /* XXX need to change the setting for other outputs */ + u32 temp; + temp = I915_READ(PCH_DREF_CONTROL); + temp &= ~DREF_NONSPREAD_SOURCE_MASK; + temp |= DREF_NONSPREAD_CK505_ENABLE; + temp &= ~DREF_SSC_SOURCE_MASK; + temp |= DREF_SSC_SOURCE_ENABLE; + temp &= ~DREF_SSC1_ENABLE; + /* if no eDP, disable source output to CPU */ + temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; + temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + I915_WRITE(PCH_DREF_CONTROL, temp); + } + /* The LVDS pin pair needs to be on before the DPLLs are enabled. * This is an exception to the general rule that mode_set doesn't turn * things on. @@ -1276,8 +1798,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Wait for the clocks to stabilize. */ udelay(150); - if (IS_I965G(dev)) { - int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; + if (IS_I965G(dev) && !IS_IGDNG(dev)) { + sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); } else { @@ -1303,9 +1825,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* pipesrc and dspsize control the size that is scaled from, which should * always be the user's requested size. */ - I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); - I915_WRITE(dsppos_reg, 0); + if (!IS_IGDNG(dev)) { + I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | + (mode->hdisplay - 1)); + I915_WRITE(dsppos_reg, 0); + } I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); + + if (IS_IGDNG(dev)) { + I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); + I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); + I915_WRITE(link_m1_reg, m_n.link_m); + I915_WRITE(link_n1_reg, m_n.link_n); + + /* enable FDI RX PLL too */ + temp = I915_READ(fdi_rx_reg); + I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); + udelay(200); + } + I915_WRITE(pipeconf_reg, pipeconf); I915_READ(pipeconf_reg); @@ -1336,6 +1874,11 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) if (!crtc->enabled) return; + /* use legacy palette for IGDNG */ + if (IS_IGDNG(dev)) + palreg = (intel_crtc->pipe == 0) ? 
LGC_PALETTE_A : + LGC_PALETTE_B; + for (i = 0; i < 256; i++) { I915_WRITE(palreg + 4 * i, (intel_crtc->lut_r[i] << 16) | @@ -1885,10 +2428,12 @@ static void intel_setup_outputs(struct drm_device *dev) intel_crt_init(dev); /* Set up integrated LVDS */ - if (IS_MOBILE(dev) && !IS_I830(dev)) + if (IS_MOBILE(dev) && !IS_I830(dev) && !IS_IGDNG(dev)) intel_lvds_init(dev); - if (IS_I9XX(dev)) { + if (IS_IGDNG(dev)) { + /* ignore for other outputs */ + } else if (IS_I9XX(dev)) { int found; u32 reg; @@ -1912,7 +2457,7 @@ static void intel_setup_outputs(struct drm_device *dev) } else intel_dvo_init(dev); - if (IS_I9XX(dev) && IS_MOBILE(dev)) + if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev)) intel_tv_init(dev); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -- cgit v1.2.3 From 30ad48b7334a2eb2edf22f6c91f7b3f22a22a837 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 5 Jun 2009 15:38:43 +0800 Subject: drm/i915: Add HDMI support on IGDNG Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_display.c | 17 ++++++++++++++++- drivers/gpu/drm/i915/intel_hdmi.c | 33 +++++++++++++++++++++++++++++++-- 2 files changed, 47 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2cd6ba6523d8..53cf6efa67b6 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2432,7 +2432,22 @@ static void intel_setup_outputs(struct drm_device *dev) intel_lvds_init(dev); if (IS_IGDNG(dev)) { - /* ignore for other outputs */ + int found; + + if (I915_READ(HDMIB) & PORT_DETECTED) { + /* check SDVOB */ + /* found = intel_sdvo_init(dev, HDMIB); */ + found = 0; + if (!found) + intel_hdmi_init(dev, HDMIB); + } + + if (I915_READ(HDMIC) & PORT_DETECTED) + intel_hdmi_init(dev, HDMIC); + + if (I915_READ(HDMID) & PORT_DETECTED) + intel_hdmi_init(dev, HDMID); + } else if (IS_I9XX(dev)) { int found; u32 reg; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index d0983bb93a18..d874b0c4b065 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -56,7 +56,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE | SDVO_VSYNC_ACTIVE_HIGH | - SDVO_HSYNC_ACTIVE_HIGH; + SDVO_HSYNC_ACTIVE_HIGH | + SDVO_NULL_PACKETS_DURING_VSYNC; if (hdmi_priv->has_hdmi_sink) sdvox |= SDVO_AUDIO_ENABLE; @@ -144,6 +145,22 @@ intel_hdmi_sink_detect(struct drm_connector *connector) } } +static enum drm_connector_status +igdng_hdmi_detect(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; + + /* FIXME hotplug detect */ + + hdmi_priv->has_hdmi_sink = false; + intel_hdmi_sink_detect(connector); + if (hdmi_priv->has_hdmi_sink) + return connector_status_connected; + else + return connector_status_disconnected; +} + static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector) { @@ -153,6 +170,9 @@ intel_hdmi_detect(struct drm_connector *connector) struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; u32 temp, bit; + if (IS_IGDNG(dev)) + return igdng_hdmi_detect(connector); + temp = I915_READ(PORT_HOTPLUG_EN); switch (hdmi_priv->sdvox_reg) { @@ -268,8 +288,17 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) /* Set up the DDC bus. 
*/ if (sdvox_reg == SDVOB) intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); - else + else if (sdvox_reg == SDVOC) intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); + else if (sdvox_reg == HDMIB) + intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, + "HDMIB"); + else if (sdvox_reg == HDMIC) + intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, + "HDMIC"); + else if (sdvox_reg == HDMID) + intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, + "HDMID"); if (!intel_output->ddc_bus) goto err_connector; -- cgit v1.2.3 From 541998a18b72d2cac48b3369fa4540116ff3f0a8 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 5 Jun 2009 15:38:44 +0800 Subject: drm/i915: Add LVDS support for IGDNG Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_display.c | 13 ++-- drivers/gpu/drm/i915/intel_lvds.c | 127 +++++++++++++++++++++++++++++------ 2 files changed, 114 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 53cf6efa67b6..05bd97e3e3e0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1542,6 +1542,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0; int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; + int lvds_reg = LVDS; u32 temp; int sdvo_pixel_multiply; @@ -1772,8 +1773,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, * things on. */ if (is_lvds) { - u32 lvds = I915_READ(LVDS); + u32 lvds; + if (IS_IGDNG(dev)) + lvds_reg = PCH_LVDS; + + lvds = I915_READ(lvds_reg); lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; /* Set the B0-B3 data pairs corresponding to whether we're going to * set the DPLLs for dual-channel mode or not. @@ -1788,8 +1793,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, * panels behave in the two modes. 
*/ - I915_WRITE(LVDS, lvds); - I915_READ(LVDS); + I915_WRITE(lvds_reg, lvds); + I915_READ(lvds_reg); } I915_WRITE(fp_reg, fp); @@ -2428,7 +2433,7 @@ static void intel_setup_outputs(struct drm_device *dev) intel_crt_init(dev); /* Set up integrated LVDS */ - if (IS_MOBILE(dev) && !IS_I830(dev) && !IS_IGDNG(dev)) + if (IS_MOBILE(dev) && !IS_I830(dev)) intel_lvds_init(dev); if (IS_IGDNG(dev)) { diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 53731f0ffcb5..eea3a548b82a 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -45,10 +45,15 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 blc_pwm_ctl; + u32 blc_pwm_ctl, reg; - blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; - I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl | + if (IS_IGDNG(dev)) + reg = BLC_PWM_CPU_CTL; + else + reg = BLC_PWM_CTL; + + blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK; + I915_WRITE(reg, (blc_pwm_ctl | (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); } @@ -58,8 +63,14 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level) static u32 intel_lvds_get_max_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg; + + if (IS_IGDNG(dev)) + reg = BLC_PWM_PCH_CTL2; + else + reg = BLC_PWM_CTL; - return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >> + return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >> BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; } @@ -69,23 +80,31 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev) static void intel_lvds_set_power(struct drm_device *dev, bool on) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp_status; + u32 pp_status, ctl_reg, status_reg; + + if (IS_IGDNG(dev)) { + ctl_reg = PCH_PP_CONTROL; + status_reg = PCH_PP_STATUS; + } else { + ctl_reg = PP_CONTROL; + status_reg = PP_STATUS; + } if (on) { - I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | + I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); do { - pp_status = I915_READ(PP_STATUS); + pp_status = I915_READ(status_reg); } while ((pp_status & PP_ON) == 0); intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle); } else { intel_lvds_set_backlight(dev, 0); - I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & + I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); do { - pp_status = I915_READ(PP_STATUS); + pp_status = I915_READ(status_reg); } while (pp_status & PP_ON); } } @@ -106,12 +125,28 @@ static void intel_lvds_save(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; + u32 pwm_ctl_reg; + + if (IS_IGDNG(dev)) { + pp_on_reg = PCH_PP_ON_DELAYS; + pp_off_reg = PCH_PP_OFF_DELAYS; + pp_ctl_reg = PCH_PP_CONTROL; + pp_div_reg = PCH_PP_DIVISOR; + pwm_ctl_reg = BLC_PWM_CPU_CTL; + } else { + pp_on_reg = PP_ON_DELAYS; + pp_off_reg = PP_OFF_DELAYS; + pp_ctl_reg = PP_CONTROL; + pp_div_reg = PP_DIVISOR; + pwm_ctl_reg = BLC_PWM_CTL; + } - dev_priv->savePP_ON = I915_READ(PP_ON_DELAYS); - dev_priv->savePP_OFF = I915_READ(PP_OFF_DELAYS); - dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); - dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); - dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); + dev_priv->savePP_ON = I915_READ(pp_on_reg); + dev_priv->savePP_OFF = I915_READ(pp_off_reg); + dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg); + 
dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg); + dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg); dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & BACKLIGHT_DUTY_CYCLE_MASK); @@ -127,12 +162,28 @@ static void intel_lvds_restore(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; + u32 pwm_ctl_reg; + + if (IS_IGDNG(dev)) { + pp_on_reg = PCH_PP_ON_DELAYS; + pp_off_reg = PCH_PP_OFF_DELAYS; + pp_ctl_reg = PCH_PP_CONTROL; + pp_div_reg = PCH_PP_DIVISOR; + pwm_ctl_reg = BLC_PWM_CPU_CTL; + } else { + pp_on_reg = PP_ON_DELAYS; + pp_off_reg = PP_OFF_DELAYS; + pp_ctl_reg = PP_CONTROL; + pp_div_reg = PP_DIVISOR; + pwm_ctl_reg = BLC_PWM_CTL; + } - I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); - I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON); - I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF); - I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); - I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); + I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL); + I915_WRITE(pp_on_reg, dev_priv->savePP_ON); + I915_WRITE(pp_off_reg, dev_priv->savePP_OFF); + I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR); + I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL); if (dev_priv->savePP_CONTROL & POWER_TARGET_ON) intel_lvds_set_power(dev, true); else @@ -216,8 +267,14 @@ static void intel_lvds_prepare(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg; - dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); + if (IS_IGDNG(dev)) + reg = BLC_PWM_CPU_CTL; + else + reg = BLC_PWM_CTL; + + dev_priv->saveBLC_PWM_CTL = I915_READ(reg); dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & BACKLIGHT_DUTY_CYCLE_MASK); @@ -251,6 +308,10 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, * settings. */ + /* No panel fitting yet, fixme */ + if (IS_IGDNG(dev)) + return; + /* * Enable automatic panel scaling so that non-native modes fill the * screen. Should be enabled before the pipe is enabled, according to @@ -446,12 +507,18 @@ void intel_lvds_init(struct drm_device *dev) struct drm_display_mode *scan; /* *modes, *bios_mode; */ struct drm_crtc *crtc; u32 lvds; - int pipe; + int pipe, gpio = GPIOC; /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) return; + if (IS_IGDNG(dev)) { + if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) + return; + gpio = PCH_GPIOC; + } + intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); if (!intel_output) { return; @@ -486,7 +553,7 @@ void intel_lvds_init(struct drm_device *dev) */ /* Set up the DDC bus. */ - intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C"); + intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); if (!intel_output->ddc_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " "failed.\n"); @@ -528,6 +595,11 @@ void intel_lvds_init(struct drm_device *dev) * on. If so, assume that whatever is currently programmed is the * correct mode. */ + + /* IGDNG: FIXME if still fail, not try pipe mode now */ + if (IS_IGDNG(dev)) + goto failed; + lvds = I915_READ(LVDS); pipe = (lvds & LVDS_PIPEB_SELECT) ? 
1 : 0; crtc = intel_get_crtc_from_pipe(dev, pipe); @@ -546,6 +618,17 @@ void intel_lvds_init(struct drm_device *dev) goto failed; out: + if (IS_IGDNG(dev)) { + u32 pwm; + /* make sure PWM is enabled */ + pwm = I915_READ(BLC_PWM_CPU_CTL2); + pwm |= (PWM_ENABLE | PWM_PIPE_B); + I915_WRITE(BLC_PWM_CPU_CTL2, pwm); + + pwm = I915_READ(BLC_PWM_PCH_CTL1); + pwm |= PWM_PCH_ENABLE; + I915_WRITE(BLC_PWM_PCH_CTL1, pwm); + } drm_sysfs_connector_add(connector); return; -- cgit v1.2.3 From cb66c692d1ae257f32dc7f6085cf9cb9f2f6bab8 Mon Sep 17 00:00:00 2001 From: Ma Ling Date: Sun, 31 May 2009 16:58:32 +0800 Subject: drm/i915: Set correct TV detection voltage level override values We detect TV connection status by setting the DAC voltage level override values to 0.7 V for DAC_A/B/C. The corresponding 2-bit fields should be 0x2; to correctly set the last bit to 0, we must first clear it. This fixes freedesktop.org bug #21204. Signed-off-by: Ma Ling Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_reg.h | 6 +++--- drivers/gpu/drm/i915/intel_tv.c | 3 +++ 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 99681cfb7ab9..79df9e9c38e4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -958,15 +958,15 @@ # define DAC_A_1_3_V (0 << 4) # define DAC_A_1_1_V (1 << 4) # define DAC_A_0_7_V (2 << 4) -# define DAC_A_OFF (3 << 4) +# define DAC_A_MASK (3 << 4) # define DAC_B_1_3_V (0 << 2) # define DAC_B_1_1_V (1 << 2) # define DAC_B_0_7_V (2 << 2) -# define DAC_B_OFF (3 << 2) +# define DAC_B_MASK (3 << 2) # define DAC_C_1_3_V (0 << 0) # define DAC_C_1_1_V (1 << 0) # define DAC_C_0_7_V (2 << 0) -# define DAC_C_OFF (3 << 0) +# define DAC_C_MASK (3 << 0) /** * CSC coefficients are stored in a floating point format with 9 bits of diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index d2c32983242d..c7d9ef01dbf6 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1392,6 +1392,9 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) tv_ctl &= ~TV_TEST_MODE_MASK; tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; tv_dac &= ~TVDAC_SENSE_MASK; + tv_dac &= ~DAC_A_MASK; + tv_dac &= ~DAC_B_MASK; + tv_dac &= ~DAC_C_MASK; tv_dac |= (TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL | TVDAC_B_SENSE_CTL | -- cgit v1.2.3 From 2245fda810f870dce9b030e6aa604320abba53a5 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Sat, 30 May 2009 20:42:29 -0700 Subject: drm/i915: Don't trim cursor addresses to 11 bits We can safely assume that cursor addresses will not extend beyond the addressable screen dimensions; setting the additional bits is harmless in any case.
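For illustration, the cursor position register encodes each axis as a sign bit plus a magnitude, and CURSOR_POS_MASK covers only 11 bits, so the old masking would have wrapped any coordinate of 2048 or more back toward 0. A condensed sketch of the encoding as it stands after this patch:

	u32 temp = 0;

	if (x < 0) {
		temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;				/* keep only the magnitude */
	}
	temp |= x << CURSOR_X_SHIFT;		/* no longer trimmed to 11 bits */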
Signed-off-by: Keith Packard Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_display.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 05bd97e3e3e0..c5c45827ca01 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2012,16 +2012,16 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) uint32_t adder; if (x < 0) { - temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT); + temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; x = -x; } if (y < 0) { - temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT); + temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; y = -y; } - temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT); - temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT); + temp |= x << CURSOR_X_SHIFT; + temp |= y << CURSOR_Y_SHIFT; adder = intel_crtc->cursor_addr; I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); -- cgit v1.2.3 From 040d87f15a0129242463d3ed7c48381505f596e2 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Sat, 30 May 2009 20:42:33 -0700 Subject: drm/i915: Add Display Port register defines This adds the register definitions for the display port enable register along with those for the GMCH and Link M/N ratios required to drive display port outputs. Signed-off-by: Keith Packard Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_reg.h | 163 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 163 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 79df9e9c38e4..f6237a0b1133 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -638,8 +638,11 @@ /* Hotplug control (945+ only) */ #define PORT_HOTPLUG_EN 0x61110 #define HDMIB_HOTPLUG_INT_EN (1 << 29) +#define DPB_HOTPLUG_INT_EN (1 << 29) #define HDMIC_HOTPLUG_INT_EN (1 << 28) +#define DPC_HOTPLUG_INT_EN (1 << 28) #define HDMID_HOTPLUG_INT_EN (1 << 27) +#define DPD_HOTPLUG_INT_EN (1 << 27) #define SDVOB_HOTPLUG_INT_EN (1 << 26) #define SDVOC_HOTPLUG_INT_EN (1 << 25) #define TV_HOTPLUG_INT_EN (1 << 18) @@ -672,8 +675,11 @@ #define PORT_HOTPLUG_STAT 0x61114 #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) +#define DPB_HOTPLUG_INT_STATUS (1 << 29) #define HDMIC_HOTPLUG_INT_STATUS (1 << 28) +#define DPC_HOTPLUG_INT_STATUS (1 << 28) #define HDMID_HOTPLUG_INT_STATUS (1 << 27) +#define DPD_HOTPLUG_INT_STATUS (1 << 27) #define CRT_HOTPLUG_INT_STATUS (1 << 11) #define TV_HOTPLUG_INT_STATUS (1 << 10) #define CRT_HOTPLUG_MONITOR_MASK (3 << 8) @@ -1335,6 +1341,163 @@ #define TV_V_CHROMA_0 0x68400 #define TV_V_CHROMA_42 0x684a8 +/* Display Port */ +#define DP_B 0x64100 +#define DP_C 0x64200 +#define DP_D 0x64300 + +#define DP_PORT_EN (1 << 31) +#define DP_PIPEB_SELECT (1 << 30) + +/* Link training mode - select a suitable mode for each stage */ +#define DP_LINK_TRAIN_PAT_1 (0 << 28) +#define DP_LINK_TRAIN_PAT_2 (1 << 28) +#define DP_LINK_TRAIN_PAT_IDLE (2 << 28) +#define DP_LINK_TRAIN_OFF (3 << 28) +#define DP_LINK_TRAIN_MASK (3 << 28) +#define DP_LINK_TRAIN_SHIFT 28 + +/* Signal voltages. 
These are mostly controlled by the other end */ +#define DP_VOLTAGE_0_4 (0 << 25) +#define DP_VOLTAGE_0_6 (1 << 25) +#define DP_VOLTAGE_0_8 (2 << 25) +#define DP_VOLTAGE_1_2 (3 << 25) +#define DP_VOLTAGE_MASK (7 << 25) +#define DP_VOLTAGE_SHIFT 25 + +/* Signal pre-emphasis levels, like voltages, the other end tells us what + * they want + */ +#define DP_PRE_EMPHASIS_0 (0 << 22) +#define DP_PRE_EMPHASIS_3_5 (1 << 22) +#define DP_PRE_EMPHASIS_6 (2 << 22) +#define DP_PRE_EMPHASIS_9_5 (3 << 22) +#define DP_PRE_EMPHASIS_MASK (7 << 22) +#define DP_PRE_EMPHASIS_SHIFT 22 + +/* How many wires to use. I guess 3 was too hard */ +#define DP_PORT_WIDTH_1 (0 << 19) +#define DP_PORT_WIDTH_2 (1 << 19) +#define DP_PORT_WIDTH_4 (3 << 19) +#define DP_PORT_WIDTH_MASK (7 << 19) + +/* Mystic DPCD version 1.1 special mode */ +#define DP_ENHANCED_FRAMING (1 << 18) + +/** locked once port is enabled */ +#define DP_PORT_REVERSAL (1 << 15) + +/** sends the clock on lane 15 of the PEG for debug */ +#define DP_CLOCK_OUTPUT_ENABLE (1 << 13) + +#define DP_SCRAMBLING_DISABLE (1 << 12) + +/** limit RGB values to avoid confusing TVs */ +#define DP_COLOR_RANGE_16_235 (1 << 8) + +/** Turn on the audio link */ +#define DP_AUDIO_OUTPUT_ENABLE (1 << 6) + +/** vs and hs sync polarity */ +#define DP_SYNC_VS_HIGH (1 << 4) +#define DP_SYNC_HS_HIGH (1 << 3) + +/** A fantasy */ +#define DP_DETECTED (1 << 2) + +/** The aux channel provides a way to talk to the + * signal sink for DDC etc. Max packet size supported + * is 20 bytes in each direction, hence the 5 fixed + * data registers + */ +#define DPB_AUX_CH_CTL 0x64110 +#define DPB_AUX_CH_DATA1 0x64114 +#define DPB_AUX_CH_DATA2 0x64118 +#define DPB_AUX_CH_DATA3 0x6411c +#define DPB_AUX_CH_DATA4 0x64120 +#define DPB_AUX_CH_DATA5 0x64124 + +#define DPC_AUX_CH_CTL 0x64210 +#define DPC_AUX_CH_DATA1 0x64214 +#define DPC_AUX_CH_DATA2 0x64218 +#define DPC_AUX_CH_DATA3 0x6421c +#define DPC_AUX_CH_DATA4 0x64220 +#define DPC_AUX_CH_DATA5 0x64224 + +#define DPD_AUX_CH_CTL 0x64310 +#define DPD_AUX_CH_DATA1 0x64314 +#define DPD_AUX_CH_DATA2 0x64318 +#define DPD_AUX_CH_DATA3 0x6431c +#define DPD_AUX_CH_DATA4 0x64320 +#define DPD_AUX_CH_DATA5 0x64324 + +#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) +#define DP_AUX_CH_CTL_DONE (1 << 30) +#define DP_AUX_CH_CTL_INTERRUPT (1 << 29) +#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28) +#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26) +#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26) +#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26) +#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26) +#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26) +#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25) +#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20) +#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20 +#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16) +#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16 +#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15) +#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14) +#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13) +#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12) +#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) +#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) +#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 + +/* + * Computing GMCH M and N values for the Display Port link + * + * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes + * + * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz) + * + * The GMCH value is used internally + * + * bytes_per_pixel is the number of bytes coming out of the plane, + * which is after the LUTs, so we want the bytes for our color 
format. + * For our current usage, this is always 3, one byte for R, G and B. + */ +#define PIPEA_GMCH_DATA_M 0x70050 +#define PIPEB_GMCH_DATA_M 0x71050 + +/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ +#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25) +#define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25 + +#define PIPE_GMCH_DATA_M_MASK (0xffffff) + +#define PIPEA_GMCH_DATA_N 0x70054 +#define PIPEB_GMCH_DATA_N 0x71054 +#define PIPE_GMCH_DATA_N_MASK (0xffffff) + +/* + * Computing Link M and N values for the Display Port link + * + * Link M / N = pixel_clock / ls_clk + * + * (the DP spec calls pixel_clock the 'strm_clk') + * + * The Link value is transmitted in the Main Stream + * Attributes and VB-ID. + */ + +#define PIPEA_DP_LINK_M 0x70060 +#define PIPEB_DP_LINK_M 0x71060 +#define PIPEA_DP_LINK_M_MASK (0xffffff) + +#define PIPEA_DP_LINK_N 0x70064 +#define PIPEB_DP_LINK_N 0x71064 +#define PIPEA_DP_LINK_N_MASK (0xffffff) + /* Display & cursor control */ /* Pipe A */ -- cgit v1.2.3 From 59a036cfbd29aadf40d2b754cfebee2a96268752 Mon Sep 17 00:00:00 2001 From: yakui_zhao Date: Sun, 31 May 2009 17:16:22 +0800 Subject: drm/i915: Add the structure of child_device_config in video BIOS tables. Signed-off-by: Zhao Yakui Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_bios.h | 101 +++++++++++++++++++++++++++++++++++--- 1 file changed, 93 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 8ca2cde15804..fe72e1c225d8 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -135,6 +135,86 @@ struct bdb_general_features { u8 rsvd11:6; /* finish byte */ } __attribute__((packed)); +/* pre-915 */ +#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */ +#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */ +#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */ +#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */ + +/* Pre 915 */ +#define DEVICE_TYPE_NONE 0x00 +#define DEVICE_TYPE_CRT 0x01 +#define DEVICE_TYPE_TV 0x09 +#define DEVICE_TYPE_EFP 0x12 +#define DEVICE_TYPE_LFP 0x22 +/* On 915+ */ +#define DEVICE_TYPE_CRT_DPMS 0x6001 +#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001 +#define DEVICE_TYPE_TV_COMPOSITE 0x0209 +#define DEVICE_TYPE_TV_MACROVISION 0x0289 +#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c +#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609 +#define DEVICE_TYPE_TV_SCART 0x0209 +#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009 +#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012 +#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052 +#define DEVICE_TYPE_EFP_DVI_I 0x6053 +#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152 +#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2 +#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062 +#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162 +#define DEVICE_TYPE_LFP_PANELLINK 0x5012 +#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042 +#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062 +#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162 +#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2 + +#define DEVICE_CFG_NONE 0x00 +#define DEVICE_CFG_12BIT_DVOB 0x01 +#define DEVICE_CFG_12BIT_DVOC 0x02 +#define DEVICE_CFG_24BIT_DVOBC 0x09 +#define DEVICE_CFG_24BIT_DVOCB 0x0a +#define DEVICE_CFG_DUAL_DVOB 0x11 +#define DEVICE_CFG_DUAL_DVOC 0x12 +#define DEVICE_CFG_DUAL_DVOBC 0x13 +#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19 +#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a + +#define DEVICE_WIRE_NONE 0x00 +#define DEVICE_WIRE_DVOB 0x01 +#define DEVICE_WIRE_DVOC 0x02 +#define DEVICE_WIRE_DVOBC 
0x03 +#define DEVICE_WIRE_DVOBB 0x05 +#define DEVICE_WIRE_DVOCC 0x06 +#define DEVICE_WIRE_DVOB_MASTER 0x0d +#define DEVICE_WIRE_DVOC_MASTER 0x0e + +#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */ +#define DEVICE_PORT_DVOB 0x01 +#define DEVICE_PORT_DVOC 0x02 + +struct child_device_config { + u16 handle; + u16 device_type; + u8 device_id[10]; /* See DEVICE_TYPE_* above */ + u16 addin_offset; + u8 dvo_port; /* See Device_PORT_* above */ + u8 i2c_pin; + u8 slave_addr; + u8 ddc_pin; + u16 edid_ptr; + u8 dvo_cfg; /* See DEVICE_CFG_* above */ + u8 dvo2_port; + u8 i2c2_pin; + u8 slave2_addr; + u8 ddc2_pin; + u8 capabilities; + u8 dvo_wiring;/* See DEVICE_WIRE_* above */ + u8 dvo2_wiring; + u16 extended_type; + u8 dvo_function; +} __attribute__((packed)); + struct bdb_general_definitions { /* DDC GPIO */ u8 crt_ddc_gmbus_pin; @@ -149,14 +229,19 @@ struct bdb_general_definitions { u8 boot_display[2]; u8 child_dev_size; - /* device info */ - u8 tv_or_lvds_info[33]; - u8 dev1[33]; - u8 dev2[33]; - u8 dev3[33]; - u8 dev4[33]; - /* may be another device block here on some platforms */ -}; + /* + * Device info: + * If TV is present, it'll be at devices[0]. + * LVDS will be next, either devices[0] or [1], if present. + * On some platforms the number of device is 6. But could be as few as + * 4 if both TV and LVDS are missing. + * And the device num is related with the size of general definition + * block. It is obtained by using the following formula: + * number = (block_size - sizeof(bdb_general_definitions))/ + * sizeof(child_device_config); + */ + struct child_device_config devices[0]; +} __attribute__((packed)); struct bdb_lvds_options { u8 panel_type; -- cgit v1.2.3 From 9b9d172d06b0f2d51cc9431e2c6c3055f0cf10ef Mon Sep 17 00:00:00 2001 From: yakui_zhao Date: Sun, 31 May 2009 17:17:17 +0800 Subject: drm/i915: parse VBT general definition block to get the SDVO device info The general definition block contains the child device tables, which include the SDVO device info: for example, the device slave address, DVO port, and device type. We get the SDVO device info by parsing the general definition block. A device is regarded as an SDVO device only when a valid slave address is found, in which case its DVO port and slave address are recorded.
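For reference, sizeof(struct child_device_config) works out to 33 bytes (matching the fixed u8 dev1[33] arrays it replaces), so the child device count falls straight out of the block size. A sketch of the computation the parser performs:

	/* the 16-bit block size field immediately precedes the block data */
	u16 block_size = *(u16 *)((char *)p_defs - 2);
	int child_device_num = (block_size - sizeof(*p_defs)) /
			       sizeof(struct child_device_config);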
http://bugs.freedesktop.org/show_bug.cgi?id=20429 Signed-off-by: Zhao Yakui Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_drv.h | 8 ++++ drivers/gpu/drm/i915/intel_bios.c | 86 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 93 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ded9e786883e..db81f5513daa 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -126,6 +126,13 @@ struct drm_i915_fence_reg { struct drm_gem_object *obj; }; +struct sdvo_device_mapping { + u8 dvo_port; + u8 slave_addr; + u8 dvo_wiring; + u8 initialized; +}; + typedef struct drm_i915_private { struct drm_device *dev; @@ -389,6 +396,7 @@ typedef struct drm_i915_private { /* storage for physical objects */ struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; } mm; + struct sdvo_device_mapping sdvo_mappings[2]; } drm_i915_private_t; /** driver private structure attached to each drm_gem_object */ diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 9d78cff33b24..754dd22fdd77 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -30,6 +30,8 @@ #include "i915_drv.h" #include "intel_bios.h" +#define SLAVE_ADDR1 0x70 +#define SLAVE_ADDR2 0x72 static void * find_section(struct bdb_header *bdb, int section_id) @@ -193,6 +195,88 @@ parse_general_features(struct drm_i915_private *dev_priv, } } +static void +parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) +{ + struct sdvo_device_mapping *p_mapping; + struct bdb_general_definitions *p_defs; + struct child_device_config *p_child; + int i, child_device_num, count; + u16 block_size, *block_ptr; + + p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); + if (!p_defs) { + DRM_DEBUG("No general definition block is found\n"); + return; + } + /* judge whether the size of child device meets the requirements. + * If the child device size obtained from general definition block + * is different with sizeof(struct child_device_config), skip the + * parsing of sdvo device info + */ + if (p_defs->child_dev_size != sizeof(*p_child)) { + /* different child dev size . Ignore it */ + DRM_DEBUG("different child size is found. Invalid.\n"); + return; + } + /* get the block size of general definitions */ + block_ptr = (u16 *)((char *)p_defs - 2); + block_size = *block_ptr; + /* get the number of child device */ + child_device_num = (block_size - sizeof(*p_defs)) / + sizeof(*p_child); + count = 0; + for (i = 0; i < child_device_num; i++) { + p_child = &(p_defs->devices[i]); + if (!p_child->device_type) { + /* skip the device block if device type is invalid */ + continue; + } + if (p_child->slave_addr != SLAVE_ADDR1 && + p_child->slave_addr != SLAVE_ADDR2) { + /* + * If the slave address is neither 0x70 nor 0x72, + * it is not a SDVO device. Skip it. + */ + continue; + } + if (p_child->dvo_port != DEVICE_PORT_DVOB && + p_child->dvo_port != DEVICE_PORT_DVOC) { + /* skip the incorrect SDVO port */ + DRM_DEBUG("Incorrect SDVO port. Skip it \n"); + continue; + } + DRM_DEBUG("the SDVO device with slave addr %2x is found on " + "%s port\n", + p_child->slave_addr, + (p_child->dvo_port == DEVICE_PORT_DVOB) ? 
+ "SDVOB" : "SDVOC"); + p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]); + if (!p_mapping->initialized) { + p_mapping->dvo_port = p_child->dvo_port; + p_mapping->slave_addr = p_child->slave_addr; + p_mapping->dvo_wiring = p_child->dvo_wiring; + p_mapping->initialized = 1; + } else { + DRM_DEBUG("Maybe one SDVO port is shared by " + "two SDVO device.\n"); + } + if (p_child->slave2_addr) { + /* Maybe this is a SDVO device with multiple inputs */ + /* And the mapping info is not added */ + DRM_DEBUG("there exists the slave2_addr. Maybe this " + "is a SDVO device with multiple inputs.\n"); + } + count++; + } + + if (!count) { + /* No SDVO device info is found */ + DRM_DEBUG("No SDVO device info is found in VBT\n"); + } + return; +} /** * intel_init_bios - initialize VBIOS settings & find VBT * @dev: DRM device @@ -242,7 +326,7 @@ intel_init_bios(struct drm_device *dev) parse_general_features(dev_priv, bdb); parse_lfp_panel_data(dev_priv, bdb); parse_sdvo_panel_data(dev_priv, bdb); - + parse_sdvo_device_mapping(dev_priv, bdb); pci_unmap_rom(pdev, bios); return 0; -- cgit v1.2.3 From 714605e4a05787c51a5ac36c926d2169cfdfbfba Mon Sep 17 00:00:00 2001 From: yakui_zhao Date: Sun, 31 May 2009 17:18:07 +0800 Subject: drm/i915: Initialize the SDVO device based on the sdvo info parsed from VBT http://bugs.freedesktop.org/show_bug.cgi?id=20429 Signed-off-by: Zhao Yakui [anholt: Massive cleanup of the slave addr function] Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_sdvo.c | 43 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index f3ef6bfd8ffc..d8fb88d335cd 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1741,6 +1741,43 @@ static struct i2c_algorithm intel_sdvo_i2c_bit_algo = { .master_xfer = intel_sdvo_master_xfer, }; +static u8 +intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct sdvo_device_mapping *my_mapping, *other_mapping; + + if (output_device == SDVOB) { + my_mapping = &dev_priv->sdvo_mappings[0]; + other_mapping = &dev_priv->sdvo_mappings[1]; + } else { + my_mapping = &dev_priv->sdvo_mappings[1]; + other_mapping = &dev_priv->sdvo_mappings[0]; + } + + /* If the BIOS described our SDVO device, take advantage of it. */ + if (my_mapping->slave_addr) + return my_mapping->slave_addr; + + /* If the BIOS only described a different SDVO device, use the + * address that it isn't using. + */ + if (other_mapping->slave_addr) { + if (other_mapping->slave_addr == 0x70) + return 0x72; + else + return 0x70; + } + + /* No SDVO device info is found for another DVO port, + * so use mapping assumption we had before BIOS parsing. 
+ */ + if (output_device == SDVOB) + return 0x70; + else + return 0x72; +} + bool intel_sdvo_init(struct drm_device *dev, int output_device) { struct drm_connector *connector; @@ -1752,6 +1789,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) u8 ch[0x40]; int i; int encoder_type, output_id; + u8 slave_addr; intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); if (!intel_output) { @@ -1770,16 +1808,15 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) if (!i2cbus) goto err_inteloutput; + slave_addr = intel_sdvo_get_slave_addr(dev, output_device); sdvo_priv->i2c_bus = i2cbus; if (output_device == SDVOB) { output_id = 1; - sdvo_priv->i2c_bus->slave_addr = 0x38; } else { output_id = 2; - sdvo_priv->i2c_bus->slave_addr = 0x39; } - + sdvo_priv->i2c_bus->slave_addr = slave_addr >> 1; sdvo_priv->output_device = output_device; intel_output->i2c_bus = i2cbus; intel_output->dev_priv = sdvo_priv; -- cgit v1.2.3 From 70aa96ca2d8d938fc036ef8fd189b0151f4fc3ba Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Wed, 27 May 2009 17:20:39 -0400 Subject: drm/i915: add ignore lvds quirk info for AOpen Mini PC Fix a FIXME in the intel LVDS bring-up code, adding the appropriate blacklist entry for the AOpen Mini PC, courtesy of a dmidecode dump from Florian Demmer. Signed-off-by: Jarod Wilson CC: Florian Demmer Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_lvds.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index eea3a548b82a..e4ca6a3cdbbc 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -485,8 +485,14 @@ static const struct dmi_system_id __initdata intel_no_lvds[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"), }, }, - - /* FIXME: add a check for the Aopen Mini PC */ + { + .callback = intel_no_lvds_dmi_callback, + .ident = "AOpen Mini PC", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "AOpen"), + DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"), + }, + }, { } /* terminating entry */ }; -- cgit v1.2.3 From 42c2798b35b95c471877133e19ccc3cab00e9b65 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 5 May 2009 13:13:16 -0700 Subject: drm/i915: apply G45 vblank count code to all G4x chips and fix max_frame_count All G4x and newer chips use the new style frame count register, with a full 32 bit frame count. Update the code to reflect this. 
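The limit matters because the DRM core must correct frame-count deltas for counter wraparound; with max_vblank_count set to an all-ones mask, the correction reduces to a masked subtraction (an illustrative sketch of the idea, not the literal core code):

	/* both samples come from dev->driver->get_vblank_counter() */
	u32 frame_delta = (cur_vblank - last_vblank) & dev->max_vblank_count;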
Signed-off-by: Jesse Barnes Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_dma.c | 5 ++++- drivers/gpu/drm/i915/i915_irq.c | 2 -- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 5d36059d6f4e..68e882cd9651 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1161,8 +1161,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) #endif dev->driver->get_vblank_counter = i915_get_vblank_counter; - if (IS_GM45(dev)) + dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ + if (IS_G4X(dev)) { + dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->driver->get_vblank_counter = gm45_get_vblank_counter; + } i915_gem_load(dev); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4b0bcbd58b8f..701d6809deb7 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -574,8 +574,6 @@ int i915_driver_irq_postinstall(struct drm_device *dev) dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; - dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; -- cgit v1.2.3 From b66d18ddb16603d1e1ec39cb2ff3abf3fd212180 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 15 May 2009 14:11:48 -0700 Subject: drm/i915: avoid non-atomic sysrq execution The sysrq functions are executed in hardirq context, so we shouldn't be calling sleeping functions from them, like mutex_locks or memory allocations. Fix up the i915 sysrq handler to avoid this. Signed-off-by: Jesse Barnes Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_fb.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index e4652dcdd9bb..7a66b91ccf42 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -857,9 +857,15 @@ void intelfb_restore(void) drm_crtc_helper_set_config(&kernelfb_mode); } +static void intelfb_restore_work_fn(struct work_struct *ignored) +{ + intelfb_restore(); +} +static DECLARE_WORK(intelfb_restore_work, intelfb_restore_work_fn); + static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3) { - intelfb_restore(); + schedule_work(&intelfb_restore_work); } static struct sysrq_key_op sysrq_intelfb_restore_op = { -- cgit v1.2.3 From 1b8e69662e1a086878bf930a6042daf7f8a076cc Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Fri, 5 Jun 2009 14:37:23 +0000 Subject: pnp: add PNP resource range checking function Add a PNP resource range check function, indicating whether a resource has been assigned to any device. 
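A prospective caller can use the new function to find out whether any PNP device already owns part of a window before poking at it; a hypothetical usage sketch (the 0x800-0x8ff range is made up):

	/* pnp_range_reserved() returns nonzero if [start, end] overlaps
	 * a resource assigned to any PNP device */
	if (pnp_range_reserved(0x800, 0x8ff))
		return -EBUSY;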
Signed-off-by: Bjorn Helgaas [apw@canonical.com: fixed up exports et al] Signed-off-by: Andy Whitcroft Signed-off-by: Eric Anholt --- drivers/pnp/resource.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'drivers') diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c index f604061d2bb0..ba9765427886 100644 --- a/drivers/pnp/resource.c +++ b/drivers/pnp/resource.c @@ -638,6 +638,24 @@ int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t start, } EXPORT_SYMBOL(pnp_possible_config); +int pnp_range_reserved(resource_size_t start, resource_size_t end) +{ + struct pnp_dev *dev; + struct pnp_resource *pnp_res; + resource_size_t *dev_start, *dev_end; + + pnp_for_each_dev(dev) { + list_for_each_entry(pnp_res, &dev->resources, list) { + dev_start = &pnp_res->res.start; + dev_end = &pnp_res->res.end; + if (ranged_conflict(&start, &end, dev_start, dev_end)) + return 1; + } + } + return 0; +} +EXPORT_SYMBOL(pnp_range_reserved); + /* format is: pnp_reserve_irq=irq1[,irq2] .... */ static int __init pnp_setup_reserve_irq(char *str) { -- cgit v1.2.3 From 2ac6bf4ddc87c3b6b609f8fa82f6ebbffeac12f4 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Fri, 5 Jun 2009 10:36:24 -0700 Subject: IB/mlx4: Add strong ordering to local inval and fast reg work requests The ConnectX Programmer's Reference Manual states that the "SO" bit must be set when posting Fast Register and Local Invalidate send work requests. When this bit is set, the work request will be executed only after all previous work requests on the send queue have been executed. (If the bit is not set, Fast Register and Local Invalidate WQEs may begin execution too early, which violates the defined semantics for these operations.) This fixes the issue with NFS/RDMA reported in Signed-off-by: Jack Morgenstein Cc: Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mlx4/qp.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 20724aee76f4..c4a02648c8af 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1585,12 +1585,16 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, break; case IB_WR_LOCAL_INV: + ctrl->srcrb_flags |= + cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); set_local_inv_seg(wqe, wr->ex.invalidate_rkey); wqe += sizeof (struct mlx4_wqe_local_inval_seg); size += sizeof (struct mlx4_wqe_local_inval_seg) / 16; break; case IB_WR_FAST_REG_MR: + ctrl->srcrb_flags |= + cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); set_fmr_seg(wqe, wr); wqe += sizeof (struct mlx4_wqe_fmr_seg); size += sizeof (struct mlx4_wqe_fmr_seg) / 16; -- cgit v1.2.3 From e957b60d1583022a0f7c03267d37fcae2ddb78b1 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sun, 7 Jun 2009 13:52:52 +0200 Subject: ide-gd: implement block device ->set_capacity method (v2) * Use ->probed_capacity to store native device capacity for ATA disks. * Add ->set_capacity method to struct ide_disk_ops. * Implement disk device ->set_capacity method for ATA disks. * Implement block device ->set_capacity method. v2: * Check if LBA and HPA are supported in ide_disk_set_capacity(). * According to the spec the SET MAX ADDRESS command shall be immediately preceded by a READ NATIVE MAX ADDRESS command. * Add ide_disk_hpa_{get_native,set}_capacity() helpers.
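At the command level, the ordering requirement above boils down to the following sequence, shown here as a sketch built from the helpers this patch adds (error handling omitted):

	/* READ NATIVE MAX ADDRESS must immediately precede SET MAX ADDRESS */
	native = ide_disk_hpa_get_native_capacity(drive, lba48);
	if (native)
		set = ide_disk_hpa_set_capacity(drive,
						min(capacity, native), lba48);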
Together with the previous patch adding ->set_capacity block device method this allows automatic disabling of Host Protected Area (HPA) if any partitions overlapping HPA are detected. Cc: Robert Hancock Cc: Frans Pop Cc: "Andries E. Brouwer" Cc: Al Viro Emphatically-Acked-by: Alan Cox Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-disk.c | 67 +++++++++++++++++++++++++++++++++++++++++--------- drivers/ide/ide-gd.c | 14 +++++++++++ 2 files changed, 70 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index a9fbe2c31210..61a6d3546221 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -302,14 +302,12 @@ static const struct drive_list_entry hpa_list[] = { { NULL, NULL } }; -static void idedisk_check_hpa(ide_drive_t *drive) +static u64 ide_disk_hpa_get_native_capacity(ide_drive_t *drive, int lba48) { - unsigned long long capacity, set_max; - int lba48 = ata_id_lba48_enabled(drive->id); + u64 capacity, set_max; capacity = drive->capacity64; - - set_max = idedisk_read_native_max_address(drive, lba48); + set_max = idedisk_read_native_max_address(drive, lba48); if (ide_in_drive_list(drive->id, hpa_list)) { /* @@ -320,9 +318,31 @@ static void idedisk_check_hpa(ide_drive_t *drive) set_max--; } + return set_max; +} + +static u64 ide_disk_hpa_set_capacity(ide_drive_t *drive, u64 set_max, int lba48) +{ + set_max = idedisk_set_max_address(drive, set_max, lba48); + if (set_max) + drive->capacity64 = set_max; + + return set_max; +} + +static void idedisk_check_hpa(ide_drive_t *drive) +{ + u64 capacity, set_max; + int lba48 = ata_id_lba48_enabled(drive->id); + + capacity = drive->capacity64; + set_max = ide_disk_hpa_get_native_capacity(drive, lba48); + if (set_max <= capacity) return; + drive->probed_capacity = set_max; + printk(KERN_INFO "%s: Host Protected Area detected.\n" "\tcurrent capacity is %llu sectors (%llu MB)\n" "\tnative capacity is %llu sectors (%llu MB)\n", @@ -330,13 +350,10 @@ static void idedisk_check_hpa(ide_drive_t *drive) capacity, sectors_to_MB(capacity), set_max, sectors_to_MB(set_max)); - set_max = idedisk_set_max_address(drive, set_max, lba48); - - if (set_max) { - drive->capacity64 = set_max; + set_max = ide_disk_hpa_set_capacity(drive, set_max, lba48); + if (set_max) printk(KERN_INFO "%s: Host Protected Area disabled.\n", drive->name); - } } static int ide_disk_get_capacity(ide_drive_t *drive) @@ -358,6 +375,8 @@ static int ide_disk_get_capacity(ide_drive_t *drive) drive->capacity64 = drive->cyl * drive->head * drive->sect; } + drive->probed_capacity = drive->capacity64; + if (lba) { drive->dev_flags |= IDE_DFLAG_LBA; @@ -376,7 +395,7 @@ static int ide_disk_get_capacity(ide_drive_t *drive) "%llu sectors (%llu MB)\n", drive->name, (unsigned long long)drive->capacity64, sectors_to_MB(drive->capacity64)); - drive->capacity64 = 1ULL << 28; + drive->probed_capacity = drive->capacity64 = 1ULL << 28; } if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && @@ -392,6 +411,31 @@ static int ide_disk_get_capacity(ide_drive_t *drive) return 0; } +static u64 ide_disk_set_capacity(ide_drive_t *drive, u64 capacity) +{ + u64 set = min(capacity, drive->probed_capacity); + u16 *id = drive->id; + int lba48 = ata_id_lba48_enabled(id); + + if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 || + ata_id_hpa_enabled(id) == 0) + goto out; + + /* + * according to the spec the SET MAX ADDRESS command shall be + * immediately preceded by a READ NATIVE MAX ADDRESS command + */ + capacity = 
ide_disk_hpa_get_native_capacity(drive, lba48); + if (capacity == 0) + goto out; + + set = ide_disk_hpa_set_capacity(drive, set, lba48); + if (set) + return set; +out: + return drive->capacity64; +} + static void idedisk_prepare_flush(struct request_queue *q, struct request *rq) { ide_drive_t *drive = q->queuedata; @@ -741,6 +785,7 @@ static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk, const struct ide_disk_ops ide_ata_disk_ops = { .check = ide_disk_check, + .set_capacity = ide_disk_set_capacity, .get_capacity = ide_disk_get_capacity, .setup = ide_disk_setup, .flush = ide_disk_flush, diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c index 4b6b71e2cdf5..214119026b3f 100644 --- a/drivers/ide/ide-gd.c +++ b/drivers/ide/ide-gd.c @@ -287,6 +287,19 @@ static int ide_gd_media_changed(struct gendisk *disk) return ret; } +static unsigned long long ide_gd_set_capacity(struct gendisk *disk, + unsigned long long capacity) +{ + struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); + ide_drive_t *drive = idkp->drive; + const struct ide_disk_ops *disk_ops = drive->disk_ops; + + if (disk_ops->set_capacity) + return disk_ops->set_capacity(drive, capacity); + + return drive->capacity64; +} + static int ide_gd_revalidate_disk(struct gendisk *disk) { struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); @@ -315,6 +328,7 @@ static struct block_device_operations ide_gd_ops = { .locked_ioctl = ide_gd_ioctl, .getgeo = ide_gd_getgeo, .media_changed = ide_gd_media_changed, + .set_capacity = ide_gd_set_capacity, .revalidate_disk = ide_gd_revalidate_disk }; -- cgit v1.2.3 From 075affcbe01d4d7cefcd0e30a98df1253bcf8d92 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sun, 7 Jun 2009 13:52:52 +0200 Subject: ide: preserve Host Protected Area by default (v2) From the perspective of most users of recent systems, disabling Host Protected Area (HPA) can break vendor RAID formats and GPT partitions, and risks corrupting firmware or overwriting vendor system recovery tools. Unfortunately the original (kernels < 2.6.30) behavior (unconditionally disabling HPA and using full disk capacity) was introduced at the time when the main use of HPA was to make the drive look small enough for the BIOS to allow the system to boot with large capacity drives. Thus to allow the maximum compatibility with the existing setups (using HPA and partitioned with HPA disabled) we automatically disable HPA if any partitions overlapping HPA are detected. Additionally HPA can also be disabled using the "nohpa" module parameter (i.e. "ide_core.nohpa=0.0" to disable HPA on /dev/hda). v2: Fix ->resume HPA support. While at it: - remove stale "idebus=" entry from Documentation/kernel-parameters.txt Cc: Robert Hancock Cc: Frans Pop Cc: "Andries E. 
Brouwer" Cc: Al Viro Acked-by: Sergei Shtylyov [patch description was based on input from Alan Cox and Frans Pop] Emphatically-Acked-by: Alan Cox Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-disk.c | 8 +++++++- drivers/ide/ide.c | 10 ++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 61a6d3546221..3d92c9d54d47 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -350,6 +350,9 @@ static void idedisk_check_hpa(ide_drive_t *drive) capacity, sectors_to_MB(capacity), set_max, sectors_to_MB(set_max)); + if ((drive->dev_flags & IDE_DFLAG_NOHPA) == 0) + return; + set_max = ide_disk_hpa_set_capacity(drive, set_max, lba48); if (set_max) printk(KERN_INFO "%s: Host Protected Area disabled.\n", @@ -430,8 +433,11 @@ static u64 ide_disk_set_capacity(ide_drive_t *drive, u64 capacity) goto out; set = ide_disk_hpa_set_capacity(drive, set, lba48); - if (set) + if (set) { + /* needed for ->resume to disable HPA */ + drive->dev_flags |= IDE_DFLAG_NOHPA; return set; + } out: return drive->capacity64; } diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 92c9b90931e7..16d056939f9f 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -211,6 +211,11 @@ static unsigned int ide_noflush; module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0); MODULE_PARM_DESC(noflush, "disable flush requests for a device"); +static unsigned int ide_nohpa; + +module_param_call(nohpa, ide_set_dev_param_mask, NULL, &ide_nohpa, 0); +MODULE_PARM_DESC(nohpa, "disable Host Protected Area for a device"); + static unsigned int ide_noprobe; module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0); @@ -281,6 +286,11 @@ static void ide_dev_apply_params(ide_drive_t *drive, u8 unit) drive->name); drive->dev_flags |= IDE_DFLAG_NOFLUSH; } + if (ide_nohpa & (1 << i)) { + printk(KERN_INFO "ide: disabling Host Protected Area for %s\n", + drive->name); + drive->dev_flags |= IDE_DFLAG_NOHPA; + } if (ide_noprobe & (1 << i)) { printk(KERN_INFO "ide: skipping probe for %s\n", drive->name); drive->dev_flags |= IDE_DFLAG_NOPROBE; -- cgit v1.2.3 From 72b9304f04d0724a25251e9e9041aa95f89c15dd Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 7 Jun 2009 15:37:03 +0200 Subject: pdc202xx_old: use ide_dma_test_irq() The driver's dma_test_irq() method, although tests some chip specific interrupt bits, finally always relies on the SFF-8038i standard interrupt bit. I see no point in testing the bits that are not trusted anyway -- the driver should be fully able to use the standard method implemetation, ide_dma_test_irq(). With this change 'pdc202xx_dma_ops' finally becomes identical to 'sff_dma_ops', and we can get rid of it... 
Signed-off-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/pdc202xx_old.c | 39 ++------------------------------------- 1 file changed, 2 insertions(+), 37 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c index b3bc96f930a6..668f452d74e9 100644 --- a/drivers/ide/pdc202xx_old.c +++ b/drivers/ide/pdc202xx_old.c @@ -203,30 +203,6 @@ static int pdc202xx_dma_end(ide_drive_t *drive) return ide_dma_end(drive); } -static int pdc202xx_dma_test_irq(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - unsigned long high_16 = hwif->extra_base - 16; - u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); - u8 sc1d = inb(high_16 + 0x001d); - - if (hwif->channel) { - /* bit7: Error, bit6: Interrupting, bit5: FIFO Full, bit4: FIFO Empty */ - if ((sc1d & 0x50) == 0x50) - goto somebody_else; - else if ((sc1d & 0x40) == 0x40) - return (dma_stat & 4) == 4; - } else { - /* bit3: Error, bit2: Interrupting, bit1: FIFO Full, bit0: FIFO Empty */ - if ((sc1d & 0x05) == 0x05) - goto somebody_else; - else if ((sc1d & 0x04) == 0x04) - return (dma_stat & 4) == 4; - } -somebody_else: - return (dma_stat & 4) == 4; /* return 1 if INTR asserted */ -} - static void pdc202xx_reset(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; @@ -313,23 +289,12 @@ static const struct ide_port_ops pdc2026x_port_ops = { .cable_detect = pdc2026x_cable_detect, }; -static const struct ide_dma_ops pdc20246_dma_ops = { - .dma_host_set = ide_dma_host_set, - .dma_setup = ide_dma_setup, - .dma_start = ide_dma_start, - .dma_end = ide_dma_end, - .dma_test_irq = pdc202xx_dma_test_irq, - .dma_lost_irq = ide_dma_lost_irq, - .dma_timer_expiry = ide_dma_sff_timer_expiry, - .dma_sff_read_status = ide_dma_sff_read_status, -}; - static const struct ide_dma_ops pdc2026x_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = pdc202xx_dma_start, .dma_end = pdc202xx_dma_end, - .dma_test_irq = pdc202xx_dma_test_irq, + .dma_test_irq = ide_dma_test_irq, .dma_lost_irq = pdc202xx_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_clear = pdc202xx_reset, .dma_sff_read_status = ide_dma_sff_read_status, }; @@ -354,7 +319,7 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = { .name = DRV_NAME, .init_chipset = init_chipset_pdc202xx, .port_ops = &pdc20246_port_ops, - .dma_ops = &pdc20246_dma_ops, + .dma_ops = &sff_dma_ops, .host_flags = IDE_HFLAGS_PDC202XX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, -- cgit v1.2.3 From 1221e241e3a6f1ff5b0de03d58d871f7c995781b Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 7 Jun 2009 15:37:04 +0200 Subject: pdc202xx_old: don't call pdc202xx_reset() on IRQ timeout The driver's dma_lost_irq() and dma_clear() methods call pdc202xx_reset() which resets both channels at once -- most probably by driving RESET- on them. Not only can such a reset severely disturb concurrent operations on the other channel, it is also clear overkill (especially in the first case) and is completely unexpected and thus not properly handled by the IDE core in this context (in the second case the usual SRST reset would most probably ensue anyway); it also causes a quite arbitrary 2-second delay. Hence, use the standard ide_dma_lost_irq() method and don't install the optional dma_clear() method at all -- the driver should do well without this age-old cruft...
Signed-off-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/pdc202xx_old.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c index 668f452d74e9..494b5404f277 100644 --- a/drivers/ide/pdc202xx_old.c +++ b/drivers/ide/pdc202xx_old.c @@ -219,12 +219,6 @@ static void pdc202xx_reset(ide_drive_t *drive) ide_set_max_pio(drive); } -static void pdc202xx_dma_lost_irq(ide_drive_t *drive) -{ - pdc202xx_reset(drive); - ide_dma_lost_irq(drive); -} - static int init_chipset_pdc202xx(struct pci_dev *dev) { unsigned long dmabase = pci_resource_start(dev, 4); @@ -295,9 +289,8 @@ static const struct ide_dma_ops pdc2026x_dma_ops = { .dma_start = pdc202xx_dma_start, .dma_end = pdc202xx_dma_end, .dma_test_irq = ide_dma_test_irq, - .dma_lost_irq = pdc202xx_dma_lost_irq, + .dma_lost_irq = ide_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, - .dma_clear = pdc202xx_reset, .dma_sff_read_status = ide_dma_sff_read_status, }; -- cgit v1.2.3 From ffddf1717b0d388879c646eaf6261a2b393c06ad Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 7 Jun 2009 15:37:05 +0200 Subject: pdc202xx_old: kill resetproc() method The driver's resetproc() method resets both channels at once -- most probably by driving RESET- on them. Not only can such a reset severely disturb concurrent operations on the other channel, it also incurs a 2-second delay, while there's no apparent reason why the SRST reset performed prior to the resetproc() call needs to be followed up by another reset. Signed-off-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/pdc202xx_old.c | 17 ----------------- 1 file changed, 17 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c index 494b5404f277..4980dd7b2e28 100644 --- a/drivers/ide/pdc202xx_old.c +++ b/drivers/ide/pdc202xx_old.c @@ -203,22 +203,6 @@ static int pdc202xx_dma_end(ide_drive_t *drive) return ide_dma_end(drive); } -static void pdc202xx_reset(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - unsigned long high_16 = hwif->extra_base - 16; - u8 udma_speed_flag = inb(high_16 | 0x001f); - - printk(KERN_WARNING "PDC202xx: software reset...\n"); - - outb(udma_speed_flag | 0x10, high_16 | 0x001f); - mdelay(100); - outb(udma_speed_flag & ~0x10, high_16 | 0x001f); - mdelay(2000); /* 2 seconds ?! */ - - ide_set_max_pio(drive); -} - static int init_chipset_pdc202xx(struct pci_dev *dev) { unsigned long dmabase = pci_resource_start(dev, 4); @@ -279,7 +263,6 @@ static const struct ide_port_ops pdc2026x_port_ops = { .set_pio_mode = pdc202xx_set_pio_mode, .set_dma_mode = pdc202xx_set_mode, .quirkproc = pdc202xx_quirkproc, - .resetproc = pdc202xx_reset, .cable_detect = pdc2026x_cable_detect, }; -- cgit v1.2.3 From 626542ca2277961aaa64855206574f8ca4f360e3 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sun, 7 Jun 2009 15:37:05 +0200 Subject: ide-tape: change IDE_AFLAG_IGNORE_DSC non-atomically There are two sites where the flag is being changed: ide_retry_pc and idetape_do_request. Both codepaths are protected by hwif->busy (ide_lock_port) and therefore we shouldn't need the atomic accesses.
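The underlying distinction: set_bit() and friends are atomic and take a bit number, while the plain bitwise forms take a mask and rely on external serialization, here the port lock. A sketch of the two idioms side by side:

	/* atomic form, addresses the flag by bit number: */
	set_bit(ilog2(IDE_AFLAG_IGNORE_DSC), &drive->atapi_flags);

	/* plain read-modify-write, safe while hwif->busy serializes us: */
	drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
	drive->atapi_flags &= ~IDE_AFLAG_IGNORE_DSC;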
Spotted-by: Jiri Slaby Signed-off-by: Borislav Petkov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-atapi.c | 2 +- drivers/ide/ide-tape.c | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index afe5a4323879..fbcb8513a4c8 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -258,7 +258,7 @@ void ide_retry_pc(ide_drive_t *drive) pc->req_xfer = sense_rq->data_len; if (drive->media == ide_tape) - set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); + drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC; if (ide_queue_sense_rq(drive, pc)) ide_complete_rq(drive, -EIO, blk_rq_bytes(drive->hwif->rq)); diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 203bbeac182f..f1d3c7b2a2b5 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -656,15 +656,15 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 && (rq->cmd[13] & REQ_IDETAPE_PC2) == 0) - set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); + drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC; if (drive->dev_flags & IDE_DFLAG_POST_RESET) { - set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); + drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC; drive->dev_flags &= ~IDE_DFLAG_POST_RESET; } - if (!test_and_clear_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags) && - (stat & ATA_DSC) == 0) { + if (!(drive->atapi_flags & IDE_AFLAG_IGNORE_DSC) && + !(stat & ATA_DSC)) { if (postponed_rq == NULL) { tape->dsc_polling_start = jiffies; tape->dsc_poll_freq = tape->best_dsc_rw_freq; @@ -684,7 +684,9 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW; idetape_postpone_request(drive); return ide_stopped; - } + } else + drive->atapi_flags &= ~IDE_AFLAG_IGNORE_DSC; + if (rq->cmd[13] & REQ_IDETAPE_READ) { pc = &tape->queued_pc; ide_tape_create_rw_cmd(tape, pc, rq, READ_6); -- cgit v1.2.3 From 49d8078ad1c3dca5b11ce18391bf6bd9af9acdf5 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sun, 7 Jun 2009 15:37:06 +0200 Subject: ide-tape: fix IDE_AFLAG_* atomic accesses These flags used to be bit numbers and now are single bits in the ->atapi_flags vector. Use them properly. 
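Concretely, the atomic bitops address a single bit by index, so a flag that is now defined as a mask has to be converted back with ilog2() before use; with an illustrative value of (1 << 3):

	#define IDE_AFLAG_MEDIUM_PRESENT	(1 << 3)	/* value is illustrative */

	set_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
		/* wrong: passes the mask value 8 as a bit number, sets bit 8 */
	set_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT), &drive->atapi_flags);
		/* right: sets bit 3 */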
Spotted-by: Jiri Slaby Signed-off-by: Borislav Petkov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-tape.c | 43 ++++++++++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index f1d3c7b2a2b5..055f52e1ea0e 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -397,7 +397,8 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc) if (readpos[0] & 0x4) { printk(KERN_INFO "ide-tape: Block location is unknown" "to the tape\n"); - clear_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags); + clear_bit(ilog2(IDE_AFLAG_ADDRESS_VALID), + &drive->atapi_flags); uptodate = 0; err = IDE_DRV_ERROR_GENERAL; } else { @@ -406,7 +407,8 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc) tape->partition = readpos[1]; tape->first_frame = be32_to_cpup((__be32 *)&readpos[4]); - set_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags); + set_bit(ilog2(IDE_AFLAG_ADDRESS_VALID), + &drive->atapi_flags); } } @@ -746,7 +748,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout) int load_attempted = 0; /* Wait for the tape to become ready */ - set_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags); + set_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT), &drive->atapi_flags); timeout += jiffies; while (time_before(jiffies, timeout)) { if (ide_do_test_unit_ready(drive, disk) == 0) @@ -822,7 +824,7 @@ static void __ide_tape_discard_merge_buffer(ide_drive_t *drive) if (tape->chrdev_dir != IDETAPE_DIR_READ) return; - clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags); + clear_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags); tape->valid = 0; if (tape->buf != NULL) { kfree(tape->buf); @@ -1115,7 +1117,8 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op, if (tape->chrdev_dir == IDETAPE_DIR_READ) { tape->valid = 0; - if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) + if (test_and_clear_bit(ilog2(IDE_AFLAG_FILEMARK), + &drive->atapi_flags)) ++count; ide_tape_discard_merge_buffer(drive, 0); } @@ -1170,7 +1173,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf, debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count); if (tape->chrdev_dir != IDETAPE_DIR_READ) { - if (test_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags)) + if (test_bit(ilog2(IDE_AFLAG_DETECT_BS), &drive->atapi_flags)) if (count > tape->blk_size && (count % tape->blk_size) == 0) tape->user_bs_factor = count / tape->blk_size; @@ -1186,7 +1189,8 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf, /* refill if staging buffer is empty */ if (!tape->valid) { /* If we are at a filemark, nothing more to read */ - if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) + if (test_bit(ilog2(IDE_AFLAG_FILEMARK), + &drive->atapi_flags)) break; /* read */ if (idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, @@ -1204,7 +1208,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf, done += todo; } - if (!done && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) { + if (!done && test_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags)) { debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name); idetape_space_over_filemarks(drive, MTFSF, 1); @@ -1338,7 +1342,8 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count) ide_tape_discard_merge_buffer(drive, 0); retval = ide_do_start_stop(drive, disk, !IDETAPE_LU_LOAD_MASK); if (!retval) - clear_bit(IDE_AFLAG_MEDIUM_PRESENT, 
&drive->atapi_flags); + clear_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT), + &drive->atapi_flags); return retval; case MTNOP: ide_tape_discard_merge_buffer(drive, 0); @@ -1360,9 +1365,11 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count) mt_count % tape->blk_size) return -EIO; tape->user_bs_factor = mt_count / tape->blk_size; - clear_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags); + clear_bit(ilog2(IDE_AFLAG_DETECT_BS), + &drive->atapi_flags); } else - set_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags); + set_bit(ilog2(IDE_AFLAG_DETECT_BS), + &drive->atapi_flags); return 0; case MTSEEK: ide_tape_discard_merge_buffer(drive, 0); @@ -1507,20 +1514,20 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp) filp->private_data = tape; - if (test_and_set_bit(IDE_AFLAG_BUSY, &drive->atapi_flags)) { + if (test_and_set_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags)) { retval = -EBUSY; goto out_put_tape; } retval = idetape_wait_ready(drive, 60 * HZ); if (retval) { - clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags); + clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags); printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name); goto out_put_tape; } idetape_read_position(drive); - if (!test_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags)) + if (!test_bit(ilog2(IDE_AFLAG_ADDRESS_VALID), &drive->atapi_flags)) (void)idetape_rewind_tape(drive); /* Read block size and write protect status from drive. */ @@ -1536,7 +1543,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp) if (tape->write_prot) { if ((filp->f_flags & O_ACCMODE) == O_WRONLY || (filp->f_flags & O_ACCMODE) == O_RDWR) { - clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags); + clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags); retval = -EROFS; goto out_put_tape; } @@ -1593,15 +1600,17 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp) ide_tape_discard_merge_buffer(drive, 1); } - if (minor < 128 && test_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags)) + if (minor < 128 && test_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT), + &drive->atapi_flags)) (void) idetape_rewind_tape(drive); + if (tape->chrdev_dir == IDETAPE_DIR_NONE) { if (tape->door_locked == DOOR_LOCKED) { if (!ide_set_media_lock(drive, tape->disk, 0)) tape->door_locked = DOOR_UNLOCKED; } } - clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags); + clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags); ide_tape_put(tape); unlock_kernel(); return 0; -- cgit v1.2.3 From dff8817b78e6e6a4913f2caf7637d62dcc49a03c Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sun, 7 Jun 2009 15:37:06 +0200 Subject: icside: remove superfluous ->maskproc method [inspired by pata_icside] Enabling/disabling of card IRQs is handled fine by IRQ and IDE subsystems so there is no need for custom ->maskproc method. Moreover icside_maskproc() would enable IRQ only if it was already enabled [because of 'if (state->enabled && !mask)' check]. 
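Schematically, the unmask path ran only when the expansion card interrupt was already enabled at the ecard level, so an unmask request issued while it was disabled fell through to the masking branch (condensed from the function removed below):

	if (state->enabled && !mask) {
		/* unmask: route the card IRQ to this channel */
	} else {
		/* with state->enabled == 0 even an unmask request
		   (mask == 0) lands here and leaves the IRQ masked */
	}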
Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/icside.c | 64 ++++------------------------------------------------ 1 file changed, 4 insertions(+), 60 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c index c5269fa1f733..5af3d0ffaf0a 100644 --- a/drivers/ide/icside.c +++ b/drivers/ide/icside.c @@ -65,8 +65,6 @@ static struct cardinfo icside_cardinfo_v6_2 = { }; struct icside_state { - unsigned int channel; - unsigned int enabled; void __iomem *irq_port; void __iomem *ioc_base; unsigned int sel; @@ -116,18 +114,11 @@ static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr) struct icside_state *state = ec->irq_data; void __iomem *base = state->irq_port; - state->enabled = 1; + writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1); + readb(base + ICS_ARCIN_V6_INTROFFSET_2); - switch (state->channel) { - case 0: - writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1); - readb(base + ICS_ARCIN_V6_INTROFFSET_2); - break; - case 1: - writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2); - readb(base + ICS_ARCIN_V6_INTROFFSET_1); - break; - } + writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2); + readb(base + ICS_ARCIN_V6_INTROFFSET_1); } /* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr) @@ -137,8 +128,6 @@ static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr) { struct icside_state *state = ec->irq_data; - state->enabled = 0; - readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); } @@ -160,44 +149,6 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = { .irqpending = icside_irqpending_arcin_v6, }; -/* - * Handle routing of interrupts. This is called before - * we write the command to the drive. - */ -static void icside_maskproc(ide_drive_t *drive, int mask) -{ - ide_hwif_t *hwif = drive->hwif; - struct expansion_card *ec = ECARD_DEV(hwif->dev); - struct icside_state *state = ecard_get_drvdata(ec); - unsigned long flags; - - local_irq_save(flags); - - state->channel = hwif->channel; - - if (state->enabled && !mask) { - switch (hwif->channel) { - case 0: - writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); - readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); - break; - case 1: - writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); - readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); - break; - } - } else { - readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); - readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); - } - - local_irq_restore(flags); -} - -static const struct ide_port_ops icside_v6_no_dma_port_ops = { - .maskproc = icside_maskproc, -}; - #ifdef CONFIG_BLK_DEV_IDEDMA_ICS /* * SG-DMA support. @@ -275,7 +226,6 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode) static const struct ide_port_ops icside_v6_port_ops = { .set_dma_mode = icside_set_dma_mode, - .maskproc = icside_maskproc, }; static void icside_dma_host_set(ide_drive_t *drive, int on) @@ -319,11 +269,6 @@ static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd) */ BUG_ON(dma_channel_active(ec->dma)); - /* - * Ensure that we have the right interrupt routed. - */ - icside_maskproc(drive, 0); - /* * Route the DMA signals to the correct interface. 
*/ @@ -452,7 +397,6 @@ err_free: static const struct ide_port_info icside_v6_port_info __initdata = { .init_dma = icside_dma_off_init, - .port_ops = &icside_v6_no_dma_port_ops, .dma_ops = &icside_v6_dma_ops, .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, .mwdma_mask = ATA_MWDMA2, -- cgit v1.2.3 From f9952beeaa851f7f79a4dd895bfed3f3ff6deebc Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sun, 7 Jun 2009 15:37:06 +0200 Subject: ide: remove superfluous SELECT_MASK() call from ide_driveid_update() We always call SELECT_MASK(drive, 0) after the ide_dev_read_id() call, so there is no need to do it again in the error path. Moreover, with the combination of an HPT36x controller and a drive on the quirk_drives[] list this can result in a superfluous enable_irq() call, which in turn will trigger WARN() in __enable_irq(). Acked-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-iops.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index 06fe002116ec..8dff623f9da3 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c @@ -311,7 +311,6 @@ int ide_driveid_update(ide_drive_t *drive) return 1; out_err: - SELECT_MASK(drive, 0); if (rc == 2) printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__); kfree(id); -- cgit v1.2.3 From d6dcdea726855d78048e4bfb950342afc0e83e47 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sun, 7 Jun 2009 15:37:07 +0200 Subject: ide: remove superfluous SELECT_MASK() call from do_rw_taskfile() With the ->write_devctl method call (which unmasks the drive IRQ) preceding the SELECT_MASK() call, there is really no need for the latter. Moreover, with the combination of an HPT36x controller and a drive on the quirk_drives[] list this can result in a superfluous enable_irq() call, which in turn will trigger WARN() in __enable_irq().
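To make the WARN() scenario in these two patches concrete, here is a minimal user-space model (not kernel code; the counter and names are illustrative) of the depth counting that enable_irq()/disable_irq() perform. One extra enable underflows the depth, which is exactly what __enable_irq() warns about:

#include <stdio.h>

static unsigned int irq_depth;	/* 0 means the IRQ is enabled */

static void model_disable_irq(void)
{
	irq_depth++;
}

static void model_enable_irq(void)
{
	/* the kernel WARNs here: unbalanced enable for the IRQ */
	if (irq_depth == 0) {
		fprintf(stderr, "WARN: unbalanced enable_irq\n");
		return;
	}
	irq_depth--;
}

int main(void)
{
	model_disable_irq();	/* quirky drive: SELECT_MASK(drive, 1) */
	model_enable_irq();	/* SELECT_MASK(drive, 0) after ide_dev_read_id() */
	model_enable_irq();	/* the superfluous call removed above -> WARN */
	return 0;
}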
Acked-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-taskfile.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index 8cab3c26acda..fbcb4151b0b7 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -98,7 +98,6 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd) if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) { ide_tf_dump(drive->name, cmd); tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); - SELECT_MASK(drive, 0); if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) { u8 data[2] = { cmd->tf.data, cmd->hob.data }; -- cgit v1.2.3 From d328e7657de1fde30141365466589ab259cf4f64 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sun, 7 Jun 2009 15:37:07 +0200 Subject: hpt366: sync quirk_drives[] list with pdc202xx_{new,old}.c Acked-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/hpt366.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c index 0feb66c720e1..47b31dacd526 100644 --- a/drivers/ide/hpt366.c +++ b/drivers/ide/hpt366.c @@ -141,6 +141,10 @@ static const char *quirk_drives[] = { "QUANTUM FIREBALLlct08 08", "QUANTUM FIREBALLP KA6.4", + "QUANTUM FIREBALLP KA9.1", + "QUANTUM FIREBALLP KX13.6", + "QUANTUM FIREBALLP KX20.5", + "QUANTUM FIREBALLP KX27.3", "QUANTUM FIREBALLP LM20.4", "QUANTUM FIREBALLP LM20.5", NULL -- cgit v1.2.3 From 0fcef027f60318cfa64ae4cdf5aa33905607d650 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sun, 7 Jun 2009 15:37:08 +0200 Subject: hpt366: enable all quirks for devices on quirk_drives[] list Also enable the quirks in do_reset1() and ide_config_drive_speed() for devices on the quirk_drives[] list. Acked-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/hpt366.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c index 47b31dacd526..cb04523e31cb 100644 --- a/drivers/ide/hpt366.c +++ b/drivers/ide/hpt366.c @@ -740,7 +740,7 @@ static void hpt3xx_quirkproc(ide_drive_t *drive) while (*list) if (strstr(m, *list++)) { - drive->quirk_list = 1; + drive->quirk_list = 2; return; } -- cgit v1.2.3 From 8bc1e5aa06a2a9a425c4a6795fc564cba1521487 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sun, 7 Jun 2009 15:37:09 +0200 Subject: ide: respect quirk_drives[] list on all controllers * Add the ide_check_nien_quirk_list() helper to the core code and then use it in ide_port_tune_devices() (its matching loop is sketched just below). * Remove the no longer needed ->quirkproc methods from hpt366.c and pdc202xx_{new,old}.c.
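For reference, the matching loop that the new ide_check_nien_quirk_list() helper performs (full version in the hunks below) boils down to a strstr() scan of the drive's model string. This standalone sketch uses an abbreviated two-entry list purely for the demo:

#include <stdio.h>
#include <string.h>

/* abbreviated copy of nien_quirk_list[]; the real list has eight entries */
static const char *nien_quirk_list[] = {
	"QUANTUM FIREBALLlct08 08",
	"QUANTUM FIREBALLP LM20.4",
	NULL
};

static int is_nien_quirky(const char *model)
{
	const char **list;

	for (list = nien_quirk_list; *list != NULL; list++)
		if (strstr(model, *list) != NULL)
			return 1;	/* drive needs the nIEN quirk */
	return 0;
}

int main(void)
{
	printf("%d\n", is_nien_quirky("QUANTUM FIREBALLP LM20.4"));	/* 1 */
	printf("%d\n", is_nien_quirky("ST380021A"));			/* 0 */
	return 0;
}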
Acked-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/hpt366.c | 27 --------------------------- drivers/ide/ide-iops.c | 25 +++++++++++++++++++++++++ drivers/ide/ide-probe.c | 2 ++ drivers/ide/pdc202xx_new.c | 26 -------------------------- drivers/ide/pdc202xx_old.c | 27 --------------------------- 5 files changed, 27 insertions(+), 80 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c index cb04523e31cb..a2e9f6c65a93 100644 --- a/drivers/ide/hpt366.c +++ b/drivers/ide/hpt366.c @@ -138,18 +138,6 @@ #undef HPT_RESET_STATE_ENGINE #undef HPT_DELAY_INTERRUPT -static const char *quirk_drives[] = { - "QUANTUM FIREBALLlct08 08", - "QUANTUM FIREBALLP KA6.4", - "QUANTUM FIREBALLP KA9.1", - "QUANTUM FIREBALLP KX13.6", - "QUANTUM FIREBALLP KX20.5", - "QUANTUM FIREBALLP KX27.3", - "QUANTUM FIREBALLP LM20.4", - "QUANTUM FIREBALLP LM20.5", - NULL -}; - static const char *bad_ata100_5[] = { "IBM-DTLA-307075", "IBM-DTLA-307060", @@ -733,20 +721,6 @@ static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio) hpt3xx_set_mode(drive, XFER_PIO_0 + pio); } -static void hpt3xx_quirkproc(ide_drive_t *drive) -{ - char *m = (char *)&drive->id[ATA_ID_PROD]; - const char **list = quirk_drives; - - while (*list) - if (strstr(m, *list++)) { - drive->quirk_list = 2; - return; - } - - drive->quirk_list = 0; -} - static void hpt3xx_maskproc(ide_drive_t *drive, int mask) { ide_hwif_t *hwif = drive->hwif; @@ -1408,7 +1382,6 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2) static const struct ide_port_ops hpt3xx_port_ops = { .set_pio_mode = hpt3xx_set_pio_mode, .set_dma_mode = hpt3xx_set_mode, - .quirkproc = hpt3xx_quirkproc, .maskproc = hpt3xx_maskproc, .mdma_filter = hpt3xx_mdma_filter, .udma_filter = hpt3xx_udma_filter, diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index 8dff623f9da3..c55349537c27 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c @@ -282,6 +282,31 @@ no_80w: return 0; } +static const char *nien_quirk_list[] = { + "QUANTUM FIREBALLlct08 08", + "QUANTUM FIREBALLP KA6.4", + "QUANTUM FIREBALLP KA9.1", + "QUANTUM FIREBALLP KX13.6", + "QUANTUM FIREBALLP KX20.5", + "QUANTUM FIREBALLP KX27.3", + "QUANTUM FIREBALLP LM20.4", + "QUANTUM FIREBALLP LM20.5", + NULL +}; + +void ide_check_nien_quirk_list(ide_drive_t *drive) +{ + const char **list, *m = (char *)&drive->id[ATA_ID_PROD]; + + for (list = nien_quirk_list; *list != NULL; list++) + if (strstr(m, *list) != NULL) { + drive->quirk_list = 2; + return; + } + + drive->quirk_list = 0; +} + int ide_driveid_update(ide_drive_t *drive) { u16 *id; diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 89574b0bd56d..28f95cb41c29 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -732,6 +732,8 @@ static void ide_port_tune_devices(ide_hwif_t *hwif) int i; ide_port_for_each_present_dev(i, drive, hwif) { + ide_check_nien_quirk_list(drive); + if (port_ops && port_ops->quirkproc) port_ops->quirkproc(drive); } diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c index b68906c3c17e..65ba8239e7b5 100644 --- a/drivers/ide/pdc202xx_new.c +++ b/drivers/ide/pdc202xx_new.c @@ -40,18 +40,6 @@ #define DBG(fmt, args...) 
#endif -static const char *pdc_quirk_drives[] = { - "QUANTUM FIREBALLlct08 08", - "QUANTUM FIREBALLP KA6.4", - "QUANTUM FIREBALLP KA9.1", - "QUANTUM FIREBALLP LM20.4", - "QUANTUM FIREBALLP KX13.6", - "QUANTUM FIREBALLP KX20.5", - "QUANTUM FIREBALLP KX27.3", - "QUANTUM FIREBALLP LM20.5", - NULL -}; - static u8 max_dma_rate(struct pci_dev *pdev) { u8 mode; @@ -200,19 +188,6 @@ static u8 pdcnew_cable_detect(ide_hwif_t *hwif) return ATA_CBL_PATA80; } -static void pdcnew_quirkproc(ide_drive_t *drive) -{ - const char **list, *m = (char *)&drive->id[ATA_ID_PROD]; - - for (list = pdc_quirk_drives; *list != NULL; list++) - if (strstr(m, *list) != NULL) { - drive->quirk_list = 2; - return; - } - - drive->quirk_list = 0; -} - static void pdcnew_reset(ide_drive_t *drive) { /* @@ -473,7 +448,6 @@ static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev) static const struct ide_port_ops pdcnew_port_ops = { .set_pio_mode = pdcnew_set_pio_mode, .set_dma_mode = pdcnew_set_dma_mode, - .quirkproc = pdcnew_quirkproc, .resetproc = pdcnew_reset, .cable_detect = pdcnew_cable_detect, }; diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c index 4980dd7b2e28..fe01db679a39 100644 --- a/drivers/ide/pdc202xx_old.c +++ b/drivers/ide/pdc202xx_old.c @@ -23,18 +23,6 @@ #define PDC202XX_DEBUG_DRIVE_INFO 0 -static const char *pdc_quirk_drives[] = { - "QUANTUM FIREBALLlct08 08", - "QUANTUM FIREBALLP KA6.4", - "QUANTUM FIREBALLP KA9.1", - "QUANTUM FIREBALLP LM20.4", - "QUANTUM FIREBALLP KX13.6", - "QUANTUM FIREBALLP KX20.5", - "QUANTUM FIREBALLP KX27.3", - "QUANTUM FIREBALLP LM20.5", - NULL -}; - static void pdc_old_disable_66MHz_clock(ide_hwif_t *); static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed) @@ -151,19 +139,6 @@ static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif) outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg); } -static void pdc202xx_quirkproc(ide_drive_t *drive) -{ - const char **list, *m = (char *)&drive->id[ATA_ID_PROD]; - - for (list = pdc_quirk_drives; *list != NULL; list++) - if (strstr(m, *list) != NULL) { - drive->quirk_list = 2; - return; - } - - drive->quirk_list = 0; -} - static void pdc202xx_dma_start(ide_drive_t *drive) { if (drive->current_speed > XFER_UDMA_2) @@ -256,13 +231,11 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev, static const struct ide_port_ops pdc20246_port_ops = { .set_pio_mode = pdc202xx_set_pio_mode, .set_dma_mode = pdc202xx_set_mode, - .quirkproc = pdc202xx_quirkproc, }; static const struct ide_port_ops pdc2026x_port_ops = { .set_pio_mode = pdc202xx_set_pio_mode, .set_dma_mode = pdc202xx_set_mode, - .quirkproc = pdc202xx_quirkproc, .cable_detect = pdc2026x_cable_detect, }; -- cgit v1.2.3 From 734affdcae20af4fec95e46a64fb29f063a15c19 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sun, 7 Jun 2009 15:37:10 +0200 Subject: ide: add IDE_DFLAG_NIEN_QUIRK device flag Add IDE_DFLAG_NIEN_QUIRK device flag and use it instead of drive->quirk_list. There should be no functional changes caused by this patch. 
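The before/after shape of the test this patch introduces is easy to see in isolation. A standalone sketch, with an illustrative bit value (the real IDE_DFLAG_NIEN_QUIRK value comes from the ide headers and is not reproduced here):

#include <stdio.h>

#define IDE_DFLAG_NIEN_QUIRK	(1ul << 27)	/* illustrative bit only */

struct model_drive {
	unsigned long dev_flags;
	int quirk_list;		/* the old 0/1/2 field this patch retires */
};

int main(void)
{
	struct model_drive drive = { .dev_flags = IDE_DFLAG_NIEN_QUIRK };

	/* old style: if (drive->quirk_list == 2) ... */
	/* new style: a plain flag test, no magic integer values */
	if (drive.dev_flags & IDE_DFLAG_NIEN_QUIRK)
		printf("leave nIEN alone for this drive\n");
	return 0;
}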
Acked-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/hpt366.c | 2 +- drivers/ide/ide-eh.c | 5 +++-- drivers/ide/ide-io.c | 8 ++++++-- drivers/ide/ide-iops.c | 6 ++---- 4 files changed, 12 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c index a2e9f6c65a93..7ce68ef6b904 100644 --- a/drivers/ide/hpt366.c +++ b/drivers/ide/hpt366.c @@ -727,7 +727,7 @@ static void hpt3xx_maskproc(ide_drive_t *drive, int mask) struct pci_dev *dev = to_pci_dev(hwif->dev); struct hpt_info *info = hpt3xx_get_info(hwif->dev); - if (drive->quirk_list == 0) + if ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0) return; if (info->chip_type >= HPT370) { diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c index 39d589254d41..2b9141979613 100644 --- a/drivers/ide/ide-eh.c +++ b/drivers/ide/ide-eh.c @@ -407,8 +407,9 @@ static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi) /* more than enough time */ udelay(10); /* clear SRST, leave nIEN (unless device is on the quirk list) */ - tp_ops->write_devctl(hwif, (drive->quirk_list == 2 ? 0 : ATA_NIEN) | - ATA_DEVCTL_OBS); + tp_ops->write_devctl(hwif, + ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) ? 0 : ATA_NIEN) | + ATA_DEVCTL_OBS); /* more than enough time */ udelay(10); hwif->poll_timeout = jiffies + WAIT_WORSTCASE; diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 9654bd34cf52..243cf6561e7e 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -488,11 +488,15 @@ repeat: if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) && hwif != prev_port) { + ide_drive_t *cur_dev = + prev_port ? prev_port->cur_dev : NULL; + /* * set nIEN for previous port, drives in the - * quirk_list may not like intr setups/cleanups + * quirk list may not like intr setups/cleanups */ - if (prev_port && prev_port->cur_dev->quirk_list == 0) + if (cur_dev && + (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0) prev_port->tp_ops->write_devctl(prev_port, ATA_NIEN | ATA_DEVCTL_OBS); diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index c55349537c27..fa047150a1c6 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c @@ -300,11 +300,9 @@ void ide_check_nien_quirk_list(ide_drive_t *drive) for (list = nien_quirk_list; *list != NULL; list++) if (strstr(m, *list) != NULL) { - drive->quirk_list = 2; + drive->dev_flags |= IDE_DFLAG_NIEN_QUIRK; return; } - - drive->quirk_list = 0; } int ide_driveid_update(ide_drive_t *drive) @@ -389,7 +387,7 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed) tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES); - if (drive->quirk_list == 2) + if (drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); error = __ide_wait_stat(drive, drive->ready_stat, -- cgit v1.2.3 From 3772a99175f5378b5001e8da364341a8b8226a4a Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 22 May 2009 14:50:54 -0400 Subject: [SCSI] lpfc 8.3.2 : Reorganization for SLI4 Preps the organization of the driver so that the bottom half, which interacts with the hardware, can share common code sequences for attachment, detachment, initialization, teardown, etc. with new hardware. For very common code sections, which become specific to the interface type, the driver uses an indirect function call. The function is set at initialization. For less common sections, such as initialization, the driver looks at the interface type and calls the routines appropriate to the interface.
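The "indirect function call set at initialization" described above amounts to a per-device-group jump table. A compressed standalone sketch of that pattern (names and the group value are illustrative; the real table filled in by lpfc_init_api_table_setup() later in this patch carries many more entries):

#include <stdio.h>

#define MODEL_PCI_DEV_LP	0x1	/* stands in for LPFC_PCI_DEV_LP */

struct model_hba {
	int pci_dev_grp;
	void (*handle_eratt)(struct model_hba *);	/* filled in once at init */
};

static void handle_eratt_s3(struct model_hba *phba)
{
	printf("SLI-3 error-attention handler\n");
}

/* set the API jump table once, based on the PCI device group */
static int model_init_api_table(struct model_hba *phba, int dev_grp)
{
	switch (dev_grp) {
	case MODEL_PCI_DEV_LP:
		phba->handle_eratt = handle_eratt_s3;
		return 0;
	default:
		return -1;	/* invalid PCI-device group */
	}
}

int main(void)
{
	struct model_hba phba = { .pci_dev_grp = MODEL_PCI_DEV_LP };

	if (model_init_api_table(&phba, phba.pci_dev_grp) == 0)
		phba.handle_eratt(&phba);	/* hot path uses the indirect call */
	return 0;
}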
Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 72 +- drivers/scsi/lpfc/lpfc_ct.c | 4 +- drivers/scsi/lpfc/lpfc_debugfs.c | 17 +- drivers/scsi/lpfc/lpfc_els.c | 84 +- drivers/scsi/lpfc/lpfc_hbadisc.c | 69 +- drivers/scsi/lpfc/lpfc_init.c | 1955 ++++++++++++++++++++++++++++---------- drivers/scsi/lpfc/lpfc_scsi.c | 446 ++++++--- drivers/scsi/lpfc/lpfc_sli.c | 1006 +++++++++++++------- 8 files changed, 2541 insertions(+), 1112 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 1105f9a111ba..6c24c9aabe7b 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -23,6 +23,13 @@ struct lpfc_sli2_slim; +#define LPFC_PCI_DEV_LP 0x1 +#define LPFC_PCI_DEV_OC 0x2 + +#define LPFC_SLI_REV2 2 +#define LPFC_SLI_REV3 3 +#define LPFC_SLI_REV4 4 + #define LPFC_MAX_TARGET 4096 /* max number of targets supported */ #define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els requests */ @@ -264,8 +271,8 @@ enum hba_state { }; struct lpfc_vport { - struct list_head listentry; struct lpfc_hba *phba; + struct list_head listentry; uint8_t port_type; #define LPFC_PHYSICAL_PORT 1 #define LPFC_NPIV_PORT 2 @@ -420,8 +427,66 @@ enum intr_type_t { }; struct lpfc_hba { + /* SCSI interface function jump table entries */ + int (*lpfc_new_scsi_buf) + (struct lpfc_vport *, int); + struct lpfc_scsi_buf * (*lpfc_get_scsi_buf) + (struct lpfc_hba *); + int (*lpfc_scsi_prep_dma_buf) + (struct lpfc_hba *, struct lpfc_scsi_buf *); + void (*lpfc_scsi_unprep_dma_buf) + (struct lpfc_hba *, struct lpfc_scsi_buf *); + void (*lpfc_release_scsi_buf) + (struct lpfc_hba *, struct lpfc_scsi_buf *); + void (*lpfc_rampdown_queue_depth) + (struct lpfc_hba *); + void (*lpfc_scsi_prep_cmnd) + (struct lpfc_vport *, struct lpfc_scsi_buf *, + struct lpfc_nodelist *); + int (*lpfc_scsi_prep_task_mgmt_cmd) + (struct lpfc_vport *, struct lpfc_scsi_buf *, + unsigned int, uint8_t); + + /* IOCB interface function jump table entries */ + int (*__lpfc_sli_issue_iocb) + (struct lpfc_hba *, uint32_t, + struct lpfc_iocbq *, uint32_t); + void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *, + struct lpfc_iocbq *); + int (*lpfc_hba_down_post)(struct lpfc_hba *phba); + + + IOCB_t * (*lpfc_get_iocb_from_iocbq) + (struct lpfc_iocbq *); + void (*lpfc_scsi_cmd_iocb_cmpl) + (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); + + /* MBOX interface function jump table entries */ + int (*lpfc_sli_issue_mbox) + (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); + /* Slow-path IOCB process function jump table entries */ + void (*lpfc_sli_handle_slow_ring_event) + (struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + uint32_t mask); + /* INIT device interface function jump table entries */ + int (*lpfc_sli_hbq_to_firmware) + (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *); + int (*lpfc_sli_brdrestart) + (struct lpfc_hba *); + int (*lpfc_sli_brdready) + (struct lpfc_hba *, uint32_t); + void (*lpfc_handle_eratt) + (struct lpfc_hba *); + void (*lpfc_stop_port) + (struct lpfc_hba *); + + + /* SLI4 specific HBA data structure */ + struct lpfc_sli4_hba sli4_hba; + struct lpfc_sli sli; - uint32_t sli_rev; /* SLI2 or SLI3 */ + uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... 
*/ + uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */ uint32_t sli3_options; /* Mask of enabled SLI3 options */ #define LPFC_SLI3_HBQ_ENABLED 0x01 #define LPFC_SLI3_NPIV_ENABLED 0x02 @@ -526,11 +591,12 @@ struct lpfc_hba { unsigned long data_flags; uint32_t hbq_in_use; /* HBQs in use flag */ - struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ + struct list_head rb_pend_list; /* Received buffers to be processed */ uint32_t hbq_count; /* Count of configured HBQs */ struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ + unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */ unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ void __iomem *slim_memmap_p; /* Kernel memory mapped address for PCI BAR0 */ diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 896c7b0351e5..4164b935ea9f 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -267,8 +267,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, uint32_t tmo, uint8_t retry) { struct lpfc_hba *phba = vport->phba; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; IOCB_t *icmd; struct lpfc_iocbq *geniocb; int rc; @@ -331,7 +329,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT; geniocb->vport = vport; geniocb->retry = retry; - rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0); if (rc == IOCB_ERROR) { lpfc_sli_release_iocbq(phba, geniocb); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 52be5644e07a..5dd66925f4ca 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -280,6 +280,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) struct lpfc_dmabuf *d_buf; struct hbq_dmabuf *hbq_buf; + if (phba->sli_rev != 3) + return 0; cnt = LPFC_HBQINFO_SIZE; spin_lock_irq(&phba->hbalock); @@ -489,12 +491,15 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) pring->next_cmdidx, pring->local_getidx, pring->flag, pgpp->rspPutInx, pring->numRiocb); } - word0 = readl(phba->HAregaddr); - word1 = readl(phba->CAregaddr); - word2 = readl(phba->HSregaddr); - word3 = readl(phba->HCregaddr); - len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n", - word0, word1, word2, word3); + + if (phba->sli_rev <= LPFC_SLI_REV3) { + word0 = readl(phba->HAregaddr); + word1 = readl(phba->CAregaddr); + word2 = readl(phba->HSregaddr); + word3 = readl(phba->HCregaddr); + len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x " + "HC:%08x\n", word0, word1, word2, word3); + } spin_unlock_irq(&phba->hbalock); return len; } diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b8b34cf5c3d2..8c5c3aea4a19 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -84,7 +84,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport) uint32_t ha_copy; if (vport->port_state >= LPFC_VPORT_READY || - phba->link_state == LPFC_LINK_DOWN) + phba->link_state == LPFC_LINK_DOWN || + phba->sli_rev > LPFC_SLI_REV3) return 0; /* Read the HBA Host Attention Register */ @@ -305,7 +306,7 @@ els_iocb_free_pcmb_exit: * 0 - successfully issued fabric registration login for @vport * -ENXIO -- failed to issue fabric registration login for @vport **/ -static int +int 
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; @@ -345,8 +346,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) err = 4; goto fail; } - rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, - 0); + rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0); if (rc) { err = 5; goto fail_free_mbox; @@ -1350,14 +1350,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) IOCB_t *icmd; struct lpfc_nodelist *ndlp; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; int ret; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ ndlp = lpfc_findnode_did(vport, did); if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) @@ -1391,7 +1389,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) phba->fc_stat.elsXmitPLOGI++; elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; - ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (ret == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); @@ -1501,14 +1499,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, PRLI *npr; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; - struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; - psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ - cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_PRLI); @@ -1550,7 +1543,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_PRLI_SND; spin_unlock_irq(shost->host_lock); - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_PRLI_SND; spin_unlock_irq(shost->host_lock); @@ -1788,8 +1782,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ADISC *ap; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; uint8_t *pcmd; uint16_t cmdsize; @@ -1822,7 +1814,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_ADISC_SND; spin_unlock_irq(shost->host_lock); - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_ADISC_SND; spin_unlock_irq(shost->host_lock); @@ -1937,15 +1930,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_hba *phba = vport->phba; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; - struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; int rc; - psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; - spin_lock_irq(shost->host_lock); if (ndlp->nlp_flag & NLP_LOGO_SND) { spin_unlock_irq(shost->host_lock); @@ -1978,7 +1966,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_LOGO_SND; spin_unlock_irq(shost->host_lock); - rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { 
spin_lock_irq(shost->host_lock); @@ -2058,14 +2046,12 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) struct lpfc_hba *phba = vport->phba; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; struct lpfc_nodelist *ndlp; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ cmdsize = (sizeof(uint32_t) + sizeof(SCR)); ndlp = lpfc_findnode_did(vport, nportid); @@ -2108,7 +2094,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) phba->fc_stat.elsXmitSCR++; elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { /* The additional lpfc_nlp_put will cause the following * lpfc_els_free_iocb routine to trigger the rlease of * the node. @@ -2152,7 +2139,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) struct lpfc_hba *phba = vport->phba; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli; FARP *fp; uint8_t *pcmd; @@ -2162,7 +2148,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) struct lpfc_nodelist *ndlp; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ cmdsize = (sizeof(uint32_t) + sizeof(FARP)); ndlp = lpfc_findnode_did(vport, nportid); @@ -2219,7 +2204,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) phba->fc_stat.elsXmitFARPR++; elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { /* The additional lpfc_nlp_put will cause the following * lpfc_els_free_iocb routine to trigger the release of * the node. 
@@ -2961,6 +2947,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) */ lpfc_nlp_not_used(ndlp); } + return; } @@ -3170,7 +3157,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, IOCB_t *icmd; IOCB_t *oldcmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; @@ -3178,7 +3164,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, ELS_PKT *els_pkt_ptr; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ oldcmd = &oldiocb->iocb; switch (flag) { @@ -3266,7 +3251,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, } phba->fc_stat.elsXmitACC++; - rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -3305,15 +3290,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, IOCB_t *icmd; IOCB_t *oldcmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; int rc; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ - cmdsize = 2 * sizeof(uint32_t); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); @@ -3346,7 +3328,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, phba->fc_stat.elsXmitLSRJT++; elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; - rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); @@ -3379,8 +3361,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; ADISC *ap; IOCB_t *icmd, *oldcmd; struct lpfc_iocbq *elsiocb; @@ -3422,7 +3402,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, phba->fc_stat.elsXmitACC++; elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; - rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -3459,14 +3439,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, IOCB_t *icmd; IOCB_t *oldcmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; int rc; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ cmdsize = sizeof(uint32_t) + sizeof(PRLI); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, @@ -3520,7 +3498,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, phba->fc_stat.elsXmitACC++; elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; - rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -3562,15 +3540,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, RNID *rn; IOCB_t *icmd, *oldcmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; int rc; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; - cmdsize = sizeof(uint32_t) + sizeof(uint32_t) + (2 * sizeof(struct lpfc_name)); if (format) @@ -3626,7 +3601,7 @@ 
lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, * it could be freed */ - rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -4440,8 +4415,6 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, static void lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; MAILBOX_t *mb; IOCB_t *icmd; RPS_RSP *rps_rsp; @@ -4507,7 +4480,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ndlp->nlp_rpi); elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitACC++; - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) lpfc_els_free_iocb(phba, elsiocb); return; } @@ -4616,8 +4589,6 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, IOCB_t *icmd, *oldcmd; RPL_RSP rpl_rsp; struct lpfc_iocbq *elsiocb; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; uint8_t *pcmd; elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, @@ -4654,7 +4625,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, ndlp->nlp_rpi); elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitACC++; - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; } @@ -6139,7 +6111,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; @@ -6169,7 +6140,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_LOGO_SND; spin_unlock_irq(shost->host_lock); - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_LOGO_SND; spin_unlock_irq(shost->host_lock); @@ -6224,7 +6196,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) struct lpfc_iocbq *iocb; unsigned long iflags; int ret; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; IOCB_t *cmd; repeat: @@ -6248,7 +6219,7 @@ repeat: "Fabric sched1: ste:x%x", iocb->vport->port_state, 0, 0); - ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); + ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); if (ret == IOCB_ERROR) { iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; @@ -6394,7 +6365,6 @@ static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) { unsigned long iflags; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; int ready; int ret; @@ -6418,7 +6388,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) "Fabric sched2: ste:x%x", iocb->vport->port_state, 0, 0); - ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); + ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); if (ret == IOCB_ERROR) { iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c 
b/drivers/scsi/lpfc/lpfc_hbadisc.c index e764ce0bf704..25fc96c9081f 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -555,23 +555,24 @@ lpfc_work_done(struct lpfc_hba *phba) /* * Turn on Ring interrupts */ - spin_lock_irq(&phba->hbalock); - control = readl(phba->HCregaddr); - if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { - lpfc_debugfs_slow_ring_trc(phba, - "WRK Enable ring: cntl:x%x hacopy:x%x", - control, ha_copy, 0); - - control |= (HC_R0INT_ENA << LPFC_ELS_RING); - writel(control, phba->HCregaddr); - readl(phba->HCregaddr); /* flush */ - } - else { - lpfc_debugfs_slow_ring_trc(phba, - "WRK Ring ok: cntl:x%x hacopy:x%x", - control, ha_copy, 0); + if (phba->sli_rev <= LPFC_SLI_REV3) { + spin_lock_irq(&phba->hbalock); + control = readl(phba->HCregaddr); + if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { + lpfc_debugfs_slow_ring_trc(phba, + "WRK Enable ring: cntl:x%x hacopy:x%x", + control, ha_copy, 0); + + control |= (HC_R0INT_ENA << LPFC_ELS_RING); + writel(control, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } else { + lpfc_debugfs_slow_ring_trc(phba, + "WRK Ring ok: cntl:x%x hacopy:x%x", + control, ha_copy, 0); + } + spin_unlock_irq(&phba->hbalock); } - spin_unlock_irq(&phba->hbalock); } lpfc_work_list_done(phba); } @@ -689,7 +690,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport) lpfc_can_disctmo(vport); } -static void +void lpfc_linkdown_port(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); @@ -1147,10 +1148,12 @@ lpfc_enable_la(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_PROCESS_LA; - control = readl(phba->HCregaddr); - control |= HC_LAINT_ENA; - writel(control, phba->HCregaddr); - readl(phba->HCregaddr); /* flush */ + if (phba->sli_rev <= LPFC_SLI_REV3) { + control = readl(phba->HCregaddr); + control |= HC_LAINT_ENA; + writel(control, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } spin_unlock_irq(&phba->hbalock); } @@ -2919,11 +2922,13 @@ restart_disc: * set port_state to PORT_READY if SLI2. * cmpl_reg_vpi will set port_state to READY for SLI3. */ - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) - lpfc_issue_reg_vpi(phba, vport); - else { /* NPIV Not enabled */ - lpfc_issue_clear_la(phba, vport); - vport->port_state = LPFC_VPORT_READY; + if (phba->sli_rev < LPFC_SLI_REV4) { + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) + lpfc_issue_reg_vpi(phba, vport); + else { /* NPIV Not enabled */ + lpfc_issue_clear_la(phba, vport); + vport->port_state = LPFC_VPORT_READY; + } } /* Setup and issue mailbox INITIALIZE LINK command */ @@ -2959,11 +2964,13 @@ restart_disc: * set port_state to PORT_READY if SLI2. * cmpl_reg_vpi will set port_state to READY for SLI3. 
*/ - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) - lpfc_issue_reg_vpi(phba, vport); - else { /* NPIV Not enabled */ - lpfc_issue_clear_la(phba, vport); - vport->port_state = LPFC_VPORT_READY; + if (phba->sli_rev < LPFC_SLI_REV4) { + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) + lpfc_issue_reg_vpi(phba, vport); + else { /* NPIV Not enabled */ + lpfc_issue_clear_la(phba, vport); + vport->port_state = LPFC_VPORT_READY; + } } break; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 86d1bdcbf2d8..3f06ce2becf5 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -571,16 +571,20 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) { struct lpfc_vport **vports; int i; - /* Disable interrupts */ - writel(0, phba->HCregaddr); - readl(phba->HCregaddr); /* flush */ + + if (phba->sli_rev <= LPFC_SLI_REV3) { + /* Disable interrupts */ + writel(0, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } if (phba->pport->load_flag & FC_UNLOADING) lpfc_cleanup_discovery_resources(phba->pport); else { vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) + for (i = 0; i <= phba->max_vports && + vports[i] != NULL; i++) lpfc_cleanup_discovery_resources(vports[i]); lpfc_destroy_vport_work_array(phba, vports); } @@ -588,7 +592,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) } /** - * lpfc_hba_down_post - Perform lpfc uninitialization after HBA reset + * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset * @phba: pointer to lpfc HBA data structure. * * This routine will do uninitialization after the HBA is reset when bring @@ -598,8 +602,8 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) * 0 - sucess. * Any other value - error. **/ -int -lpfc_hba_down_post(struct lpfc_hba *phba) +static int +lpfc_hba_down_post_s3(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; @@ -909,13 +913,30 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) phba->work_hs = old_host_status & ~HS_FFER1; + spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~DEFER_ERATT; + spin_unlock_irq(&phba->hbalock); phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); phba->work_status[1] = readl(phba->MBslimaddr + 0xac); } +static void +lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) +{ + struct lpfc_board_event_header board_event; + struct Scsi_Host *shost; + + board_event.event_type = FC_REG_BOARD_EVENT; + board_event.subcategory = LPFC_EVENT_PORTINTERR; + shost = lpfc_shost_from_vport(phba->pport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(board_event), + (char *) &board_event, + LPFC_NL_VENDOR_ID); +} + /** - * lpfc_handle_eratt - The HBA hardware error handler + * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler * @phba: pointer to lpfc hba data structure. 
* * This routine is invoked to handle the following HBA hardware error @@ -924,8 +945,8 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) * 2 - DMA ring index out of range * 3 - Mailbox command came back as unknown **/ -void -lpfc_handle_eratt(struct lpfc_hba *phba) +static void +lpfc_handle_eratt_s3(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; struct lpfc_sli *psli = &phba->sli; @@ -934,24 +955,23 @@ lpfc_handle_eratt(struct lpfc_hba *phba) unsigned long temperature; struct temp_event temp_event_data; struct Scsi_Host *shost; - struct lpfc_board_event_header board_event; /* If the pci channel is offline, ignore possible errors, - * since we cannot communicate with the pci card anyway. */ - if (pci_channel_offline(phba->pcidev)) + * since we cannot communicate with the pci card anyway. + */ + if (pci_channel_offline(phba->pcidev)) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~DEFER_ERATT; + spin_unlock_irq(&phba->hbalock); return; + } + /* If resets are disabled then leave the HBA alone and return */ if (!phba->cfg_enable_hba_reset) return; /* Send an internal error event to mgmt application */ - board_event.event_type = FC_REG_BOARD_EVENT; - board_event.subcategory = LPFC_EVENT_PORTINTERR; - shost = lpfc_shost_from_vport(phba->pport); - fc_host_post_vendor_event(shost, fc_get_event_number(), - sizeof(board_event), - (char *) &board_event, - LPFC_NL_VENDOR_ID); + lpfc_board_errevt_to_mgmt(phba); if (phba->hba_flag & DEFER_ERATT) lpfc_handle_deferred_eratt(phba); @@ -1137,7 +1157,7 @@ lpfc_handle_latt_err_exit: * 0 - pointer to the VPD passed in is NULL * 1 - success **/ -static int +int lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) { uint8_t lenlo, lenhi; @@ -1533,7 +1553,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; icmd->ulpLe = 1; - if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == + IOCB_ERROR) { lpfc_mbuf_free(phba, mp1->virt, mp1->phys); kfree(mp1); cnt++; @@ -1761,7 +1782,6 @@ lpfc_cleanup(struct lpfc_vport *vport) * Lets wait for this to happen, if needed. */ while (!list_empty(&vport->fc_nodes)) { - if (i++ > 3000) { lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0233 Nodelist not empty\n"); @@ -1782,7 +1802,6 @@ lpfc_cleanup(struct lpfc_vport *vport) /* Wait for any activity on ndlps to settle */ msleep(10); } - return; } /** @@ -1803,22 +1822,36 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport) } /** - * lpfc_stop_phba_timers - Stop all the timers associated with an HBA + * lpfc_stop_hba_timers - Stop all the timers associated with an HBA * @phba: pointer to lpfc hba data structure. * * This routine stops all the timers associated with a HBA. This function is * invoked before either putting a HBA offline or unloading the driver. 
**/ -static void -lpfc_stop_phba_timers(struct lpfc_hba *phba) +void +lpfc_stop_hba_timers(struct lpfc_hba *phba) { - del_timer_sync(&phba->fcp_poll_timer); lpfc_stop_vport_timers(phba->pport); del_timer_sync(&phba->sli.mbox_tmo); del_timer_sync(&phba->fabric_block_timer); - phba->hb_outstanding = 0; - del_timer_sync(&phba->hb_tmofunc); del_timer_sync(&phba->eratt_poll); + del_timer_sync(&phba->hb_tmofunc); + phba->hb_outstanding = 0; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + /* Stop any LightPulse device specific driver timers */ + del_timer_sync(&phba->fcp_poll_timer); + break; + case LPFC_PCI_DEV_OC: + /* Stop any OneConnect device sepcific driver timers */ + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0297 Invalid device group (x%x)\n", + phba->pci_dev_grp); + break; + } return; } @@ -2509,9 +2542,8 @@ lpfc_disable_msi(struct lpfc_hba *phba) * * This routine it invoked to log the currently used active interrupt mode * to the device. - */ -static void -lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) + **/ +static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) { switch (intr_mode) { case 0: @@ -2534,293 +2566,380 @@ lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) return; } -static void -lpfc_stop_port(struct lpfc_hba *phba) -{ - /* Clear all interrupt enable conditions */ - writel(0, phba->HCregaddr); - readl(phba->HCregaddr); /* flush */ - /* Clear all pending interrupts */ - writel(0xffffffff, phba->HAregaddr); - readl(phba->HAregaddr); /* flush */ - - /* Reset some HBA SLI setup states */ - lpfc_stop_phba_timers(phba); - phba->pport->work_port_events = 0; - - return; -} - /** - * lpfc_enable_intr - Enable device interrupt + * lpfc_enable_pci_dev - Enable a generic PCI device. * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to enable device interrupt and associate driver's - * interrupt handler(s) to interrupt vector(s). Depends on the interrupt - * mode configured to the driver, the driver will try to fallback from the - * configured interrupt mode to an interrupt mode which is supported by the - * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ. + * This routine is invoked to enable the PCI device that is common to all + * PCI devices. 
* * Return codes - * 0 - sucessful - * other values - error + * 0 - sucessful + * other values - error **/ -static uint32_t -lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) +static int +lpfc_enable_pci_dev(struct lpfc_hba *phba) { - uint32_t intr_mode = LPFC_INTR_ERROR; - int retval; + struct pci_dev *pdev; + int bars; - if (cfg_mode == 2) { - /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ - retval = lpfc_sli_config_port(phba, 3); - if (!retval) { - /* Now, try to enable MSI-X interrupt mode */ - retval = lpfc_enable_msix(phba); - if (!retval) { - /* Indicate initialization to MSI-X mode */ - phba->intr_type = MSIX; - intr_mode = 2; - } - } - } + /* Obtain PCI device reference */ + if (!phba->pcidev) + goto out_error; + else + pdev = phba->pcidev; + /* Select PCI BARs */ + bars = pci_select_bars(pdev, IORESOURCE_MEM); + /* Enable PCI device */ + if (pci_enable_device_mem(pdev)) + goto out_error; + /* Request PCI resource for the device */ + if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) + goto out_disable_device; + /* Set up device as PCI master and save state for EEH */ + pci_set_master(pdev); + pci_try_set_mwi(pdev); + pci_save_state(pdev); - /* Fallback to MSI if MSI-X initialization failed */ - if (cfg_mode >= 1 && phba->intr_type == NONE) { - retval = lpfc_enable_msi(phba); - if (!retval) { - /* Indicate initialization to MSI mode */ - phba->intr_type = MSI; - intr_mode = 1; - } - } + return 0; - /* Fallback to INTx if both MSI-X/MSI initalization failed */ - if (phba->intr_type == NONE) { - retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, - IRQF_SHARED, LPFC_DRIVER_NAME, phba); - if (!retval) { - /* Indicate initialization to INTx mode */ - phba->intr_type = INTx; - intr_mode = 0; - } - } - return intr_mode; +out_disable_device: + pci_disable_device(pdev); +out_error: + return -ENODEV; } /** - * lpfc_disable_intr - Disable device interrupt + * lpfc_disable_pci_dev - Disable a generic PCI device. * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to disable device interrupt and disassociate the - * driver's interrupt handler(s) from interrupt vector(s). Depending on the - * interrupt mode, the driver will release the interrupt vector(s) for the - * message signaled interrupt. + * This routine is invoked to disable the PCI device that is common to all + * PCI devices. **/ static void -lpfc_disable_intr(struct lpfc_hba *phba) +lpfc_disable_pci_dev(struct lpfc_hba *phba) { - /* Disable the currently initialized interrupt mode */ - if (phba->intr_type == MSIX) - lpfc_disable_msix(phba); - else if (phba->intr_type == MSI) - lpfc_disable_msi(phba); - else if (phba->intr_type == INTx) - free_irq(phba->pcidev->irq, phba); + struct pci_dev *pdev; + int bars; - /* Reset interrupt management states */ - phba->intr_type = NONE; - phba->sli.slistat.sli_intr = 0; + /* Obtain PCI device reference */ + if (!phba->pcidev) + return; + else + pdev = phba->pcidev; + /* Select PCI BARs */ + bars = pci_select_bars(pdev, IORESOURCE_MEM); + /* Release PCI resource and disable PCI device */ + pci_release_selected_regions(pdev, bars); + pci_disable_device(pdev); + /* Null out PCI private reference to driver */ + pci_set_drvdata(pdev, NULL); return; } /** - * lpfc_pci_probe_one - lpfc PCI probe func to register device to PCI subsystem - * @pdev: pointer to PCI device - * @pid: pointer to PCI device identifier - * - * This routine is to be registered to the kernel's PCI subsystem. 
When an - * Emulex HBA is presented in PCI bus, the kernel PCI subsystem looks at - * PCI device-specific information of the device and driver to see if the - * driver state that it can support this kind of device. If the match is - * successful, the driver core invokes this routine. If this routine - * determines it can claim the HBA, it does all the initialization that it - * needs to do to handle the HBA properly. + * lpfc_reset_hba - Reset a hba + * @phba: pointer to lpfc hba data structure. * - * Return code - * 0 - driver can claim the device - * negative value - driver can not claim the device + * This routine is invoked to reset a hba device. It brings the HBA + * offline, performs a board restart, and then brings the board back + * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up + * on outstanding mailbox commands. **/ -static int __devinit -lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) +void +lpfc_reset_hba(struct lpfc_hba *phba) { - struct lpfc_vport *vport = NULL; - struct lpfc_hba *phba; - struct lpfc_sli *psli; - struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; - struct Scsi_Host *shost = NULL; - void *ptr; - unsigned long bar0map_len, bar2map_len; - int error = -ENODEV, retval; - int i, hbq_count; - uint16_t iotag; - uint32_t cfg_mode, intr_mode; - int bars = pci_select_bars(pdev, IORESOURCE_MEM); - struct lpfc_adapter_event_header adapter_event; - - if (pci_enable_device_mem(pdev)) - goto out; - if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) - goto out_disable_device; - - phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); - if (!phba) - goto out_release_regions; - - atomic_set(&phba->fast_event_count, 0); - spin_lock_init(&phba->hbalock); - - /* Initialize ndlp management spinlock */ - spin_lock_init(&phba->ndlp_lock); - - phba->pcidev = pdev; + /* If resets are disabled then set error state and return. */ + if (!phba->cfg_enable_hba_reset) { + phba->link_state = LPFC_HBA_ERROR; + return; + } + lpfc_offline_prep(phba); + lpfc_offline(phba); + lpfc_sli_brdrestart(phba); + lpfc_online(phba); + lpfc_unblock_mgmt_io(phba); +} - /* Assign an unused board number */ - if ((phba->brd_no = lpfc_get_instance()) < 0) - goto out_free_phba; +/** + * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the driver internal resources specific to + * support the SLI-3 HBA device it attached to. + * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli; - INIT_LIST_HEAD(&phba->port_list); - init_waitqueue_head(&phba->wait_4_mlo_m_q); /* - * Get all the module params for configuring this host and then - * establish the host. 
+ * Initialize timers used by driver */ - lpfc_get_cfgparam(phba); - phba->max_vpi = LPFC_MAX_VPI; - /* Initialize timers used by driver */ + /* Heartbeat timer */ init_timer(&phba->hb_tmofunc); phba->hb_tmofunc.function = lpfc_hb_timeout; phba->hb_tmofunc.data = (unsigned long)phba; psli = &phba->sli; + /* MBOX heartbeat timer */ init_timer(&psli->mbox_tmo); psli->mbox_tmo.function = lpfc_mbox_timeout; psli->mbox_tmo.data = (unsigned long) phba; + /* FCP polling mode timer */ init_timer(&phba->fcp_poll_timer); phba->fcp_poll_timer.function = lpfc_poll_timeout; phba->fcp_poll_timer.data = (unsigned long) phba; + /* Fabric block timer */ init_timer(&phba->fabric_block_timer); phba->fabric_block_timer.function = lpfc_fabric_block_timeout; phba->fabric_block_timer.data = (unsigned long) phba; + /* EA polling mode timer */ init_timer(&phba->eratt_poll); phba->eratt_poll.function = lpfc_poll_eratt; phba->eratt_poll.data = (unsigned long) phba; - pci_set_master(pdev); - pci_save_state(pdev); - pci_try_set_mwi(pdev); - - if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0) - if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(32)) != 0) - goto out_idr_remove; + /* Host attention work mask setup */ + phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); + phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); + /* Get all the module params for configuring this host */ + lpfc_get_cfgparam(phba); /* - * Get the bus address of Bar0 and Bar2 and the number of bytes - * required by each mapping. + * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * used to create the sg_dma_buf_pool must be dynamically calculated. + * 2 segments are added since the IOCB needs a command and response bde. */ - phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0); - bar0map_len = pci_resource_len(phba->pcidev, 0); - - phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); - bar2map_len = pci_resource_len(phba->pcidev, 2); - - /* Map HBA SLIM to a kernel virtual address. */ - phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); - if (!phba->slim_memmap_p) { - error = -ENODEV; - dev_printk(KERN_ERR, &pdev->dev, - "ioremap failed for SLIM memory.\n"); - goto out_idr_remove; - } - - /* Map HBA Control Registers to a kernel virtual address. */ - phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); - if (!phba->ctrl_regs_memmap_p) { - error = -ENODEV; - dev_printk(KERN_ERR, &pdev->dev, - "ioremap failed for HBA control registers.\n"); - goto out_iounmap_slim; + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + + if (phba->cfg_enable_bg) { + phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; + phba->cfg_sg_dma_buf_size += + phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); } - /* Allocate memory for SLI-2 structures */ - phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev, - SLI2_SLIM_SIZE, - &phba->slim2p.phys, - GFP_KERNEL); - if (!phba->slim2p.virt) - goto out_iounmap; + /* Also reinitialize the host templates with new values. 
*/ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; - memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); - phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); - phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); - phba->IOCBs = (phba->slim2p.virt + - offsetof(struct lpfc_sli2_slim, IOCBs)); + phba->max_vpi = LPFC_MAX_VPI; + /* This will be set to correct value after config_port mbox */ + phba->max_vports = 0; - phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, - lpfc_sli_hbq_size(), - &phba->hbqslimp.phys, - GFP_KERNEL); - if (!phba->hbqslimp.virt) - goto out_free_slim; + /* + * Initialize the SLI Layer to run with lpfc HBAs. + */ + lpfc_sli_setup(phba); + lpfc_sli_queue_setup(phba); - hbq_count = lpfc_sli_hbq_count(); - ptr = phba->hbqslimp.virt; - for (i = 0; i < hbq_count; ++i) { - phba->hbqs[i].hbq_virt = ptr; - INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); - ptr += (lpfc_hbq_defs[i]->entry_count * - sizeof(struct lpfc_hbq_entry)); - } - phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; - phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; + /* Allocate device driver memory */ + if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) + return -ENOMEM; - memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); + return 0; +} - INIT_LIST_HEAD(&phba->hbqbuf_in_list); +/** + * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the driver internal resources set up + * specific for supporting the SLI-3 HBA device it attached to. + **/ +static void +lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) +{ + /* Free device driver memory allocated */ + lpfc_mem_free_all(phba); - /* Initialize the SLI Layer to run with lpfc HBAs. */ - lpfc_sli_setup(phba); - lpfc_sli_queue_setup(phba); + return; +} - retval = lpfc_mem_alloc(phba); - if (retval) { - error = retval; - goto out_free_hbqslimp; +/** + * lpfc_init_api_table_setup - Set up init api fucntion jump table + * @phba: The hba struct for which this call is being executed. + * @dev_grp: The HBA PCI-Device group number. + * + * This routine sets up the device INIT interface API function jump table + * in @phba struct. + * + * Returns: 0 - success, -ENODEV - failure. + **/ +int +lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) +{ + switch (dev_grp) { + case LPFC_PCI_DEV_LP: + phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; + phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; + phba->lpfc_stop_port = lpfc_stop_port_s3; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1431 Invalid HBA PCI-device group: 0x%x\n", + dev_grp); + return -ENODEV; + break; + } + return 0; +} + +/** + * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the driver internal resources before the + * device specific resource setup to support the HBA device it attached to. 
+ * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) +{ + /* + * Driver resources common to all SLI revisions + */ + atomic_set(&phba->fast_event_count, 0); + spin_lock_init(&phba->hbalock); + + /* Initialize ndlp management spinlock */ + spin_lock_init(&phba->ndlp_lock); + + INIT_LIST_HEAD(&phba->port_list); + INIT_LIST_HEAD(&phba->work_list); + init_waitqueue_head(&phba->wait_4_mlo_m_q); + + /* Initialize the wait queue head for the kernel thread */ + init_waitqueue_head(&phba->work_waitq); + + /* Initialize the scsi buffer list used by driver for scsi IO */ + spin_lock_init(&phba->scsi_buf_list_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); + + /* Initialize the fabric iocb list */ + INIT_LIST_HEAD(&phba->fabric_iocb_list); + + /* Initialize list to save ELS buffers */ + INIT_LIST_HEAD(&phba->elsbuf); + + /* Initialize FCF connection rec list */ + INIT_LIST_HEAD(&phba->fcf_conn_rec_list); + + return 0; +} + +/** + * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the driver internal resources after the + * device specific resource setup to support the HBA device it attached to. + * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) +{ + int error; + + /* Startup the kernel thread for this host adapter. */ + phba->worker_thread = kthread_run(lpfc_do_work, phba, + "lpfc_worker_%d", phba->brd_no); + if (IS_ERR(phba->worker_thread)) { + error = PTR_ERR(phba->worker_thread); + return error; + } + + return 0; +} + +/** + * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the driver internal resources set up after + * the device specific resource setup for supporting the HBA device it + * attached to. + **/ +static void +lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) +{ + /* Stop kernel worker thread */ + kthread_stop(phba->worker_thread); +} + +/** + * lpfc_free_iocb_list - Free iocb list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the driver's IOCB list and memory. + **/ +static void +lpfc_free_iocb_list(struct lpfc_hba *phba) +{ + struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; + + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(iocbq_entry, iocbq_next, + &phba->lpfc_iocb_list, list) { + list_del(&iocbq_entry->list); + kfree(iocbq_entry); + phba->total_iocbq_bufs--; } + spin_unlock_irq(&phba->hbalock); + + return; +} + +/** + * lpfc_init_iocb_list - Allocate and initialize iocb list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate and initialize the driver's IOCB + * list and set up the IOCB tag array accordingly. + * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) +{ + struct lpfc_iocbq *iocbq_entry = NULL; + uint16_t iotag; + int i; /* Initialize and populate the iocb list per host. */ INIT_LIST_HEAD(&phba->lpfc_iocb_list); - for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) { + for (i = 0; i < iocb_count; i++) { iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); if (iocbq_entry == NULL) { printk(KERN_ERR "%s: only allocated %d iocbs of " "expected %d count.
Unloading driver.\n", __func__, i, LPFC_IOCB_LIST_CNT); - error = -ENOMEM; goto out_free_iocbq; } iotag = lpfc_sli_next_iotag(phba, iocbq_entry); if (iotag == 0) { - kfree (iocbq_entry); + kfree(iocbq_entry); printk(KERN_ERR "%s: failed to allocate IOTAG. " - "Unloading driver.\n", - __func__); - error = -ENOMEM; + "Unloading driver.\n", __func__); goto out_free_iocbq; } + iocbq_entry->sli4_xritag = NO_XRI; spin_lock_irq(&phba->hbalock); list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); @@ -2828,71 +2947,799 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) spin_unlock_irq(&phba->hbalock); } - /* Initialize HBA structure */ - phba->fc_edtov = FF_DEF_EDTOV; - phba->fc_ratov = FF_DEF_RATOV; - phba->fc_altov = FF_DEF_ALTOV; - phba->fc_arbtov = FF_DEF_ARBTOV; + return 0; - INIT_LIST_HEAD(&phba->work_list); - phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); - phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); +out_free_iocbq: + lpfc_free_iocb_list(phba); - /* Initialize the wait queue head for the kernel thread */ - init_waitqueue_head(&phba->work_waitq); + return -ENOMEM; +} - /* Startup the kernel thread for this host adapter. */ - phba->worker_thread = kthread_run(lpfc_do_work, phba, - "lpfc_worker_%d", phba->brd_no); - if (IS_ERR(phba->worker_thread)) { - error = PTR_ERR(phba->worker_thread); - goto out_free_iocbq; +/** + * lpfc_hba_alloc - Allocate driver hba data structure for a device. + * @pdev: pointer to pci device data structure. + * + * This routine is invoked to allocate the driver hba data structure for an + * HBA device. If the allocation is successful, the phba reference to the + * PCI device data structure is set. + * + * Return codes + * pointer to @phba - successful + * NULL - error + **/ +static struct lpfc_hba * +lpfc_hba_alloc(struct pci_dev *pdev) +{ + struct lpfc_hba *phba; + + /* Allocate memory for HBA structure */ + phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); + if (!phba) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1417 Failed to allocate hba struct.\n"); + return NULL; } - /* Initialize the list of scsi buffers used by driver for scsi IO. */ - spin_lock_init(&phba->scsi_buf_list_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); + /* Set reference to PCI device in HBA structure */ + phba->pcidev = pdev; - /* Initialize list of fabric iocbs */ - INIT_LIST_HEAD(&phba->fabric_iocb_list); + /* Assign an unused board number */ + phba->brd_no = lpfc_get_instance(); + if (phba->brd_no < 0) { + kfree(phba); + return NULL; + } - /* Initialize list to save ELS buffers */ - INIT_LIST_HEAD(&phba->elsbuf); + return phba; +} + +/** + * lpfc_hba_free - Free driver hba data structure with a device. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the driver hba data structure with an + * HBA device. + **/ +static void +lpfc_hba_free(struct lpfc_hba *phba) +{ + /* Release the driver assigned board number */ + idr_remove(&lpfc_hba_index, phba->brd_no); + + kfree(phba); + return; +} + +/** + * lpfc_create_shost - Create hba physical port with associated scsi host. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to create HBA physical port and associate a SCSI + * host with it.
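lpfc_hba_alloc()/lpfc_hba_free() above pair each resource with its release: the board number claimed in alloc is handed back via idr_remove() in free, and a failed second step rolls back the first before returning. A tiny userspace sketch of that pairing (the instance counter is a stand-in for the kernel idr, not lpfc code):

#include <stdlib.h>

struct my_hba { int brd_no; };

static int my_next_instance;	/* stand-in for idr-based lpfc_get_instance() */

static struct my_hba *my_hba_alloc(void)
{
	struct my_hba *hba = calloc(1, sizeof(*hba));
	if (!hba)
		return NULL;
	hba->brd_no = my_next_instance++;
	if (hba->brd_no < 0) {	/* instance allocation failed: roll back step 1 */
		free(hba);
		return NULL;
	}
	return hba;
}

static void my_hba_free(struct my_hba *hba)
{
	/* a real driver would release the instance number here, then the memory */
	free(hba);
}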
+ * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_create_shost(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport; + struct Scsi_Host *shost; + + /* Initialize HBA FC structure */ + phba->fc_edtov = FF_DEF_EDTOV; + phba->fc_ratov = FF_DEF_RATOV; + phba->fc_altov = FF_DEF_ALTOV; + phba->fc_arbtov = FF_DEF_ARBTOV; vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); if (!vport) - goto out_kthread_stop; + return -ENODEV; shost = lpfc_shost_from_vport(vport); phba->pport = vport; lpfc_debugfs_initialize(vport); + /* Put reference to SCSI host to driver's device private data */ + pci_set_drvdata(phba->pcidev, shost); - pci_set_drvdata(pdev, shost); + return 0; +} - phba->MBslimaddr = phba->slim_memmap_p; - phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; - phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; - phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; - phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; +/** + * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to destroy HBA physical port and the associated + * SCSI host. + **/ +static void +lpfc_destroy_shost(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + + /* Destroy physical port that associated with the SCSI host */ + destroy_port(vport); + + return; +} + +/** + * lpfc_setup_bg - Setup Block guard structures and debug areas. + * @phba: pointer to lpfc hba data structure. + * @shost: the shost to be used to detect Block guard settings. + * + * This routine sets up the local Block guard protocol settings for @shost. + * This routine also allocates memory for debugging bg buffers. + **/ +static void +lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) +{ + int pagecnt = 10; + if (lpfc_prot_mask && lpfc_prot_guard) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1478 Registering BlockGuard with the " + "SCSI layer\n"); + scsi_host_set_prot(shost, lpfc_prot_mask); + scsi_host_set_guard(shost, lpfc_prot_guard); + } + if (!_dump_buf_data) { + while (pagecnt) { + spin_lock_init(&_dump_buf_lock); + _dump_buf_data = + (char *) __get_free_pages(GFP_KERNEL, pagecnt); + if (_dump_buf_data) { + printk(KERN_ERR "BLKGRD allocated %d pages for " + "_dump_buf_data at 0x%p\n", + (1 << pagecnt), _dump_buf_data); + _dump_buf_data_order = pagecnt; + memset(_dump_buf_data, 0, + ((1 << PAGE_SHIFT) << pagecnt)); + break; + } else + --pagecnt; + } + if (!_dump_buf_data_order) + printk(KERN_ERR "BLKGRD ERROR unable to allocate " + "memory for hexdump\n"); + } else + printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" + "\n", _dump_buf_data); + if (!_dump_buf_dif) { + while (pagecnt) { + _dump_buf_dif = + (char *) __get_free_pages(GFP_KERNEL, pagecnt); + if (_dump_buf_dif) { + printk(KERN_ERR "BLKGRD allocated %d pages for " + "_dump_buf_dif at 0x%p\n", + (1 << pagecnt), _dump_buf_dif); + _dump_buf_dif_order = pagecnt; + memset(_dump_buf_dif, 0, + ((1 << PAGE_SHIFT) << pagecnt)); + break; + } else + --pagecnt; + } + if (!_dump_buf_dif_order) + printk(KERN_ERR "BLKGRD ERROR unable to allocate " + "memory for hexdump\n"); + } else + printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", + _dump_buf_dif); +} + +/** + * lpfc_post_init_setup - Perform necessary device post initialization setup. + * @phba: pointer to lpfc hba data structure.
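The debug-buffer loops in lpfc_setup_bg() above use a back-off strategy: start at a large page order and retry one order smaller until __get_free_pages() succeeds. The same idea in a self-contained sketch (malloc stands in for the page allocator; 4 KiB pages assumed):

#include <stdlib.h>

/* Try order 10 first (4 MiB), halving on failure; record the order used. */
static void *alloc_backoff(int *order)
{
	for (*order = 10; *order > 0; --*order) {
		void *buf = malloc((size_t)4096 << *order);
		if (buf)
			return buf;
	}
	return NULL;
}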
+ * + * This routine is invoked to perform all the necessary post initialization + * setup for the device. + **/ +static void +lpfc_post_init_setup(struct lpfc_hba *phba) +{ + struct Scsi_Host *shost; + struct lpfc_adapter_event_header adapter_event; + + /* Get the default values for Model Name and Description */ + lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); + + /* + * hba setup may have changed the hba_queue_depth so we need to + * adjust the value of can_queue. + */ + shost = pci_get_drvdata(phba->pcidev); + shost->can_queue = phba->cfg_hba_queue_depth - 10; + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) + lpfc_setup_bg(phba, shost); + + lpfc_host_attrib_init(shost); + + if (phba->cfg_poll & DISABLE_FCP_RING_INT) { + spin_lock_irq(shost->host_lock); + lpfc_poll_start_timer(phba); + spin_unlock_irq(shost->host_lock); + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0428 Perform SCSI scan\n"); + /* Send board arrival event to upper layer */ + adapter_event.event_type = FC_REG_ADAPTER_EVENT; + adapter_event.subcategory = LPFC_EVENT_ARRIVAL; + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(adapter_event), + (char *) &adapter_event, + LPFC_NL_VENDOR_ID); + return; +} + +/** + * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the PCI device memory space for device + * with SLI-3 interface spec. + * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) +{ + struct pci_dev *pdev; + unsigned long bar0map_len, bar2map_len; + int i, hbq_count; + void *ptr; + int error = -ENODEV; + + /* Obtain PCI device reference */ + if (!phba->pcidev) + return error; + else + pdev = phba->pcidev; + + /* Set the device DMA mask size */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) + return error; + + /* Get the bus address of Bar0 and Bar2 and the number of bytes + * required by each mapping. + */ + phba->pci_bar0_map = pci_resource_start(pdev, 0); + bar0map_len = pci_resource_len(pdev, 0); + + phba->pci_bar2_map = pci_resource_start(pdev, 2); + bar2map_len = pci_resource_len(pdev, 2); + + /* Map HBA SLIM to a kernel virtual address. */ + phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); + if (!phba->slim_memmap_p) { + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for SLIM memory.\n"); + goto out; + } + + /* Map HBA Control Registers to a kernel virtual address.
*/ + phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); + if (!phba->ctrl_regs_memmap_p) { + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for HBA control registers.\n"); + goto out_iounmap_slim; + } + + /* Allocate memory for SLI-2 structures */ + phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, + SLI2_SLIM_SIZE, + &phba->slim2p.phys, + GFP_KERNEL); + if (!phba->slim2p.virt) + goto out_iounmap; + + memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); + phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); + phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); + phba->IOCBs = (phba->slim2p.virt + + offsetof(struct lpfc_sli2_slim, IOCBs)); + + phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, + lpfc_sli_hbq_size(), + &phba->hbqslimp.phys, + GFP_KERNEL); + if (!phba->hbqslimp.virt) + goto out_free_slim; + + hbq_count = lpfc_sli_hbq_count(); + ptr = phba->hbqslimp.virt; + for (i = 0; i < hbq_count; ++i) { + phba->hbqs[i].hbq_virt = ptr; + INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); + ptr += (lpfc_hbq_defs[i]->entry_count * + sizeof(struct lpfc_hbq_entry)); + } + phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; + phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; + + memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); + + INIT_LIST_HEAD(&phba->rb_pend_list); + + phba->MBslimaddr = phba->slim_memmap_p; + phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; + phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; + phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; + phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; + + return 0; + +out_free_slim: + dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, + phba->slim2p.virt, phba->slim2p.phys); +out_iounmap: + iounmap(phba->ctrl_regs_memmap_p); +out_iounmap_slim: + iounmap(phba->slim_memmap_p); +out: + return error; +} + +/** + * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the PCI device memory space for device + * with SLI-3 interface spec. + **/ +static void +lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) +{ + struct pci_dev *pdev; + + /* Obtain PCI device reference */ + if (!phba->pcidev) + return; + else + pdev = phba->pcidev; + + /* Free coherent DMA memory allocated */ + dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), + phba->hbqslimp.virt, phba->hbqslimp.phys); + dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, + phba->slim2p.virt, phba->slim2p.phys); + + /* I/O memory unmap */ + iounmap(phba->ctrl_regs_memmap_p); + iounmap(phba->slim_memmap_p); + + return; +} + +/** + * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to enable the MSI-X interrupt vectors to device + * with SLI-3 interface specs. The kernel function pci_enable_msix() is + * called to enable the MSI-X vectors. Note that pci_enable_msix(), once + * invoked, enables either all or nothing, depending on the current + * availability of PCI vector resources. The device driver is responsible + * for calling the individual request_irq() to register each MSI-X vector + * with a interrupt handler, which is done in this function. Note that + * later when device is unloading, the driver should always call free_irq() + * on all MSI-X vectors it has done request_irq() on before calling + * pci_disable_msix(). 
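The ordering rule spelled out above (free every requested vector before pci_disable_msix()) is easiest to keep straight with a strict unwind ladder. A hedged kernel-style sketch of the two-vector case using the 2009-era API (the handler arguments and name strings are hypothetical, not lpfc's):

#include <linux/pci.h>
#include <linux/interrupt.h>

static int two_vector_setup(struct pci_dev *pdev, struct msix_entry *e,
			    irq_handler_t sp, irq_handler_t fp, void *ctx)
{
	int rc;

	e[0].entry = 0;
	e[1].entry = 1;
	rc = pci_enable_msix(pdev, e, 2);	/* all-or-nothing enable */
	if (rc)
		return rc;
	rc = request_irq(e[0].vector, sp, IRQF_SHARED, "drv-sp", ctx);
	if (rc)
		goto out_disable;
	rc = request_irq(e[1].vector, fp, IRQF_SHARED, "drv-fp", ctx);
	if (rc)
		goto out_free0;
	return 0;

out_free0:
	free_irq(e[0].vector, ctx);	/* undo each request_irq() first... */
out_disable:
	pci_disable_msix(pdev);		/* ...only then drop the MSI-X vectors */
	return rc;
}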
Failure to do so results in a BUG_ON() and a device + * will be left with MSI-X enabled and will leak its vectors. + * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_sli_enable_msix(struct lpfc_hba *phba) +{ + int rc, i; + LPFC_MBOXQ_t *pmb; + + /* Set up MSI-X multi-message vectors */ + for (i = 0; i < LPFC_MSIX_VECTORS; i++) + phba->msix_entries[i].entry = i; + + /* Configure MSI-X capability structure */ + rc = pci_enable_msix(phba->pcidev, phba->msix_entries, + ARRAY_SIZE(phba->msix_entries)); + if (rc) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0420 PCI enable MSI-X failed (%d)\n", rc); + goto msi_fail_out; + } + for (i = 0; i < LPFC_MSIX_VECTORS; i++) + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0477 MSI-X entry[%d]: vector=x%x " + "message=%d\n", i, + phba->msix_entries[i].vector, + phba->msix_entries[i].entry); + /* + * Assign MSI-X vectors to interrupt handlers + */ + + /* vector-0 is associated to slow-path handler */ + rc = request_irq(phba->msix_entries[0].vector, + &lpfc_sli_sp_intr_handler, IRQF_SHARED, + LPFC_SP_DRIVER_HANDLER_NAME, phba); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0421 MSI-X slow-path request_irq failed " + "(%d)\n", rc); + goto msi_fail_out; + } + + /* vector-1 is associated to fast-path handler */ + rc = request_irq(phba->msix_entries[1].vector, + &lpfc_sli_fp_intr_handler, IRQF_SHARED, + LPFC_FP_DRIVER_HANDLER_NAME, phba); + + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0429 MSI-X fast-path request_irq failed " + "(%d)\n", rc); + goto irq_fail_out; + } + + /* + * Configure HBA MSI-X attention conditions to messages + */ + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + + if (!pmb) { + rc = -ENOMEM; + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0474 Unable to allocate memory for issuing " + "MBOX_CONFIG_MSI command\n"); + goto mem_fail_out; + } + rc = lpfc_config_msi(phba, pmb); + if (rc) + goto mbx_fail_out; + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, + "0351 Config MSI mailbox command failed, " + "mbxCmd x%x, mbxStatus x%x\n", + pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); + goto mbx_fail_out; + } + + /* Free memory allocated for mailbox command */ + mempool_free(pmb, phba->mbox_mem_pool); + return rc; + +mbx_fail_out: + /* Free memory allocated for mailbox command */ + mempool_free(pmb, phba->mbox_mem_pool); + +mem_fail_out: + /* free the irq already requested */ + free_irq(phba->msix_entries[1].vector, phba); + +irq_fail_out: + /* free the irq already requested */ + free_irq(phba->msix_entries[0].vector, phba); + +msi_fail_out: + /* Unconfigure MSI-X capability structure */ + pci_disable_msix(phba->pcidev); + return rc; +} + +/** + * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to release the MSI-X vectors and then disable the + * MSI-X interrupt mode to device with SLI-3 interface spec. + **/ +static void +lpfc_sli_disable_msix(struct lpfc_hba *phba) +{ + int i; + + /* Free up MSI-X multi-message vectors */ + for (i = 0; i < LPFC_MSIX_VECTORS; i++) + free_irq(phba->msix_entries[i].vector, phba); + /* Disable MSI-X */ + pci_disable_msix(phba->pcidev); + + return; +} + +/** + * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. + * @phba: pointer to lpfc hba data structure.
+ * + * This routine is invoked to enable the MSI interrupt mode to device with + * SLI-3 interface spec. The kernel function pci_enable_msi() is called to + * enable the MSI vector. The device driver is responsible for calling + * request_irq() to register the MSI vector with an interrupt handler, which + * is done in this function. + * + * Return codes + * 0 - successful + * other values - error + */ +static int +lpfc_sli_enable_msi(struct lpfc_hba *phba) +{ + int rc; + + rc = pci_enable_msi(phba->pcidev); + if (!rc) + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0462 PCI enable MSI mode success.\n"); + else { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0471 PCI enable MSI mode failed (%d)\n", rc); + return rc; + } + + rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, + IRQF_SHARED, LPFC_DRIVER_NAME, phba); + if (rc) { + pci_disable_msi(phba->pcidev); + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0478 MSI request_irq failed (%d)\n", rc); + } + return rc; +} + +/** + * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to disable the MSI interrupt mode to device with + * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has + * done request_irq() on before calling pci_disable_msi(). Failure to do so + * results in a BUG_ON() and the device will be left with MSI enabled and will + * leak its vector. + */ +static void +lpfc_sli_disable_msi(struct lpfc_hba *phba) +{ + free_irq(phba->pcidev->irq, phba); + pci_disable_msi(phba->pcidev); + return; +} + +/** + * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to enable device interrupt and associate driver's + * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface + * spec. Depending on the interrupt mode configured for the driver, the driver + * will try to fall back from the configured interrupt mode to an interrupt + * mode which is supported by the platform, kernel, and device in the order + * of: + * MSI-X -> MSI -> IRQ. + * + * Return codes + * 0 - successful + * other values - error + **/ +static uint32_t +lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) +{ + uint32_t intr_mode = LPFC_INTR_ERROR; + int retval; + + if (cfg_mode == 2) { + /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ + retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); + if (!retval) { + /* Now, try to enable MSI-X interrupt mode */ + retval = lpfc_sli_enable_msix(phba); + if (!retval) { + /* Indicate initialization to MSI-X mode */ + phba->intr_type = MSIX; + intr_mode = 2; + } + } + } + + /* Fallback to MSI if MSI-X initialization failed */ + if (cfg_mode >= 1 && phba->intr_type == NONE) { + retval = lpfc_sli_enable_msi(phba); + if (!retval) { + /* Indicate initialization to MSI mode */ + phba->intr_type = MSI; + intr_mode = 1; + } + } + + /* Fallback to INTx if both MSI-X/MSI initialization failed */ + if (phba->intr_type == NONE) { + retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, + IRQF_SHARED, LPFC_DRIVER_NAME, phba); + if (!retval) { + /* Indicate initialization to INTx mode */ + phba->intr_type = INTx; + intr_mode = 0; + } + } + return intr_mode; +} + +/** + * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. + * @phba: pointer to lpfc hba data structure.
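lpfc_sli_enable_intr() above boils down to a three-rung ladder gated by cfg_mode. A condensed sketch of the control flow (try_msix() is a hypothetical stand-in for the MSI-X setup path; pci_enable_msi() is the real kernel call):

#include <linux/pci.h>

enum my_intr_type { MY_NONE, MY_INTX, MY_MSI, MY_MSIX };
int try_msix(struct pci_dev *pdev);	/* hypothetical MSI-X setup helper */

static enum my_intr_type enable_intr_ladder(struct pci_dev *pdev, int cfg_mode)
{
	if (cfg_mode == 2 && try_msix(pdev) == 0)
		return MY_MSIX;			/* rung 1: MSI-X */
	if (cfg_mode >= 1 && pci_enable_msi(pdev) == 0)
		return MY_MSI;			/* rung 2: MSI */
	return MY_INTX;				/* rung 3: legacy INTx */
}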
+ * + * This routine is invoked to disable device interrupt and disassociate the + * driver's interrupt handler(s) from interrupt vector(s) to device with + * SLI-3 interface spec. Depending on the interrupt mode, the driver will + * release the interrupt vector(s) for the message signaled interrupt. + **/ +static void +lpfc_sli_disable_intr(struct lpfc_hba *phba) +{ + /* Disable the currently initialized interrupt mode */ + if (phba->intr_type == MSIX) + lpfc_sli_disable_msix(phba); + else if (phba->intr_type == MSI) + lpfc_sli_disable_msi(phba); + else if (phba->intr_type == INTx) + free_irq(phba->pcidev->irq, phba); + + /* Reset interrupt management states */ + phba->intr_type = NONE; + phba->sli.slistat.sli_intr = 0; + + return; +} + +/** + * lpfc_unset_hba - Unset SLI3 hba device initialization + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the HBA device initialization steps to + * a device with SLI-3 interface spec. + **/ +static void +lpfc_unset_hba(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + spin_lock_irq(shost->host_lock); + vport->load_flag |= FC_UNLOADING; + spin_unlock_irq(shost->host_lock); + + lpfc_stop_hba_timers(phba); + + phba->pport->work_port_events = 0; + + lpfc_sli_hba_down(phba); + + lpfc_sli_brdrestart(phba); + + lpfc_sli_disable_intr(phba); + + return; +} + +/** + * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. + * @pdev: pointer to PCI device + * @pid: pointer to PCI device identifier + * + * This routine is to be called to attach a device with SLI-3 interface spec + * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is + * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific + * information of the device and driver to see if the driver state that it can + * support this kind of device. If the match is successful, the driver core + * invokes this routine. If this routine determines it can claim the HBA, it + * does all the initialization that it needs to do to handle the HBA properly. 
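The probe routine documented above pairs every setup step with a labeled unwind step executed in reverse order on failure. The skeleton of that idiom, with hypothetical step and teardown names:

int setup_a(void), setup_b(void), setup_c(void);	/* hypothetical steps */
void unset_a(void), unset_b(void);

static int probe_skeleton(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;
	err = setup_b();
	if (err)
		goto out_unset_a;
	err = setup_c();
	if (err)
		goto out_unset_b;
	return 0;

out_unset_b:
	unset_b();		/* tear down in reverse order of setup */
out_unset_a:
	unset_a();
	return err;
}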
+ * + * Return code + * 0 - driver can claim the device + * negative value - driver can not claim the device + **/ +static int __devinit +lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct lpfc_hba *phba; + struct lpfc_vport *vport = NULL; + int error; + uint32_t cfg_mode, intr_mode; + + /* Allocate memory for HBA structure */ + phba = lpfc_hba_alloc(pdev); + if (!phba) + return -ENOMEM; + + /* Perform generic PCI device enabling operation */ + error = lpfc_enable_pci_dev(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1401 Failed to enable pci device.\n"); + goto out_free_phba; + } + + /* Set up SLI API function jump table for PCI-device group-0 HBAs */ + error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); + if (error) + goto out_disable_pci_dev; + + /* Set up SLI-3 specific device PCI memory space */ + error = lpfc_sli_pci_mem_setup(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1402 Failed to set up pci memory space.\n"); + goto out_disable_pci_dev; + } + + /* Set up phase-1 common device driver resources */ + error = lpfc_setup_driver_resource_phase1(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1403 Failed to set up driver resource.\n"); + goto out_unset_pci_mem_s3; + } + + /* Set up SLI-3 specific device driver resources */ + error = lpfc_sli_driver_resource_setup(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1404 Failed to set up driver resource.\n"); + goto out_unset_pci_mem_s3; + } + + /* Initialize and populate the iocb list per host */ + error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1405 Failed to initialize iocb list.\n"); + goto out_unset_driver_resource_s3; + } + + /* Set up common device driver resources */ + error = lpfc_setup_driver_resource_phase2(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1406 Failed to set up driver resource.\n"); + goto out_free_iocb_list; + } + + /* Create SCSI host to the physical port */ + error = lpfc_create_shost(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1407 Failed to create scsi host.\n"); + goto out_unset_driver_resource; + } /* Configure sysfs attributes */ - if (lpfc_alloc_sysfs_attr(vport)) { + vport = phba->pport; + error = lpfc_alloc_sysfs_attr(vport); + if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1476 Failed to allocate sysfs attr\n"); - error = -ENOMEM; - goto out_destroy_port; + goto out_destroy_shost; } + /* Now, trying to enable interrupt and bring up the device */ cfg_mode = phba->cfg_use_msi; while (true) { + /* Put device to a known state before enabling interrupt */ + lpfc_stop_port(phba); /* Configure and enable interrupt */ - intr_mode = lpfc_enable_intr(phba, cfg_mode); + intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0426 Failed to enable interrupt.\n"); + "0431 Failed to enable interrupt.\n"); + error = -ENODEV; goto out_free_sysfs_attr; } - /* HBA SLI setup */ + /* SLI-3 HBA setup */ if (lpfc_sli_hba_setup(phba)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1477 Failed to set up hba\n"); @@ -2902,185 +3749,65 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) /* Wait 50ms for the interrupts of previous mailbox commands */ msleep(50); - /* Check active interrupts received */ - if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { + /* Check active interrupts 
on message signaled interrupts */ + if (intr_mode == 0 || + phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { /* Log the current active interrupt mode */ phba->intr_mode = intr_mode; lpfc_log_intr_mode(phba, intr_mode); break; } else { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0451 Configure interrupt mode (%d) " + "0447 Configure interrupt mode (%d) " "failed active interrupt test.\n", intr_mode); - if (intr_mode == 0) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0479 Failed to enable " - "interrupt.\n"); - error = -ENODEV; - goto out_remove_device; - } - /* Stop HBA SLI setups */ - lpfc_stop_port(phba); /* Disable the current interrupt mode */ - lpfc_disable_intr(phba); + lpfc_sli_disable_intr(phba); /* Try next level of interrupt mode */ cfg_mode = --intr_mode; } } - /* - * hba setup may have changed the hba_queue_depth so we need to adjust - * the value of can_queue. - */ - shost->can_queue = phba->cfg_hba_queue_depth - 10; - if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { - - if (lpfc_prot_mask && lpfc_prot_guard) { - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "1478 Registering BlockGuard with the " - "SCSI layer\n"); - - scsi_host_set_prot(shost, lpfc_prot_mask); - scsi_host_set_guard(shost, lpfc_prot_guard); - } - } - - if (!_dump_buf_data) { - int pagecnt = 10; - while (pagecnt) { - spin_lock_init(&_dump_buf_lock); - _dump_buf_data = - (char *) __get_free_pages(GFP_KERNEL, pagecnt); - if (_dump_buf_data) { - printk(KERN_ERR "BLKGRD allocated %d pages for " - "_dump_buf_data at 0x%p\n", - (1 << pagecnt), _dump_buf_data); - _dump_buf_data_order = pagecnt; - memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT) - << pagecnt)); - break; - } else { - --pagecnt; - } - - } - - if (!_dump_buf_data_order) - printk(KERN_ERR "BLKGRD ERROR unable to allocate " - "memory for hexdump\n"); - - } else { - printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" - "\n", _dump_buf_data); - } - - - if (!_dump_buf_dif) { - int pagecnt = 10; - while (pagecnt) { - _dump_buf_dif = - (char *) __get_free_pages(GFP_KERNEL, pagecnt); - if (_dump_buf_dif) { - printk(KERN_ERR "BLKGRD allocated %d pages for " - "_dump_buf_dif at 0x%p\n", - (1 << pagecnt), _dump_buf_dif); - _dump_buf_dif_order = pagecnt; - memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT) - << pagecnt)); - break; - } else { - --pagecnt; - } - - } - - if (!_dump_buf_dif_order) - printk(KERN_ERR "BLKGRD ERROR unable to allocate " - "memory for hexdump\n"); - - } else { - printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", - _dump_buf_dif); - } - - lpfc_host_attrib_init(shost); - - if (phba->cfg_poll & DISABLE_FCP_RING_INT) { - spin_lock_irq(shost->host_lock); - lpfc_poll_start_timer(phba); - spin_unlock_irq(shost->host_lock); - } + /* Perform post initialization setup */ + lpfc_post_init_setup(phba); - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0428 Perform SCSI scan\n"); - /* Send board arrival event to upper layer */ - adapter_event.event_type = FC_REG_ADAPTER_EVENT; - adapter_event.subcategory = LPFC_EVENT_ARRIVAL; - fc_host_post_vendor_event(shost, fc_get_event_number(), - sizeof(adapter_event), - (char *) &adapter_event, - LPFC_NL_VENDOR_ID); + /* Check if there are static vports to be created. 
*/ + lpfc_create_static_vport(phba); return 0; out_remove_device: - spin_lock_irq(shost->host_lock); - vport->load_flag |= FC_UNLOADING; - spin_unlock_irq(shost->host_lock); - lpfc_stop_phba_timers(phba); - phba->pport->work_port_events = 0; - lpfc_disable_intr(phba); - lpfc_sli_hba_down(phba); - lpfc_sli_brdrestart(phba); + lpfc_unset_hba(phba); out_free_sysfs_attr: lpfc_free_sysfs_attr(vport); -out_destroy_port: - destroy_port(vport); -out_kthread_stop: - kthread_stop(phba->worker_thread); -out_free_iocbq: - list_for_each_entry_safe(iocbq_entry, iocbq_next, - &phba->lpfc_iocb_list, list) { - kfree(iocbq_entry); - phba->total_iocbq_bufs--; - } - lpfc_mem_free(phba); -out_free_hbqslimp: - dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), - phba->hbqslimp.virt, phba->hbqslimp.phys); -out_free_slim: - dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, - phba->slim2p.virt, phba->slim2p.phys); -out_iounmap: - iounmap(phba->ctrl_regs_memmap_p); -out_iounmap_slim: - iounmap(phba->slim_memmap_p); -out_idr_remove: - idr_remove(&lpfc_hba_index, phba->brd_no); +out_destroy_shost: + lpfc_destroy_shost(phba); +out_unset_driver_resource: + lpfc_unset_driver_resource_phase2(phba); +out_free_iocb_list: + lpfc_free_iocb_list(phba); +out_unset_driver_resource_s3: + lpfc_sli_driver_resource_unset(phba); +out_unset_pci_mem_s3: + lpfc_sli_pci_mem_unset(phba); +out_disable_pci_dev: + lpfc_disable_pci_dev(phba); out_free_phba: - kfree(phba); -out_release_regions: - pci_release_selected_regions(pdev, bars); -out_disable_device: - pci_disable_device(pdev); -out: - pci_set_drvdata(pdev, NULL); - if (shost) - scsi_host_put(shost); + lpfc_hba_free(phba); return error; } /** - * lpfc_pci_remove_one - lpfc PCI func to unregister device from PCI subsystem + * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. * @pdev: pointer to PCI device * - * This routine is to be registered to the kernel's PCI subsystem. When an - * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup - * for the HBA device to be removed from the PCI subsystem properly. + * This routine is to be called to detach a device with SLI-3 interface + * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is + * removed from PCI bus, it performs all the necessary cleanup for the HBA + * device to be removed from the PCI subsystem properly.
**/ static void __devexit -lpfc_pci_remove_one(struct pci_dev *pdev) +lpfc_pci_remove_one_s3(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; @@ -3098,7 +3825,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev) /* Release all the vports against this physical port */ vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++) + for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) fc_vport_terminate(vports[i]->fc_vport); lpfc_destroy_vport_work_array(phba, vports); @@ -3120,7 +3847,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev) /* Final cleanup of txcmplq and reset the HBA */ lpfc_sli_brdrestart(phba); - lpfc_stop_phba_timers(phba); + lpfc_stop_hba_timers(phba); spin_lock_irq(&phba->hbalock); list_del_init(&vport->listentry); spin_unlock_irq(&phba->hbalock); @@ -3128,7 +3855,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev) lpfc_debugfs_terminate(vport); /* Disable interrupt */ - lpfc_disable_intr(phba); + lpfc_sli_disable_intr(phba); pci_set_drvdata(pdev, NULL); scsi_host_put(shost); @@ -3138,7 +3865,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev) * corresponding pools here. */ lpfc_scsi_free(phba); - lpfc_mem_free(phba); + lpfc_mem_free_all(phba); dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, phba->hbqslimp.phys); @@ -3151,36 +3878,35 @@ lpfc_pci_remove_one(struct pci_dev *pdev) iounmap(phba->ctrl_regs_memmap_p); iounmap(phba->slim_memmap_p); - idr_remove(&lpfc_hba_index, phba->brd_no); - - kfree(phba); + lpfc_hba_free(phba); pci_release_selected_regions(pdev, bars); pci_disable_device(pdev); } /** - * lpfc_pci_suspend_one - lpfc PCI func to suspend device for power management + * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt * @pdev: pointer to PCI device * @msg: power management message * - * This routine is to be registered to the kernel's PCI subsystem to support - * system Power Management (PM). When PM invokes this method, it quiesces the - * device by stopping the driver's worker thread for the device, turning off - * device's interrupt and DMA, and bring the device offline. Note that as the - * driver implements the minimum PM requirements to a power-aware driver's PM - * support for suspend/resume -- all the possible PM messages (SUSPEND, - * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND - * and the driver will fully reinitialize its device during resume() method - * call, the driver will set device to PCI_D3hot state in PCI config space - * instead of setting it according to the @msg provided by the PM. + * This routine is to be called from the kernel's PCI subsystem to support + * system Power Management (PM) to device with SLI-3 interface spec. When + * PM invokes this method, it quiesces the device by stopping the driver's + * worker thread for the device, turning off device's interrupt and DMA, + * and bring the device offline. Note that as the driver implements the + * minimum PM requirements to a power-aware driver's PM support for the + * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) + * to the suspend() method call will be treated as SUSPEND and the driver will + * fully reinitialize its device during resume() method call, the driver will + * set device to PCI_D3hot state in PCI config space instead of setting it + * according to the @msg provided by the PM. 
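The suspend policy described above reduces to quiesce, then save, then sleep, with D3hot chosen unconditionally. A hedged sketch of that sequence (drv_offline() and drv_disable_intr() are hypothetical driver hooks; the PCI PM calls are real):

#include <linux/pci.h>

void drv_offline(struct pci_dev *pdev);		/* hypothetical: stop worker, I/O */
void drv_disable_intr(struct pci_dev *pdev);	/* hypothetical: free vectors */

static int drv_suspend(struct pci_dev *pdev, pm_message_t msg)
{
	drv_offline(pdev);			/* quiesce before touching PM state */
	drv_disable_intr(pdev);
	pci_save_state(pdev);			/* snapshot config space */
	pci_set_power_state(pdev, PCI_D3hot);	/* always D3hot, regardless of msg */
	return 0;
}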
* * Return code - * 0 - driver suspended the device - * Error otherwise + * 0 - driver suspended the device + * Error otherwise **/ static int -lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) +lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; @@ -3194,7 +3920,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) kthread_stop(phba->worker_thread); /* Disable interrupt from device */ - lpfc_disable_intr(phba); + lpfc_sli_disable_intr(phba); /* Save device state to PCI config space */ pci_save_state(pdev); @@ -3204,25 +3930,26 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) } /** - * lpfc_pci_resume_one - lpfc PCI func to resume device for power management + * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt * @pdev: pointer to PCI device * - * This routine is to be registered to the kernel's PCI subsystem to support - * system Power Management (PM). When PM invokes this method, it restores - * the device's PCI config space state and fully reinitializes the device - * and brings it online. Note that as the driver implements the minimum PM - * requirements to a power-aware driver's PM for suspend/resume -- all - * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() - * method call will be treated as SUSPEND and the driver will fully - * reinitialize its device during resume() method call, the device will be - * set to PCI_D0 directly in PCI config space before restoring the state. + * This routine is to be called from the kernel's PCI subsystem to support + * system Power Management (PM) to device with SLI-3 interface spec. When PM + * invokes this method, it restores the device's PCI config space state and + * fully reinitializes the device and brings it online. Note that as the + * driver implements the minimum PM requirements to a power-aware driver's + * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, + * FREEZE) to the suspend() method call will be treated as SUSPEND and the + * driver will fully reinitialize its device during resume() method call, + * the device will be set to PCI_D0 directly in PCI config space before + * restoring the state. * * Return code - * 0 - driver suspended the device - * Error otherwise + * 0 - driver suspended the device + * Error otherwise **/ static int -lpfc_pci_resume_one(struct pci_dev *pdev) +lpfc_pci_resume_one_s3(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; @@ -3250,7 +3977,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev) } /* Configure and enable interrupt */ - intr_mode = lpfc_enable_intr(phba, phba->intr_mode); + intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0430 PM resume Failed to enable interrupt\n"); @@ -3269,23 +3996,24 @@ lpfc_pci_resume_one(struct pci_dev *pdev) } /** - * lpfc_io_error_detected - Driver method for handling PCI I/O error detected + * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error * @pdev: pointer to PCI device. * @state: the current PCI connection state. * - * This routine is registered to the PCI subsystem for error handling. This - * function is called by the PCI subsystem after a PCI bus error affecting - * this device has been detected. 
When this function is invoked, it will - * need to stop all the I/Os and interrupt(s) to the device. Once that is - * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to - * perform proper recovery as desired. + * This routine is called from the PCI subsystem for I/O error handling to + * device with SLI-3 interface spec. This function is called by the PCI + * subsystem after a PCI bus error affecting this device has been detected. + * When this function is invoked, it will need to stop all the I/Os and + * interrupt(s) to the device. Once that is done, it will return + * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery + * as desired. * * Return codes - * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery - * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered **/ -static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) +static pci_ers_result_t +lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; @@ -3312,30 +4040,32 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, lpfc_sli_abort_iocb_ring(phba, pring); /* Disable interrupt */ - lpfc_disable_intr(phba); + lpfc_sli_disable_intr(phba); /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /** - * lpfc_io_slot_reset - Restart a PCI device from scratch + * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. * @pdev: pointer to PCI device. * - * This routine is registered to the PCI subsystem for error handling. This is - * called after PCI bus has been reset to restart the PCI card from scratch, - * as if from a cold-boot. During the PCI subsystem error recovery, after the - * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform - * proper error recovery and then call this routine before calling the .resume - * method to recover the device. This function will initialize the HBA device, - * enable the interrupt, but it will just put the HBA to offline state without - * passing any I/O traffic. + * This routine is called from the PCI subsystem for error handling to + * device with SLI-3 interface spec. This is called after PCI bus has been + * reset to restart the PCI card from scratch, as if from a cold-boot. + * During the PCI subsystem error recovery, after driver returns + * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error + * recovery and then call this routine before calling the .resume method + * to recover the device. This function will initialize the HBA device, + * enable the interrupt, but it will just put the HBA to offline state + * without passing any I/O traffic. 
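The three recovery hooks described above plug into one pci_error_handlers table; a minimal sketch of the wiring (handler bodies elided, names hypothetical):

#include <linux/pci.h>

pci_ers_result_t drv_io_error_detected(struct pci_dev *pdev,
				       pci_channel_state_t state);
pci_ers_result_t drv_io_slot_reset(struct pci_dev *pdev);
void drv_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers drv_err_handler = {
	.error_detected	= drv_io_error_detected,  /* stop I/O, ask for reset */
	.slot_reset	= drv_io_slot_reset,      /* reinit HBA after bus reset */
	.resume		= drv_io_resume,          /* restart normal I/O */
};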
* * Return codes - * PCI_ERS_RESULT_RECOVERED - the device has been recovered - * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + * PCI_ERS_RESULT_RECOVERED - the device has been recovered + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered */ -static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) +static pci_ers_result_t +lpfc_io_slot_reset_s3(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; @@ -3354,11 +4084,11 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) pci_set_master(pdev); spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); /* Configure and enable interrupt */ - intr_mode = lpfc_enable_intr(phba, phba->intr_mode); + intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0427 Cannot re-enable interrupt after " @@ -3378,15 +4108,17 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) } /** - * lpfc_io_resume - Resume PCI I/O operation + * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. * @pdev: pointer to PCI device * - * This routine is registered to the PCI subsystem for error handling. It is - * called when kernel error recovery tells the lpfc driver that it is ok to - * resume normal PCI operation after PCI bus error recovery. After this call, - * traffic can start to flow from this device again. + * This routine is called from the PCI subsystem for error handling to device + * with SLI-3 interface spec. It is called when kernel error recovery tells + * the lpfc driver that it is ok to resume normal PCI operation after PCI bus + * error recovery. After this call, traffic can start to flow from this device + * again. */ -static void lpfc_io_resume(struct pci_dev *pdev) +static void +lpfc_io_resume_s3(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; @@ -3394,6 +4126,235 @@ static void lpfc_io_resume(struct pci_dev *pdev) lpfc_online(phba); } +/** + * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem + * @pdev: pointer to PCI device + * @pid: pointer to PCI device identifier + * + * This routine is to be registered to the kernel's PCI subsystem. When an + * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks + * at PCI device-specific information of the device and driver to see if the + * driver state that it can support this kind of device. If the match is + * successful, the driver core invokes this routine. This routine dispatches + * the action to the proper SLI-3 or SLI-4 device probing routine, which will + * do all the initialization that it needs to do to handle the HBA device + * properly. + * + * Return code + * 0 - driver can claim the device + * negative value - driver can not claim the device + **/ +static int __devinit +lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + int rc; + uint16_t dev_id; + + if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id)) + return -ENODEV; + + switch (dev_id) { + default: + rc = lpfc_pci_probe_one_s3(pdev, pid); + break; + } + return rc; +} + +/** + * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem + * @pdev: pointer to PCI device + * + * This routine is to be registered to the kernel's PCI subsystem. 
When an + * Emulex HBA is removed from PCI bus, the driver core invokes this routine. + * This routine dispatches the action to the proper SLI-3 or SLI-4 device + * remove routine, which will perform all the necessary cleanup for the + * device to be removed from the PCI subsystem properly. + **/ +static void __devexit +lpfc_pci_remove_one(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + lpfc_pci_remove_one_s3(pdev); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1424 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return; +} + +/** + * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management + * @pdev: pointer to PCI device + * @msg: power management message + * + * This routine is to be registered to the kernel's PCI subsystem to support + * system Power Management (PM). When PM invokes this method, it dispatches + * the action to the proper SLI-3 or SLI-4 device suspend routine, which will + * suspend the device. + * + * Return code + * 0 - driver suspended the device + * Error otherwise + **/ +static int +lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + int rc = -ENODEV; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + rc = lpfc_pci_suspend_one_s3(pdev, msg); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1425 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return rc; +} + +/** + * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management + * @pdev: pointer to PCI device + * + * This routine is to be registered to the kernel's PCI subsystem to support + * system Power Management (PM). When PM invokes this method, it dispatches + * the action to the proper SLI-3 or SLI-4 device resume routine, which will + * resume the device. + * + * Return code + * 0 - driver suspended the device + * Error otherwise + **/ +static int +lpfc_pci_resume_one(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + int rc = -ENODEV; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + rc = lpfc_pci_resume_one_s3(pdev); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1426 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return rc; +} + +/** + * lpfc_io_error_detected - lpfc method for handling PCI I/O error + * @pdev: pointer to PCI device. + * @state: the current PCI connection state. + * + * This routine is registered to the PCI subsystem for error handling. This + * function is called by the PCI subsystem after a PCI bus error affecting + * this device has been detected. When this routine is invoked, it dispatches + * the action to the proper SLI-3 or SLI-4 device error detected handling + * routine, which will perform the proper error detected operation. 
+ * + * Return codes + * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + **/ +static pci_ers_result_t +lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + rc = lpfc_io_error_detected_s3(pdev, state); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1427 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return rc; +} + +/** + * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch + * @pdev: pointer to PCI device. + * + * This routine is registered to the PCI subsystem for error handling. This + * function is called after PCI bus has been reset to restart the PCI card + * from scratch, as if from a cold-boot. When this routine is invoked, it + * dispatches the action to the proper SLI-3 or SLI-4 device reset handling + * routine, which will perform the proper device reset. + * + * Return codes + * PCI_ERS_RESULT_RECOVERED - the device has been recovered + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + **/ +static pci_ers_result_t +lpfc_io_slot_reset(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + rc = lpfc_io_slot_reset_s3(pdev); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1428 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return rc; +} + +/** + * lpfc_io_resume - lpfc method for resuming PCI I/O operation + * @pdev: pointer to PCI device + * + * This routine is registered to the PCI subsystem for error handling. It + * is called when kernel error recovery tells the lpfc driver that it is + * OK to resume normal PCI operation after PCI bus error recovery. When + * this routine is invoked, it dispatches the action to the proper SLI-3 + * or SLI-4 device io_resume routine, which will resume the device operation. 
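Each top-level entry point in this family dispatches on phba->pci_dev_grp with the same shape; a sketch of the skeleton (the -s3 helper and the "14xx" log number are placeholders, not from this patch):

static int dispatch_skeleton(struct lpfc_hba *phba, struct pci_dev *pdev)
{
	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:			/* SLI-3 class device */
		return skeleton_s3(pdev);	/* hypothetical helper */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"14xx Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		return -ENODEV;
	}
}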
+ **/ +static void +lpfc_io_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + lpfc_io_resume_s3(pdev); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1429 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return; +} + static struct pci_device_id lpfc_id_table[] = { {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, PCI_ANY_ID, PCI_ANY_ID, }, @@ -3469,6 +4430,10 @@ static struct pci_device_id lpfc_id_table[] = { PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S, + PCI_ANY_ID, PCI_ANY_ID, }, { 0 } }; @@ -3486,7 +4451,7 @@ static struct pci_driver lpfc_driver = { .probe = lpfc_pci_probe_one, .remove = __devexit_p(lpfc_pci_remove_one), .suspend = lpfc_pci_suspend_one, - .resume = lpfc_pci_resume_one, + .resume = lpfc_pci_resume_one, .err_handler = &lpfc_err_handler, }; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 167b66dd34c7..a226c053c0f4 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -438,22 +438,23 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba) } /** - * lpfc_new_scsi_buf - Scsi buffer allocator + * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec * @vport: The virtual port for which this call being executed. + * @num_to_allocate: The requested number of buffers to allocate. * - * This routine allocates a scsi buffer, which contains all the necessary - * information needed to initiate a SCSI I/O. The non-DMAable buffer region - * contains information to build the IOCB. The DMAable region contains - * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to - * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL - * and the BPL BDE is setup in the IOCB. + * This routine allocates a scsi buffer for device with SLI-3 interface spec, + * the scsi buffer contains all the necessary information needed to initiate + * a SCSI I/O. The non-DMAable buffer region contains information to build + * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP, + * and the initial BPL. In addition to allocating memory, the FCP CMND and + * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB. * * Return codes: - * NULL - Error - * Pointer to lpfc_scsi_buf data structure - Success + * int - number of scsi buffers that were allocated. + * 0 = failure, less than num_to_alloc is a partial failure. **/ -static struct lpfc_scsi_buf * -lpfc_new_scsi_buf(struct lpfc_vport *vport) +static int +lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) { struct lpfc_hba *phba = vport->phba; struct lpfc_scsi_buf *psb; @@ -463,107 +464,134 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport) dma_addr_t pdma_phys_fcp_rsp; dma_addr_t pdma_phys_bpl; uint16_t iotag; + int bcnt; - psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); - if (!psb) - return NULL; + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { + psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); + if (!psb) + break; - /* - * Get memory from the pci pool to map the virt space to pci bus space - * for an I/O. 
The DMA buffer includes space for the struct fcp_cmnd, - * struct fcp_rsp and the number of bde's necessary to support the - * sg_tablesize. - */ - psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL, - &psb->dma_handle); - if (!psb->data) { - kfree(psb); - return NULL; - } - - /* Initialize virtual ptrs to dma_buf region. */ - memset(psb->data, 0, phba->cfg_sg_dma_buf_size); - - /* Allocate iotag for psb->cur_iocbq. */ - iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); - if (iotag == 0) { - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, - psb->data, psb->dma_handle); - kfree (psb); - return NULL; - } - psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; - - psb->fcp_cmnd = psb->data; - psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); - psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp); - - /* Initialize local short-hand pointers. */ - bpl = psb->fcp_bpl; - pdma_phys_fcp_cmd = psb->dma_handle; - pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); - pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + + /* + * Get memory from the pci pool to map the virt space to pci + * bus space for an I/O. The DMA buffer includes space for the + * struct fcp_cmnd, struct fcp_rsp and the number of bde's + * necessary to support the sg_tablesize. + */ + psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, + GFP_KERNEL, &psb->dma_handle); + if (!psb->data) { + kfree(psb); + break; + } + + /* Initialize virtual ptrs to dma_buf region. */ + memset(psb->data, 0, phba->cfg_sg_dma_buf_size); + + /* Allocate iotag for psb->cur_iocbq. */ + iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); + if (iotag == 0) { + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + psb->data, psb->dma_handle); + kfree(psb); + break; + } + psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; + + psb->fcp_cmnd = psb->data; + psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); + psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp); - /* - * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg - * list bdes. Initialize the first two and leave the rest for - * queuecommand. - */ - bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); - bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); - bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); - bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); - - /* Setup the physical region for the FCP RSP */ - bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); - bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); - bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); - bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); + /* Initialize local short-hand pointers. */ + bpl = psb->fcp_bpl; + pdma_phys_fcp_cmd = psb->dma_handle; + pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); + pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp); + + /* + * The first two bdes are the FCP_CMD and FCP_RSP. The balance + * are sg list bdes. Initialize the first two and leave the + * rest for queuecommand. 
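The comment above describes one contiguous DMA allocation carved into the FCP command, FCP response, and buffer-pointer-list regions by fixed offsets, with the same offsets reused to derive each region's bus address. A self-contained sketch of that carving, using stand-in types rather than the driver's fcp_cmnd, fcp_rsp, and ulp_bde64:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd_region { uint8_t cdb[16]; };      /* stands in for fcp_cmnd */
struct rsp_region { uint8_t sense[64]; };    /* stands in for fcp_rsp */
struct bde { uint64_t addr; uint32_t len; }; /* stands in for a BPL entry */

int main(void)
{
    size_t total = sizeof(struct cmd_region) + sizeof(struct rsp_region)
                 + 2 * sizeof(struct bde);
    uint8_t *buf = calloc(1, total);  /* the one "DMA" block */
    uint64_t phys_base = 0x1000;      /* pretend bus address of buf */

    if (!buf)
        return 1;

    /* virtual carve-up: fixed offsets into the single allocation */
    struct cmd_region *cmd = (struct cmd_region *)buf;
    struct rsp_region *rsp = (struct rsp_region *)(buf + sizeof(*cmd));
    struct bde *bpl = (struct bde *)(buf + sizeof(*cmd) + sizeof(*rsp));

    /* the same offsets yield each region's bus address for the BPL */
    bpl[0].addr = phys_base;                 /* FCP CMND */
    bpl[0].len = sizeof(*cmd);
    bpl[1].addr = phys_base + sizeof(*cmd);  /* FCP RSP */
    bpl[1].len = sizeof(*rsp);

    printf("rsp region at offset %zu\n", (size_t)((uint8_t *)rsp - buf));
    free(buf);
    return 0;
}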
+ */ + bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); + bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); + bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); + bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); + + /* Setup the physical region for the FCP RSP */ + bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); + bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); + bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); + bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); + + /* + * Since the IOCB for the FCP I/O is built into this + * lpfc_scsi_buf, initialize it with all known data now. + */ + iocb = &psb->cur_iocbq.iocb; + iocb->un.fcpi64.bdl.ulpIoTag32 = 0; + if ((phba->sli_rev == 3) && + !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { + /* fill in immediate fcp command BDE */ + iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; + iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); + iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, + unsli3.fcp_ext.icd); + iocb->un.fcpi64.bdl.addrHigh = 0; + iocb->ulpBdeCount = 0; + iocb->ulpLe = 0; + /* fill in responce BDE */ + iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = + BUFF_TYPE_BDE_64; + iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = + sizeof(struct fcp_rsp); + iocb->unsli3.fcp_ext.rbde.addrLow = + putPaddrLow(pdma_phys_fcp_rsp); + iocb->unsli3.fcp_ext.rbde.addrHigh = + putPaddrHigh(pdma_phys_fcp_rsp); + } else { + iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; + iocb->un.fcpi64.bdl.bdeSize = + (2 * sizeof(struct ulp_bde64)); + iocb->un.fcpi64.bdl.addrLow = + putPaddrLow(pdma_phys_bpl); + iocb->un.fcpi64.bdl.addrHigh = + putPaddrHigh(pdma_phys_bpl); + iocb->ulpBdeCount = 1; + iocb->ulpLe = 1; + } + iocb->ulpClass = CLASS3; + psb->status = IOSTAT_SUCCESS; - /* - * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, - * initialize it with all known data now. - */ - iocb = &psb->cur_iocbq.iocb; - iocb->un.fcpi64.bdl.ulpIoTag32 = 0; - if ((phba->sli_rev == 3) && - !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { - /* fill in immediate fcp command BDE */ - iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; - iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); - iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, - unsli3.fcp_ext.icd); - iocb->un.fcpi64.bdl.addrHigh = 0; - iocb->ulpBdeCount = 0; - iocb->ulpLe = 0; - /* fill in responce BDE */ - iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; - iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = - sizeof(struct fcp_rsp); - iocb->unsli3.fcp_ext.rbde.addrLow = - putPaddrLow(pdma_phys_fcp_rsp); - iocb->unsli3.fcp_ext.rbde.addrHigh = - putPaddrHigh(pdma_phys_fcp_rsp); - } else { - iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; - iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); - iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl); - iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl); - iocb->ulpBdeCount = 1; - iocb->ulpLe = 1; } - iocb->ulpClass = CLASS3; - return psb; + return bcnt; } /** - * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba - * @phba: The Hba for which this call is being executed. + * lpfc_new_scsi_buf - Wrapper funciton for scsi buffer allocator + * @vport: The virtual port for which this call being executed. + * @num_to_allocate: The requested number of buffers to allocate. + * + * This routine wraps the actual SCSI buffer allocator function pointer from + * the lpfc_hba struct. 
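lpfc_new_scsi_buf_s3 above returns a count rather than a pointer: it tries num_to_alloc times, stops at the first failure, and lets the caller treat a short count as partial success. The idiom in miniature, with a hypothetical struct buf and plain calloc standing in for the PCI pool:

#include <stdio.h>
#include <stdlib.h>

struct buf { char payload[512]; };

/* try to allocate num_to_alloc buffers; report how many succeeded */
static int new_bufs(struct buf **out, int num_to_alloc)
{
    int bcnt;

    for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
        out[bcnt] = calloc(1, sizeof(struct buf));
        if (!out[bcnt])
            break;      /* caller sees a short count */
    }
    return bcnt;        /* 0 = total failure, < num_to_alloc = partial */
}

int main(void)
{
    struct buf *bufs[8];
    int got = new_bufs(bufs, 8);

    if (got != 8)
        fprintf(stderr, "asked for 8, got only %d\n", got);
    while (got--)
        free(bufs[got]);
    return 0;
}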
+ * + * Return codes: + * int - number of scsi buffers that were allocated. + * 0 = failure, less than num_to_alloc is a partial failure. + **/ +static inline int +lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc) +{ + return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc); +} + +/** + * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA + * @phba: The HBA for which this call is being executed. * * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list * and returns to caller. @@ -591,7 +619,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba) } /** - * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list + * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list * @phba: The Hba for which this call is being executed. * @psb: The scsi buffer which is being released. * @@ -599,7 +627,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba) * lpfc_scsi_buf_list list. **/ static void -lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { unsigned long iflag = 0; @@ -610,21 +638,36 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) } /** - * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer + * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list. + * @phba: The Hba for which this call is being executed. + * @psb: The scsi buffer which is being released. + * + * This routine releases @psb scsi buffer by adding it to tail of @phba + * lpfc_scsi_buf_list list. + **/ +static void +lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +{ + + phba->lpfc_release_scsi_buf(phba, psb); +} + +/** + * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be mapped. * * This routine does the pci dma mapping for scatter-gather list of scsi cmnd - * field of @lpfc_cmd. This routine scans through sg elements and format the - * bdea. This routine also initializes all IOCB fields which are dependent on - * scsi command request buffer. + * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans + * through sg elements and format the bdea. This routine also initializes all + * IOCB fields which are dependent on scsi command request buffer. * * Return codes: * 1 - Error * 0 - Success **/ static int -lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) { struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct scatterlist *sgel = NULL; @@ -1411,6 +1454,24 @@ out: return ret; } +/** + * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be mapped. + * + * This routine wraps the actual DMA mapping function pointer from the + * lpfc_hba struct. + * + * Return codes: + * 1 - Error + * 0 - Success + **/ +static inline int +lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +{ + return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); +} + /** * lpfc_send_scsi_error_event - Posts an event when there is SCSI error * @phba: Pointer to hba context object. 
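The lpfc_scsi_prep_dma_buf wrapper just shown is one instance of a pattern this patch repeats throughout: the per-device routine is stored as a function pointer in the adapter structure and a static inline wrapper hides the indirection, so existing call sites do not change. A minimal sketch with illustrative names only:

#include <stdio.h>

struct hba;
typedef int (*prep_fn)(struct hba *);

struct hba { prep_fn prep_dma_buf; };  /* per-device op, set at probe */

static int prep_dma_buf_s3(struct hba *h)
{
    (void)h;
    puts("SLI-3 prep");
    return 0;
}

/* thin wrapper: call sites look like an ordinary function call */
static inline int prep_dma_buf(struct hba *h)
{
    return h->prep_dma_buf(h);
}

int main(void)
{
    struct hba h = { .prep_dma_buf = prep_dma_buf_s3 };
    return prep_dma_buf(&h);
}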
@@ -1504,15 +1565,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, } /** - * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather - * @phba: The Hba for which this call is being executed. + * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev + * @phba: The HBA for which this call is being executed. * @psb: The scsi buffer which is going to be un-mapped. * * This routine does DMA un-mapping of scatter gather list of scsi command - * field of @lpfc_cmd. + * field of @lpfc_cmd for device with SLI-3 interface spec. **/ static void -lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) +lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { /* * There are only two special cases to consider. (1) the scsi command @@ -1528,6 +1589,20 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) psb->pCmd->sc_data_direction); } +/** + * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list + * @phba: The Hba for which this call is being executed. + * @psb: The scsi buffer which is going to be un-mapped. + * + * This routine does DMA un-mapping of scatter gather list of scsi command + * field of @lpfc_cmd for device with SLI-4 interface spec. + **/ +static void +lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +{ + phba->lpfc_scsi_unprep_dma_buf(phba, psb); +} + /** * lpfc_handler_fcp_err - FCP response handler * @vport: The virtual port for which this call is being executed. @@ -1676,7 +1751,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine * @phba: The Hba for which this call is being executed. * @pIocbIn: The command IOCBQ for the scsi cmnd. - * @pIocbOut: The response IOCBQ for the scsi cmnd . + * @pIocbOut: The response IOCBQ for the scsi cmnd. * * This routine assigns scsi command result by looking into response IOCB * status field appropriately. This routine handles QUEUE FULL condition as @@ -1957,16 +2032,16 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) } /** - * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit + * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP infor unit for SLI3 dev * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: The scsi command which needs to send. * @pnode: Pointer to lpfc_nodelist. * * This routine initializes fcp_cmnd and iocb data structure from scsi command - * to transfer. + * to transfer for device with SLI3 interface spec. 
**/ static void -lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, +lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_nodelist *pnode) { struct lpfc_hba *phba = vport->phba; @@ -2013,8 +2088,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, if (scsi_sg_count(scsi_cmnd)) { if (datadir == DMA_TO_DEVICE) { iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; - iocb_cmd->un.fcpi.fcpi_parm = 0; - iocb_cmd->ulpPU = 0; + if (phba->sli_rev < LPFC_SLI_REV4) { + iocb_cmd->un.fcpi.fcpi_parm = 0; + iocb_cmd->ulpPU = 0; + } else + iocb_cmd->ulpPU = PARM_READ_CHECK; fcp_cmnd->fcpCntl3 = WRITE_DATA; phba->fc4OutputRequests++; } else { @@ -2051,20 +2129,37 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, } /** - * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit + * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: The scsi command which needs to send. + * @pnode: Pointer to lpfc_nodelist. + * + * This routine wraps the actual convert SCSI cmnd function pointer from + * the lpfc_hba struct. + **/ +static inline void +lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, + struct lpfc_nodelist *pnode) +{ + vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode); +} + +/** + * lpfc_scsi_prep_task_mgmt_cmnd_s3 - Convert SLI3 scsi TM cmd to FCP info unit * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. * @lun: Logical unit number. * @task_mgmt_cmd: SCSI task management command. * - * This routine creates FCP information unit corresponding to @task_mgmt_cmd. + * This routine creates FCP information unit corresponding to @task_mgmt_cmd + * for device with SLI-3 interface spec. * * Return codes: * 0 - Error * 1 - Success **/ static int -lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, +lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, unsigned int lun, uint8_t task_mgmt_cmd) @@ -2113,6 +2208,67 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, return 1; } +/** + * lpfc_scsi_prep_task_mgmt_cmnd - Wrapper func convert scsi TM cmd to FCP info + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. + * @lun: Logical unit number. + * @task_mgmt_cmd: SCSI task management command. + * + * This routine wraps the actual convert SCSI TM to FCP information unit + * function pointer from the lpfc_hba struct. + * + * Return codes: + * 0 - Error + * 1 - Success + **/ +static inline int +lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, + struct lpfc_scsi_buf *lpfc_cmd, + unsigned int lun, + uint8_t task_mgmt_cmd) +{ + struct lpfc_hba *phba = vport->phba; + + return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, + task_mgmt_cmd); +} + +/** + * lpfc_scsi_api_table_setup - Set up scsi api fucntion jump table + * @phba: The hba struct for which this call is being executed. + * @dev_grp: The HBA PCI-Device group number. + * + * This routine sets up the SCSI interface API function jump table in @phba + * struct. + * Returns: 0 - success, -ENODEV - failure. 
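The setup routine documented here (and shown next) installs a whole family of function pointers in one place, refusing the device with -ENODEV when the group is unknown, so probing fails early instead of leaving a half-initialized jump table. A compact sketch of that setup step, with stand-in ops:

#include <errno.h>
#include <stdio.h>

enum dev_grp { DEV_GRP_SLI3 };

struct hba {
    void (*release_buf)(struct hba *);
    int (*prep_cmnd)(struct hba *);
};

static void release_buf_s3(struct hba *h) { (void)h; }
static int prep_cmnd_s3(struct hba *h) { (void)h; return 0; }

/* install every per-group op in one place, or refuse the device */
static int api_table_setup(struct hba *h, enum dev_grp grp)
{
    switch (grp) {
    case DEV_GRP_SLI3:
        h->release_buf = release_buf_s3;
        h->prep_cmnd = prep_cmnd_s3;
        break;
    default:
        fprintf(stderr, "invalid device group %d\n", grp);
        return -ENODEV;  /* probe fails early */
    }
    return 0;
}

int main(void)
{
    struct hba h;
    return api_table_setup(&h, DEV_GRP_SLI3) ? 1 : 0;
}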
+ **/ +int +lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) +{ + + switch (dev_grp) { + case LPFC_PCI_DEV_LP: + phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3; + phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; + phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3; + phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3; + phba->lpfc_scsi_prep_task_mgmt_cmd = + lpfc_scsi_prep_task_mgmt_cmd_s3; + phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1418 Invalid HBA PCI-device group: 0x%x\n", + dev_grp); + return -ENODEV; + break; + } + phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf; + phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; + return 0; +} + /** * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command * @phba: The Hba for which this call is being executed. @@ -2178,9 +2334,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); - status = lpfc_sli_issue_iocb_wait(phba, - &phba->sli.ring[phba->sli.fcp_ring], - iocbq, iocbqrsp, lpfc_cmd->timeout); + status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, + iocbq, iocbqrsp, lpfc_cmd->timeout); if (status != IOCB_SUCCESS) { if (status == IOCB_TIMEDOUT) { iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; @@ -2305,7 +2460,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - struct lpfc_sli *psli = &phba->sli; struct lpfc_rport_data *rdata = cmnd->device->hostdata; struct lpfc_nodelist *ndlp = rdata->pnode; struct lpfc_scsi_buf *lpfc_cmd; @@ -2427,7 +2581,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); atomic_inc(&ndlp->cmd_pending); - err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], + err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); if (err) { atomic_dec(&ndlp->cmd_pending); @@ -2490,7 +2644,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring]; struct lpfc_iocbq *iocb; struct lpfc_iocbq *abtsiocb; struct lpfc_scsi_buf *lpfc_cmd; @@ -2531,7 +2684,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) icmd = &abtsiocb->iocb; icmd->un.acxri.abortType = ABORT_TYPE_ABTS; icmd->un.acxri.abortContextTag = cmd->ulpContext; - icmd->un.acxri.abortIoTag = cmd->ulpIoTag; + if (phba->sli_rev == LPFC_SLI_REV4) + icmd->un.acxri.abortIoTag = iocb->sli4_xritag; + else + icmd->un.acxri.abortIoTag = cmd->ulpIoTag; icmd->ulpLe = 1; icmd->ulpClass = cmd->ulpClass; @@ -2542,7 +2698,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; abtsiocb->vport = vport; - if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == + IOCB_ERROR) { lpfc_sli_release_iocbq(phba, abtsiocb); ret = FAILED; goto out; @@ -2668,8 +2825,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) "0703 Issue target reset to TGT %d LUN %d " "rpi x%x nlp_flag x%x\n", 
cmnd->device->id, cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); - status = lpfc_sli_issue_iocb_wait(phba, - &phba->sli.ring[phba->sli.fcp_ring], + status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, iocbq, iocbqrsp, lpfc_cmd->timeout); if (status == IOCB_TIMEDOUT) { iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; @@ -2825,11 +2981,10 @@ lpfc_slave_alloc(struct scsi_device *sdev) { struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; struct lpfc_hba *phba = vport->phba; - struct lpfc_scsi_buf *scsi_buf = NULL; struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); - uint32_t total = 0, i; + uint32_t total = 0; uint32_t num_to_alloc = 0; - unsigned long flags; + int num_allocated = 0; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; @@ -2863,20 +3018,13 @@ lpfc_slave_alloc(struct scsi_device *sdev) (phba->cfg_hba_queue_depth - total)); num_to_alloc = phba->cfg_hba_queue_depth - total; } - - for (i = 0; i < num_to_alloc; i++) { - scsi_buf = lpfc_new_scsi_buf(vport); - if (!scsi_buf) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, - "0706 Failed to allocate " - "command buffer\n"); - break; - } - - spin_lock_irqsave(&phba->scsi_buf_list_lock, flags); - phba->total_scsi_bufs++; - list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags); + num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); + if (num_to_alloc != num_allocated) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "0708 Allocation request of %d " + "command buffers did not succeed. " + "Allocated %d buffers.\n", + num_to_alloc, num_allocated); } return 0; } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index eb5c75c45ba4..e2d07d97fa8b 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -142,7 +142,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba) } /** - * __lpfc_sli_release_iocbq - Release iocb to the iocb pool + * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool * @phba: Pointer to HBA context object. * @iocbq: Pointer to driver iocb object. * @@ -152,7 +152,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba) * clears all other fields of the iocb object when it is freed. **/ static void -__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { size_t start_clean = offsetof(struct lpfc_iocbq, iocb); @@ -160,9 +160,26 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) * Clean all volatile data fields, preserve iotag and node struct. */ memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); + iocbq->sli4_xritag = NO_XRI; list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); } +/** + * __lpfc_sli_release_iocbq - Release iocb to the iocb pool + * @phba: Pointer to HBA context object. + * @iocbq: Pointer to driver iocb object. + * + * This function is called with hbalock held to release driver + * iocb object to the iocb pool. The iotag in the iocb object + * does not change for each use of the iocb object. This function + * clears all other fields of the iocb object when it is freed. + **/ +static void +__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +{ + phba->__lpfc_sli_release_iocbq(phba, iocbq); +} + /** * lpfc_sli_release_iocbq - Release iocb to the iocb pool * @phba: Pointer to HBA context object. 
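__lpfc_sli_release_iocbq_s3 above clears an iocb for reuse with a single memset starting at offsetof(struct lpfc_iocbq, iocb), so the persistent iotag (and anything else laid out before that member) survives while the volatile tail is zeroed. The same trick demonstrated on a hypothetical structure:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct iocbq {
    unsigned int iotag;   /* persistent: survives release */
    unsigned int xritag;  /* volatile: cleared on release */
    char payload[64];     /* volatile: cleared on release */
};

static void release_iocbq(struct iocbq *q)
{
    size_t start_clean = offsetof(struct iocbq, xritag);

    /* zero everything from start_clean onward, keep what precedes it */
    memset((char *)q + start_clean, 0, sizeof(*q) - start_clean);
    /* q would now be put back on the free list */
}

int main(void)
{
    struct iocbq q = { .iotag = 42, .xritag = 7 };

    release_iocbq(&q);
    printf("iotag=%u xritag=%u\n", q.iotag, q.xritag);  /* 42 0 */
    return 0;
}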
@@ -779,8 +796,8 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) phba->hbqs[i].buffer_count = 0; } /* Return all HBQ buffer that are in-fly */ - list_for_each_entry_safe(dmabuf, next_dmabuf, - &phba->hbqbuf_in_list, list) { + list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list, + list) { hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); list_del(&hbq_buf->dbuf.list); if (hbq_buf->tag == -1) { @@ -814,9 +831,27 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) * pointer to the hbq entry if it successfully post the buffer * else it will return NULL. **/ -static struct lpfc_hbq_entry * +static int lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, struct hbq_dmabuf *hbq_buf) +{ + return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); +} + +/** + * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware + * @phba: Pointer to HBA context object. + * @hbqno: HBQ number. + * @hbq_buf: Pointer to HBQ buffer. + * + * This function is called with the hbalock held to post a hbq buffer to the + * firmware. If the function finds an empty slot in the HBQ, it will post the + * buffer and place it on the hbq_buffer_list. The function will return zero if + * it successfully post the buffer else it will return an error. + **/ +static int +lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, + struct hbq_dmabuf *hbq_buf) { struct lpfc_hbq_entry *hbqe; dma_addr_t physaddr = hbq_buf->dbuf.phys; @@ -838,8 +873,9 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, /* flush */ readl(phba->hbq_put + hbqno); list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); - } - return hbqe; + return 0; + } else + return -ENOMEM; } /* HBQ for ELS and CT traffic. */ @@ -914,7 +950,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) dbuf.list); hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | (hbqno << 16)); - if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { + if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { phba->hbqs[hbqno].buffer_count++; posted++; } else @@ -964,6 +1000,25 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) lpfc_hbq_defs[qno]->init_count)); } +/** + * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list + * @phba: Pointer to HBA context object. + * @hbqno: HBQ number. + * + * This function removes the first hbq buffer on an hbq list and returns a + * pointer to that buffer. If it finds no buffers on the list it returns NULL. + **/ +static struct hbq_dmabuf * +lpfc_sli_hbqbuf_get(struct list_head *rb_list) +{ + struct lpfc_dmabuf *d_buf; + + list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); + if (!d_buf) + return NULL; + return container_of(d_buf, struct hbq_dmabuf, dbuf); +} + /** * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag * @phba: Pointer to HBA context object. @@ -985,12 +1040,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) if (hbqno >= LPFC_MAX_HBQS) return NULL; + spin_lock_irq(&phba->hbalock); list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); if (hbq_buf->tag == tag) { + spin_unlock_irq(&phba->hbalock); return hbq_buf; } } + spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, "1803 Bad hbq tag. 
Data: x%x x%x\n", tag, phba->hbqs[tag >> 16].buffer_count); @@ -1013,9 +1071,8 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) if (hbq_buffer) { hbqno = hbq_buffer->tag >> 16; - if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { + if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); - } } } @@ -1317,6 +1374,45 @@ lpfc_sli_get_buff(struct lpfc_hba *phba, return &hbq_entry->dbuf; } +/** + * lpfc_complete_unsol_iocb - Complete an unsolicited sequence + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @saveq: Pointer to the iocbq struct representing the sequence starting frame. + * @fch_r_ctl: the r_ctl for the first frame of the sequence. + * @fch_type: the type for the first frame of the sequence. + * + * This function is called with no lock held. This function uses the r_ctl and + * type of the received sequence to find the correct callback function to call + * to process the sequence. + **/ +static int +lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, + uint32_t fch_type) +{ + int i; + + /* unSolicited Responses */ + if (pring->prt[0].profile) { + if (pring->prt[0].lpfc_sli_rcv_unsol_event) + (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, + saveq); + return 1; + } + /* We must search, based on rctl / type + for the right routine */ + for (i = 0; i < pring->num_mask; i++) { + if ((pring->prt[i].rctl == fch_r_ctl) && + (pring->prt[i].type == fch_type)) { + if (pring->prt[i].lpfc_sli_rcv_unsol_event) + (pring->prt[i].lpfc_sli_rcv_unsol_event) + (phba, pring, saveq); + return 1; + } + } + return 0; +} /** * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler @@ -1339,7 +1435,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, IOCB_t * irsp; WORD5 * w5p; uint32_t Rctl, Type; - uint32_t match, i; + uint32_t match; struct lpfc_iocbq *iocbq; struct lpfc_dmabuf *dmzbuf; @@ -1482,35 +1578,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } - /* unSolicited Responses */ - if (pring->prt[0].profile) { - if (pring->prt[0].lpfc_sli_rcv_unsol_event) - (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, - saveq); - match = 1; - } else { - /* We must search, based on rctl / type - for the right routine */ - for (i = 0; i < pring->num_mask; i++) { - if ((pring->prt[i].rctl == Rctl) - && (pring->prt[i].type == Type)) { - if (pring->prt[i].lpfc_sli_rcv_unsol_event) - (pring->prt[i].lpfc_sli_rcv_unsol_event) - (phba, pring, saveq); - match = 1; - break; - } - } - } - if (match == 0) { - /* Unexpected Rctl / Type received */ - /* Ring handler: unexpected - Rctl Type received */ + if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0313 Ring %d handler: unexpected Rctl x%x " "Type x%x received\n", pring->ringno, Rctl, Type); - } + return 1; } @@ -1551,6 +1624,37 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, return NULL; } +/** + * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @iotag: IOCB tag. + * + * This function looks up the iocb_lookup table to get the command iocb + * corresponding to the given iotag. This function is called with the + * hbalock held. + * This function returns the command iocb object if it finds the command + * iocb else returns NULL. 
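lpfc_sli_iocbq_lookup_by_tag, defined next, relies on a dense iotag-indexed array: a bounds check plus one array read replaces walking the completion queue. A sketch of that lookup against an illustrative table:

#include <stdio.h>

#define MAX_IOTAG 128

struct cmd { int payload; };

static struct cmd *lookup_tbl[MAX_IOTAG + 1];  /* tag-indexed, dense */
static unsigned int last_iotag = MAX_IOTAG;

static struct cmd *lookup_by_tag(unsigned int iotag)
{
    if (iotag != 0 && iotag <= last_iotag)
        return lookup_tbl[iotag];  /* O(1), no list walk */

    fprintf(stderr, "iotag %u out of range (max %u)\n",
            iotag, last_iotag);
    return NULL;
}

int main(void)
{
    struct cmd c = { 1 };

    lookup_tbl[5] = &c;
    return lookup_by_tag(5) == &c ? 0 : 1;
}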
+ **/ +static struct lpfc_iocbq * +lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, uint16_t iotag) +{ + struct lpfc_iocbq *cmd_iocb; + + if (iotag != 0 && iotag <= phba->sli.last_iotag) { + cmd_iocb = phba->sli.iocbq_lookup[iotag]; + list_del_init(&cmd_iocb->list); + pring->txcmplq_cnt--; + return cmd_iocb; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0372 iotag x%x is out of range: max iotag (x%x)\n", + iotag, phba->sli.last_iotag); + return NULL; +} + /** * lpfc_sli_process_sol_iocb - process solicited iocb completion * @phba: Pointer to HBA context object. @@ -1954,7 +2058,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { spin_unlock_irqrestore(&phba->hbalock, iflag); - lpfc_rampdown_queue_depth(phba); + phba->lpfc_rampdown_queue_depth(phba); spin_lock_irqsave(&phba->hbalock, iflag); } @@ -2068,39 +2172,215 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, } /** - * lpfc_sli_handle_slow_ring_event - Handle ring events for non-FCP rings + * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @rspiocbp: Pointer to driver response IOCB object. + * + * This function is called from the worker thread when there is a slow-path + * response IOCB to process. This function chains all the response iocbs until + * seeing the iocb with the LE bit set. The function will call + * lpfc_sli_process_sol_iocb function if the response iocb indicates a + * completion of a command iocb. The function will call the + * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. + * The function frees the resources or calls the completion handler if this + * iocb is an abort completion. The function returns NULL when the response + * iocb has the LE bit set and all the chained iocbs are processed, otherwise + * this function shall chain the iocb on to the iocb_continueq and return the + * response iocb passed in. + **/ +static struct lpfc_iocbq * +lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *rspiocbp) +{ + struct lpfc_iocbq *saveq; + struct lpfc_iocbq *cmdiocbp; + struct lpfc_iocbq *next_iocb; + IOCB_t *irsp = NULL; + uint32_t free_saveq; + uint8_t iocb_cmd_type; + lpfc_iocb_type type; + unsigned long iflag; + int rc; + + spin_lock_irqsave(&phba->hbalock, iflag); + /* First add the response iocb to the continueq list */ + list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); + pring->iocb_continueq_cnt++; + + /* Now, determine whether the list is completed for processing */ + irsp = &rspiocbp->iocb; + if (irsp->ulpLe) { + /* + * By default, the driver expects to free all resources + * associated with this iocb completion. + */ + free_saveq = 1; + saveq = list_get_first(&pring->iocb_continueq, + struct lpfc_iocbq, list); + irsp = &(saveq->iocb); + list_del_init(&pring->iocb_continueq); + pring->iocb_continueq_cnt = 0; + + pring->stats.iocb_rsp++; + + /* + * If resource errors reported from HBA, reduce + * queuedepths of the SCSI device.
+ */ + if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && + (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + phba->lpfc_rampdown_queue_depth(phba); + spin_lock_irqsave(&phba->hbalock, iflag); + } + + if (irsp->ulpStatus) { + /* Rsp ring error: IOCB */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0328 Rsp Ring %d error: " + "IOCB Data: " + "x%x x%x x%x x%x " + "x%x x%x x%x x%x " + "x%x x%x x%x x%x " + "x%x x%x x%x x%x\n", + pring->ringno, + irsp->un.ulpWord[0], + irsp->un.ulpWord[1], + irsp->un.ulpWord[2], + irsp->un.ulpWord[3], + irsp->un.ulpWord[4], + irsp->un.ulpWord[5], + *(((uint32_t *) irsp) + 6), + *(((uint32_t *) irsp) + 7), + *(((uint32_t *) irsp) + 8), + *(((uint32_t *) irsp) + 9), + *(((uint32_t *) irsp) + 10), + *(((uint32_t *) irsp) + 11), + *(((uint32_t *) irsp) + 12), + *(((uint32_t *) irsp) + 13), + *(((uint32_t *) irsp) + 14), + *(((uint32_t *) irsp) + 15)); + } + + /* + * Fetch the IOCB command type and call the correct completion + * routine. Solicited and Unsolicited IOCBs on the ELS ring + * get freed back to the lpfc_iocb_list by the discovery + * kernel thread. + */ + iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; + type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); + switch (type) { + case LPFC_SOL_IOCB: + spin_unlock_irqrestore(&phba->hbalock, iflag); + rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); + spin_lock_irqsave(&phba->hbalock, iflag); + break; + + case LPFC_UNSOL_IOCB: + spin_unlock_irqrestore(&phba->hbalock, iflag); + rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); + spin_lock_irqsave(&phba->hbalock, iflag); + if (!rc) + free_saveq = 0; + break; + + case LPFC_ABORT_IOCB: + cmdiocbp = NULL; + if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) + cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, + saveq); + if (cmdiocbp) { + /* Call the specified completion routine */ + if (cmdiocbp->iocb_cmpl) { + spin_unlock_irqrestore(&phba->hbalock, + iflag); + (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, + saveq); + spin_lock_irqsave(&phba->hbalock, + iflag); + } else + __lpfc_sli_release_iocbq(phba, + cmdiocbp); + } + break; + + case LPFC_UNKNOWN_IOCB: + if (irsp->ulpCommand == CMD_ADAPTER_MSG) { + char adaptermsg[LPFC_MAX_ADPTMSG]; + memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); + memcpy(&adaptermsg[0], (uint8_t *)irsp, + MAX_MSG_DATA); + dev_warn(&((phba->pcidev)->dev), + "lpfc%d: %s\n", + phba->brd_no, adaptermsg); + } else { + /* Unknown IOCB command */ + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0335 Unknown IOCB " + "command Data: x%x " + "x%x x%x x%x\n", + irsp->ulpCommand, + irsp->ulpStatus, + irsp->ulpIoTag, + irsp->ulpContext); + } + break; + } + + if (free_saveq) { + list_for_each_entry_safe(rspiocbp, next_iocb, + &saveq->list, list) { + list_del(&rspiocbp->list); + __lpfc_sli_release_iocbq(phba, rspiocbp); + } + __lpfc_sli_release_iocbq(phba, saveq); + } + rspiocbp = NULL; + } + spin_unlock_irqrestore(&phba->hbalock, iflag); + return rspiocbp; +} + +/** + * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @mask: Host attention register mask for this ring. * - * This function is called from the worker thread when there is a ring - * event for non-fcp rings. The caller does not hold any lock . - * The function processes each response iocb in the response ring until it - * finds an iocb with LE bit set and chains all the iocbs upto the iocb with - * LE bit set. 
The function will call lpfc_sli_process_sol_iocb function if the - * response iocb indicates a completion of a command iocb. The function - * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited - * iocb. The function frees the resources or calls the completion handler if - * this iocb is an abort completion. The function returns 0 when the allocated - * iocbs are not freed, otherwise returns 1. + * This routine wraps the actual slow_ring event process routine from the + * API jump table function pointer from the lpfc_hba struct. **/ -int +void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t mask) +{ + phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); +} + +/** + * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @mask: Host attention register mask for this ring. + * + * This function is called from the worker thread when there is a ring event + * for non-fcp rings. The caller does not hold any lock. The function will + * remove each response iocb in the response ring and calls the handle + * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. + **/ +static void +lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, uint32_t mask) { struct lpfc_pgp *pgp; IOCB_t *entry; IOCB_t *irsp = NULL; struct lpfc_iocbq *rspiocbp = NULL; - struct lpfc_iocbq *next_iocb; - struct lpfc_iocbq *cmdiocbp; - struct lpfc_iocbq *saveq; - uint8_t iocb_cmd_type; - lpfc_iocb_type type; - uint32_t status, free_saveq; uint32_t portRspPut, portRspMax; - int rc = 1; unsigned long iflag; + uint32_t status; pgp = &phba->port_gp[pring->ringno]; spin_lock_irqsave(&phba->hbalock, iflag); @@ -2128,7 +2408,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, phba->work_hs = HS_FFER3; lpfc_handle_eratt(phba); - return 1; + return; } rmb(); @@ -2173,138 +2453,10 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); - list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); - - pring->iocb_continueq_cnt++; - if (irsp->ulpLe) { - /* - * By default, the driver expects to free all resources - * associated with this iocb completion. - */ - free_saveq = 1; - saveq = list_get_first(&pring->iocb_continueq, - struct lpfc_iocbq, list); - irsp = &(saveq->iocb); - list_del_init(&pring->iocb_continueq); - pring->iocb_continueq_cnt = 0; - - pring->stats.iocb_rsp++; - - /* - * If resource errors reported from HBA, reduce - * queuedepths of the SCSI device. 
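Both the old inline code being removed here and the new lpfc_sli_sp_handle_rspiocb keep a strict rule: the hbalock is dropped before any completion callback is invoked and retaken afterwards, since the callback may take other locks or re-enter the SLI layer. The discipline in miniature, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;

static void completion(int status)
{
    /* runs with hbalock dropped, so it may take other locks safely */
    printf("completed with status %d\n", status);
}

static void handle_response(int status, void (*cmpl)(int))
{
    pthread_mutex_lock(&hbalock);
    /* ... dequeue the response under the lock ... */
    if (cmpl) {
        pthread_mutex_unlock(&hbalock);  /* never call out locked */
        cmpl(status);
        pthread_mutex_lock(&hbalock);
    }
    /* ... remaining ring bookkeeping under the lock ... */
    pthread_mutex_unlock(&hbalock);
}

int main(void)
{
    handle_response(0, completion);
    return 0;
}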
- */ - if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && - (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { - spin_unlock_irqrestore(&phba->hbalock, iflag); - lpfc_rampdown_queue_depth(phba); - spin_lock_irqsave(&phba->hbalock, iflag); - } - - if (irsp->ulpStatus) { - /* Rsp ring error: IOCB */ - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0328 Rsp Ring %d error: " - "IOCB Data: " - "x%x x%x x%x x%x " - "x%x x%x x%x x%x " - "x%x x%x x%x x%x " - "x%x x%x x%x x%x\n", - pring->ringno, - irsp->un.ulpWord[0], - irsp->un.ulpWord[1], - irsp->un.ulpWord[2], - irsp->un.ulpWord[3], - irsp->un.ulpWord[4], - irsp->un.ulpWord[5], - *(((uint32_t *) irsp) + 6), - *(((uint32_t *) irsp) + 7), - *(((uint32_t *) irsp) + 8), - *(((uint32_t *) irsp) + 9), - *(((uint32_t *) irsp) + 10), - *(((uint32_t *) irsp) + 11), - *(((uint32_t *) irsp) + 12), - *(((uint32_t *) irsp) + 13), - *(((uint32_t *) irsp) + 14), - *(((uint32_t *) irsp) + 15)); - } - - /* - * Fetch the IOCB command type and call the correct - * completion routine. Solicited and Unsolicited - * IOCBs on the ELS ring get freed back to the - * lpfc_iocb_list by the discovery kernel thread. - */ - iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; - type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); - if (type == LPFC_SOL_IOCB) { - spin_unlock_irqrestore(&phba->hbalock, iflag); - rc = lpfc_sli_process_sol_iocb(phba, pring, - saveq); - spin_lock_irqsave(&phba->hbalock, iflag); - } else if (type == LPFC_UNSOL_IOCB) { - spin_unlock_irqrestore(&phba->hbalock, iflag); - rc = lpfc_sli_process_unsol_iocb(phba, pring, - saveq); - spin_lock_irqsave(&phba->hbalock, iflag); - if (!rc) - free_saveq = 0; - } else if (type == LPFC_ABORT_IOCB) { - if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) && - ((cmdiocbp = - lpfc_sli_iocbq_lookup(phba, pring, - saveq)))) { - /* Call the specified completion - routine */ - if (cmdiocbp->iocb_cmpl) { - spin_unlock_irqrestore( - &phba->hbalock, - iflag); - (cmdiocbp->iocb_cmpl) (phba, - cmdiocbp, saveq); - spin_lock_irqsave( - &phba->hbalock, - iflag); - } else - __lpfc_sli_release_iocbq(phba, - cmdiocbp); - } - } else if (type == LPFC_UNKNOWN_IOCB) { - if (irsp->ulpCommand == CMD_ADAPTER_MSG) { - - char adaptermsg[LPFC_MAX_ADPTMSG]; - - memset(adaptermsg, 0, - LPFC_MAX_ADPTMSG); - memcpy(&adaptermsg[0], (uint8_t *) irsp, - MAX_MSG_DATA); - dev_warn(&((phba->pcidev)->dev), - "lpfc%d: %s\n", - phba->brd_no, adaptermsg); - } else { - /* Unknown IOCB command */ - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0335 Unknown IOCB " - "command Data: x%x " - "x%x x%x x%x\n", - irsp->ulpCommand, - irsp->ulpStatus, - irsp->ulpIoTag, - irsp->ulpContext); - } - } - - if (free_saveq) { - list_for_each_entry_safe(rspiocbp, next_iocb, - &saveq->list, list) { - list_del(&rspiocbp->list); - __lpfc_sli_release_iocbq(phba, - rspiocbp); - } - __lpfc_sli_release_iocbq(phba, saveq); - } - rspiocbp = NULL; - } + spin_unlock_irqrestore(&phba->hbalock, iflag); + /* Handle the response IOCB */ + rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); + spin_lock_irqsave(&phba->hbalock, iflag); /* * If the port response put pointer has not been updated, sync @@ -2338,7 +2490,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, } spin_unlock_irqrestore(&phba->hbalock, iflag); - return rc; + return; } /** @@ -2420,7 +2572,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) } /** - * lpfc_sli_brdready - Check for host status bits + * lpfc_sli_brdready_s3 - Check for sli3 host ready status * @phba: Pointer to HBA context object. 
* @mask: Bit mask to be checked. * @@ -2432,8 +2584,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) * function returns 1 when HBA fail to restart otherwise returns * zero. **/ -int -lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) +static int +lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) { uint32_t status; int i = 0; @@ -2647,7 +2799,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) } /** - * lpfc_sli_brdreset - Reset the HBA + * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA * @phba: Pointer to HBA context object. * * This function resets the HBA by writing HC_INITFF to the control @@ -2683,7 +2835,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) (cfg_value & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); - psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); + psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); + /* Now toggle INITFF bit in the Host Control Register */ writel(HC_INITFF, phba->HCregaddr); mdelay(1); @@ -3289,32 +3442,20 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "0345 Resetting board due to mailbox timeout\n"); - /* - * lpfc_offline calls lpfc_sli_hba_down which will clean up - * on oustanding mailbox commands. - */ - /* If resets are disabled then set error state and return. */ - if (!phba->cfg_enable_hba_reset) { - phba->link_state = LPFC_HBA_ERROR; - return; - } - lpfc_offline_prep(phba); - lpfc_offline(phba); - lpfc_sli_brdrestart(phba); - lpfc_online(phba); - lpfc_unblock_mgmt_io(phba); - return; + + /* Reset the HBA device */ + lpfc_reset_hba(phba); } /** - * lpfc_sli_issue_mbox - Issue a mailbox command to firmware + * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware * @phba: Pointer to HBA context object. * @pmbox: Pointer to mailbox object. * @flag: Flag indicating how the mailbox need to be processed. * * This function is called by discovery code and HBA management code - * to submit a mailbox command to firmware. This function gets the - * hbalock to protect the data structures. + * to submit a mailbox command to firmware with SLI-3 interface spec. This + * function gets the hbalock to protect the data structures. * The mailbox command can be submitted in polling mode, in which case * this function will wait in a polling loop for the completion of the * mailbox. @@ -3332,8 +3473,9 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) * return codes the caller owns the mailbox command after the return of * the function. **/ -int -lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) +static int +lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, + uint32_t flag) { MAILBOX_t *mb; struct lpfc_sli *psli = &phba->sli; @@ -3349,6 +3491,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) spin_lock_irqsave(&phba->hbalock, drvr_flag); if (!pmbox) { /* processing mbox queue from intr_handler */ + if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + return MBX_SUCCESS; + } processing_queue = 1; phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; pmbox = lpfc_mbox_get(phba); @@ -3365,7 +3511,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT, "1806 Mbox x%x failed. 
No vport\n", - pmbox->mb.mbxCommand); + pmbox->u.mb.mbxCommand); dump_stack(); goto out_not_finished; } @@ -3385,21 +3531,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) psli = &phba->sli; - mb = &pmbox->mb; + mb = &pmbox->u.mb; status = MBX_SUCCESS; if (phba->link_state == LPFC_HBA_ERROR) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command cannot issue */ - LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):0311 Mailbox command x%x cannot " + "issue Data: x%x x%x\n", + pmbox->vport ? pmbox->vport->vpi : 0, + pmbox->u.mb.mbxCommand, psli->sli_flag, flag); goto out_not_finished; } if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); - LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2528 Mailbox command x%x cannot " + "issue Data: x%x x%x\n", + pmbox->vport ? pmbox->vport->vpi : 0, + pmbox->u.mb.mbxCommand, psli->sli_flag, flag); goto out_not_finished; } @@ -3413,14 +3567,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command cannot issue */ - LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2529 Mailbox command x%x " + "cannot issue Data: x%x x%x\n", + pmbox->vport ? pmbox->vport->vpi : 0, + pmbox->u.mb.mbxCommand, + psli->sli_flag, flag); goto out_not_finished; } - if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { + if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command cannot issue */ - LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2530 Mailbox command x%x " + "cannot issue Data: x%x x%x\n", + pmbox->vport ? pmbox->vport->vpi : 0, + pmbox->u.mb.mbxCommand, + psli->sli_flag, flag); goto out_not_finished; } @@ -3462,12 +3626,17 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) /* If we are not polling, we MUST be in SLI2 mode */ if (flag != MBX_POLL) { - if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && + if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && (mb->mbxCommand != MBX_KILL_BOARD)) { psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command cannot issue */ - LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2531 Mailbox command x%x " + "cannot issue Data: x%x x%x\n", + pmbox->vport ? 
pmbox->vport->vpi : 0, + pmbox->u.mb.mbxCommand, + psli->sli_flag, flag); goto out_not_finished; } /* timeout active mbox command */ @@ -3506,7 +3675,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) /* next set own bit for the adapter and copy over command word */ mb->mbxOwner = OWN_CHIP; - if (psli->sli_flag & LPFC_SLI2_ACTIVE) { + if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* First copy command data to host SLIM area */ lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); } else { @@ -3529,7 +3698,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) if (mb->mbxCommand == MBX_CONFIG_PORT) { /* switch over to host mailbox */ - psli->sli_flag |= LPFC_SLI2_ACTIVE; + psli->sli_flag |= LPFC_SLI_ACTIVE; } } @@ -3552,7 +3721,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) writel(CA_MBATT, phba->CAregaddr); readl(phba->CAregaddr); /* flush */ - if (psli->sli_flag & LPFC_SLI2_ACTIVE) { + if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* First read mbox status word */ word0 = *((uint32_t *)phba->mbox); word0 = le32_to_cpu(word0); @@ -3591,7 +3760,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) spin_lock_irqsave(&phba->hbalock, drvr_flag); } - if (psli->sli_flag & LPFC_SLI2_ACTIVE) { + if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* First copy command data */ word0 = *((uint32_t *)phba->mbox); word0 = le32_to_cpu(word0); @@ -3604,7 +3773,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) if (((slimword0 & OWN_CHIP) != OWN_CHIP) && slimmb->mbxStatus) { psli->sli_flag &= - ~LPFC_SLI2_ACTIVE; + ~LPFC_SLI_ACTIVE; word0 = slimword0; } } @@ -3616,7 +3785,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) ha_copy = readl(phba->HAregaddr); } - if (psli->sli_flag & LPFC_SLI2_ACTIVE) { + if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* copy results back to user */ lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); } else { @@ -3701,35 +3870,34 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } /** - * __lpfc_sli_issue_iocb - Lockless version of lpfc_sli_issue_iocb + * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb * @phba: Pointer to HBA context object. - * @pring: Pointer to driver SLI ring object. + * @ring_number: SLI ring number to issue iocb on. * @piocb: Pointer to command iocb. * @flag: Flag indicating if this command can be put into txq. * - * __lpfc_sli_issue_iocb is used by other functions in the driver - * to issue an iocb command to the HBA. If the PCI slot is recovering - * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT - * flag is turned on, the function returns IOCB_ERROR. - * When the link is down, this function allows only iocbs for - * posting buffers. - * This function finds next available slot in the command ring and - * posts the command to the available slot and writes the port - * attention register to request HBA start processing new iocb. - * If there is no slot available in the ring and - * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the - * txq, otherwise the function returns IOCB_BUSY. + * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue + * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is + * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT + * flag is turned on, the function returns IOCB_ERROR. 
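The behaviour described in this comment (post to a free ring slot, otherwise either park the request on the txq when SLI_IOCB_RET_IOCB allows it, or report IOCB_BUSY) reduces to a small policy function. A sketch with hypothetical counters in place of real ring state:

#include <stdio.h>

#define RING_SLOTS 4
#define RET_ON_BUSY 0x1  /* stand-in for SLI_IOCB_RET_IOCB */

enum { IOCB_SUCCESS, IOCB_BUSY };

static int ring_used;  /* occupied ring slots */
static int txq_len;    /* requests parked for later */

static int issue_iocb(unsigned int flags)
{
    if (ring_used < RING_SLOTS) {
        ring_used++;         /* post into the free slot */
        return IOCB_SUCCESS;
    }
    if (flags & RET_ON_BUSY) {
        txq_len++;           /* defer: the ring will drain later */
        return IOCB_SUCCESS;
    }
    return IOCB_BUSY;        /* caller must retry */
}

int main(void)
{
    int i;

    for (i = 0; i < 6; i++)
        printf("request %d -> %d\n", i, issue_iocb(RET_ON_BUSY));
    printf("parked on txq: %d\n", txq_len);
    return 0;
}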
When the link is down, + * this function allows only iocbs for posting buffers. This function finds + * next available slot in the command ring and posts the command to the + * available slot and writes the port attention register to request HBA start + * processing new iocb. If there is no slot available in the ring and + * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise + * the function returns IOCB_BUSY. * - * This function is called with hbalock held. - * The function will return success after it successfully submit the - * iocb to firmware or after adding to the txq. + * This function is called with hbalock held. The function will return success + * after it successfully submit the iocb to firmware or after adding to the + * txq. **/ static int -__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, +__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { struct lpfc_iocbq *nextiocb; IOCB_t *iocb; + struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; if (piocb->iocb_cmpl && (!piocb->vport) && (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && @@ -3833,6 +4001,52 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, return IOCB_BUSY; } +/** + * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb + * + * This routine wraps the actual lockless version for issusing IOCB function + * pointer from the lpfc_hba struct. + * + * Return codes: + * IOCB_ERROR - Error + * IOCB_SUCCESS - Success + * IOCB_BUSY - Busy + **/ +static inline int +__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *piocb, uint32_t flag) +{ + return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); +} + +/** + * lpfc_sli_api_table_setup - Set up sli api fucntion jump table + * @phba: The hba struct for which this call is being executed. + * @dev_grp: The HBA PCI-Device group number. + * + * This routine sets up the SLI interface API function jump table in @phba + * struct. + * Returns: 0 - success, -ENODEV - failure. + **/ +int +lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) +{ + + switch (dev_grp) { + case LPFC_PCI_DEV_LP: + phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; + phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1419 Invalid HBA PCI-device group: 0x%x\n", + dev_grp); + return -ENODEV; + break; + } + phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; + return 0; +} /** * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb @@ -3848,14 +4062,14 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * functions which do not hold hbalock. **/ int -lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, +lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { unsigned long iflags; int rc; spin_lock_irqsave(&phba->hbalock, iflags); - rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); + rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); spin_unlock_irqrestore(&phba->hbalock, iflags); return rc; @@ -5077,53 +5291,104 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, } /** - * lpfc_sli_flush_mbox_queue - mailbox queue cleanup function + * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system * @phba: Pointer to HBA context. 
* - * This function is called to cleanup any pending mailbox - * objects in the driver queue before bringing the HBA offline. - * This function is called while resetting the HBA. - * The function is called without any lock held. The function - * takes hbalock to update SLI data structure. - * This function returns 1 when there is an active mailbox - * command pending else returns 0. + * This function is called to shutdown the driver's mailbox sub-system. + * It first marks the mailbox sub-system is in a block state to prevent + * the asynchronous mailbox command from issued off the pending mailbox + * command queue. If the mailbox command sub-system shutdown is due to + * HBA error conditions such as EEH or ERATT, this routine shall invoke + * the mailbox sub-system flush routine to forcefully bring down the + * mailbox sub-system. Otherwise, if it is due to normal condition (such + * as with offline or HBA function reset), this routine will wait for the + * outstanding mailbox command to complete before invoking the mailbox + * sub-system flush routine to gracefully bring down mailbox sub-system. **/ -int -lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) +void +lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) { - struct lpfc_vport *vport = phba->pport; - int i = 0; - uint32_t ha_copy; + struct lpfc_sli *psli = &phba->sli; + uint8_t actcmd = MBX_HEARTBEAT; + unsigned long timeout; - while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) { - if (i++ > LPFC_MBOX_TMO * 1000) - return 1; + spin_lock_irq(&phba->hbalock); + psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; + spin_unlock_irq(&phba->hbalock); - /* - * Call lpfc_sli_handle_mb_event only if a mailbox cmd - * did finish. This way we won't get the misleading - * "Stray Mailbox Interrupt" message. - */ + if (psli->sli_flag & LPFC_SLI_ACTIVE) { spin_lock_irq(&phba->hbalock); - ha_copy = phba->work_ha; - phba->work_ha &= ~HA_MBATT; + if (phba->sli.mbox_active) + actcmd = phba->sli.mbox_active->u.mb.mbxCommand; spin_unlock_irq(&phba->hbalock); + /* Determine how long we might wait for the active mailbox + * command to be gracefully completed by firmware. + */ + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * + 1000) + jiffies; + while (phba->sli.mbox_active) { + /* Check active mailbox complete status every 2ms */ + msleep(2); + if (time_after(jiffies, timeout)) + /* Timeout, let the mailbox flush routine to + * forcefully release active mailbox command + */ + break; + } + } + lpfc_sli_mbox_sys_flush(phba); +} - if (ha_copy & HA_MBATT) - if (lpfc_sli_handle_mb_event(phba) == 0) - i = 0; +/** + * lpfc_sli_eratt_read - read sli-3 error attention events + * @phba: Pointer to HBA context. + * + * This function is called to read the SLI3 device error attention registers + * for possible error attention events. The caller must hold the hostlock + * with spin_lock_irq(). + * + * This fucntion returns 1 when there is Error Attention in the Host Attention + * Register and returns 0 otherwise. + **/ +static int +lpfc_sli_eratt_read(struct lpfc_hba *phba) +{ + uint32_t ha_copy; - msleep(1); - } + /* Read chip Host Attention (HA) register */ + ha_copy = readl(phba->HAregaddr); + if (ha_copy & HA_ERATT) { + /* Read host status register to retrieve error event */ + lpfc_sli_read_hs(phba); - return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 
1 : 0; + /* Check if a deferred error condition is active */ + if ((HS_FFER1 & phba->work_hs) && + ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | + HS_FFER6 | HS_FFER7) & phba->work_hs)) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag |= DEFER_ERATT; + spin_unlock_irq(&phba->hbalock); + /* Clear all interrupt enable conditions */ + writel(0, phba->HCregaddr); + readl(phba->HCregaddr); + } + + /* Set the driver HA work bitmap */ + spin_lock_irq(&phba->hbalock); + phba->work_ha |= HA_ERATT; + /* Indicate polling handles this ERATT */ + phba->hba_flag |= HBA_ERATT_HANDLED; + spin_unlock_irq(&phba->hbalock); + return 1; + } + return 0; } /** * lpfc_sli_check_eratt - check error attention events * @phba: Pointer to HBA context. * - * This function is called form timer soft interrupt context to check HBA's + * This function is called from timer soft interrupt context to check HBA's * error attention register bit for error attention events. * * This function returns 1 when there is Error Attention in the Host Attention @@ -5134,10 +5399,6 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba) { uint32_t ha_copy; - /* If PCI channel is offline, don't process it */ - if (unlikely(pci_channel_offline(phba->pcidev))) - return 0; - /* If somebody is waiting to handle an eratt, don't process it * here. The brdkill function will do this. */ @@ -5161,56 +5422,80 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba) return 0; } - /* Read chip Host Attention (HA) register */ - ha_copy = readl(phba->HAregaddr); - if (ha_copy & HA_ERATT) { - /* Read host status register to retrieve error event */ - lpfc_sli_read_hs(phba); - - /* Check if there is a deferred error condition is active */ - if ((HS_FFER1 & phba->work_hs) && - ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | - HS_FFER6 | HS_FFER7) & phba->work_hs)) { - phba->hba_flag |= DEFER_ERATT; - /* Clear all interrupt enable conditions */ - writel(0, phba->HCregaddr); - readl(phba->HCregaddr); - } - - /* Set the driver HA work bitmap */ - phba->work_ha |= HA_ERATT; - /* Indicate polling handles this ERATT */ - phba->hba_flag |= HBA_ERATT_HANDLED; + /* If PCI channel is offline, don't process it */ + if (unlikely(pci_channel_offline(phba->pcidev))) { spin_unlock_irq(&phba->hbalock); - return 1; + return 0; + } + + switch (phba->sli_rev) { + case LPFC_SLI_REV2: + case LPFC_SLI_REV3: + /* Read chip Host Attention (HA) register */ + ha_copy = lpfc_sli_eratt_read(phba); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0299 Invalid SLI revision (%d)\n", + phba->sli_rev); + ha_copy = 0; + break; } spin_unlock_irq(&phba->hbalock); + + return ha_copy; +} + +/** + * lpfc_intr_state_check - Check device state for interrupt handling + * @phba: Pointer to HBA context. + * + * This inline routine checks whether a device or its PCI slot is in a state + * where the interrupt should be handled. + * + * This function returns 0 if the device or the PCI slot is in a state where + * the interrupt should be handled, otherwise -EIO. + */ +static inline int +lpfc_intr_state_check(struct lpfc_hba *phba) +{ + /* If the pci channel is offline, ignore all the interrupts */ + if (unlikely(pci_channel_offline(phba->pcidev))) + return -EIO; + + /* Update device level interrupt statistics */ + phba->sli.slistat.sli_intr++; + + /* Ignore all interrupts during initialization.
*/ + if (unlikely(phba->link_state < LPFC_LINK_DOWN)) + return -EIO; + return 0; } /** - * lpfc_sp_intr_handler - The slow-path interrupt handler of lpfc driver + * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device * @irq: Interrupt number. * @dev_id: The device context pointer. * * This function is directly called from the PCI layer as an interrupt - * service routine when the device is enabled with MSI-X multi-message - * interrupt mode and there are slow-path events in the HBA. However, - * when the device is enabled with either MSI or Pin-IRQ interrupt mode, - * this function is called as part of the device-level interrupt handler. - * When the PCI slot is in error recovery or the HBA is undergoing - * initialization, the interrupt handler will not process the interrupt. - * The link attention and ELS ring attention events are handled by the - * worker thread. The interrupt handler signals the worker thread and - * and returns for these events. This function is called without any - * lock held. It gets the hbalock to access and update SLI data + * service routine when device with SLI-3 interface spec is enabled with + * MSI-X multi-message interrupt mode and there are slow-path events in + * the HBA. However, when the device is enabled with either MSI or Pin-IRQ + * interrupt mode, this function is called as part of the device-level + * interrupt handler. When the PCI slot is in error recovery or the HBA + * is undergoing initialization, the interrupt handler will not process + * the interrupt. The link attention and ELS ring attention events are + * handled by the worker thread. The interrupt handler signals the worker + * thread and returns for these events. This function is called without + * any lock held. It gets the hbalock to access and update SLI data * structures. * * This function returns IRQ_HANDLED when interrupt is handled else it * returns IRQ_NONE. **/ irqreturn_t -lpfc_sp_intr_handler(int irq, void *dev_id) +lpfc_sli_sp_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; uint32_t ha_copy; @@ -5240,13 +5525,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id) * individual interrupt handler in MSI-X multi-message interrupt mode */ if (phba->intr_type == MSIX) { - /* If the pci channel is offline, ignore all the interrupts */ - if (unlikely(pci_channel_offline(phba->pcidev))) - return IRQ_NONE; - /* Update device-level interrupt statistics */ - phba->sli.slistat.sli_intr++; - /* Ignore all interrupts during initialization. */ - if (unlikely(phba->link_state < LPFC_LINK_DOWN)) + /* Check device state for handling interrupt */ + if (lpfc_intr_state_check(phba)) return IRQ_NONE; /* Need to read HA REG for slow-path events */ spin_lock_irqsave(&phba->hbalock, iflag); @@ -5271,7 +5551,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id) * interrupt. */ if (unlikely(phba->hba_flag & DEFER_ERATT)) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflag); return IRQ_NONE; } @@ -5434,7 +5714,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id) LOG_MBOX | LOG_SLI, "0350 rc should have" "been MBX_BUSY"); - goto send_current_mbox; + if (rc != MBX_NOT_FINISHED) + goto send_current_mbox; } } spin_lock_irqsave( @@ -5471,29 +5752,29 @@ send_current_mbox: } return IRQ_HANDLED; -} /* lpfc_sp_intr_handler */ +} /* lpfc_sli_sp_intr_handler */ /** - * lpfc_fp_intr_handler - The fast-path interrupt handler of lpfc driver + * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. * @irq: Interrupt number. 
* @dev_id: The device context pointer. * * This function is directly called from the PCI layer as an interrupt - * service routine when the device is enabled with MSI-X multi-message - * interrupt mode and there is a fast-path FCP IOCB ring event in the - * HBA. However, when the device is enabled with either MSI or Pin-IRQ - * interrupt mode, this function is called as part of the device-level - * interrupt handler. When the PCI slot is in error recovery or the HBA - * is undergoing initialization, the interrupt handler will not process - * the interrupt. The SCSI FCP fast-path ring event are handled in the - * intrrupt context. This function is called without any lock held. It - * gets the hbalock to access and update SLI data structures. + * service routine when device with SLI-3 interface spec is enabled with + * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB + * ring event in the HBA. However, when the device is enabled with either + * MSI or Pin-IRQ interrupt mode, this function is called as part of the + * device-level interrupt handler. When the PCI slot is in error recovery + * or the HBA is undergoing initialization, the interrupt handler will not + * process the interrupt. The SCSI FCP fast-path ring events are handled in + * the interrupt context. This function is called without any lock held. + * It gets the hbalock to access and update SLI data structures. * * This function returns IRQ_HANDLED when interrupt is handled else it * returns IRQ_NONE. **/ irqreturn_t -lpfc_fp_intr_handler(int irq, void *dev_id) +lpfc_sli_fp_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; uint32_t ha_copy; @@ -5513,13 +5794,8 @@ lpfc_fp_intr_handler(int irq, void *dev_id) * individual interrupt handler in MSI-X multi-message interrupt mode */ if (phba->intr_type == MSIX) { - /* If pci channel is offline, ignore all the interrupts */ - if (unlikely(pci_channel_offline(phba->pcidev))) - return IRQ_NONE; - /* Update device-level interrupt statistics */ - phba->sli.slistat.sli_intr++; - /* Ignore all interrupts during initialization. */ - if (unlikely(phba->link_state < LPFC_LINK_DOWN)) + /* Check device state for handling interrupt */ + if (lpfc_intr_state_check(phba)) return IRQ_NONE; /* Need to read HA REG for FCP ring and other ring events */ ha_copy = readl(phba->HAregaddr); @@ -5530,7 +5806,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id) * any interrupt. */ if (unlikely(phba->hba_flag & DEFER_ERATT)) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflag); return IRQ_NONE; } writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), @@ -5566,26 +5842,27 @@ lpfc_fp_intr_handler(int irq, void *dev_id) } } return IRQ_HANDLED; -} /* lpfc_fp_intr_handler */ +} /* lpfc_sli_fp_intr_handler */ /** - * lpfc_intr_handler - The device-level interrupt handler of lpfc driver + * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device * @irq: Interrupt number. * @dev_id: The device context pointer. * - * This function is the device-level interrupt handler called from the PCI - * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is - * an event in the HBA which requires driver attention. This function - * invokes the slow-path interrupt attention handling function and fast-path - * interrupt attention handling function in turn to process the relevant - * HBA attention events. This function is called without any lock held. It - * gets the hbalock to access and update SLI data structures.
+ * This function is the HBA device-level interrupt handler to device with + * SLI-3 interface spec, called from the PCI layer when either MSI or + * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which + * requires driver attention. This function invokes the slow-path interrupt + * attention handling function and fast-path interrupt attention handling + * function in turn to process the relevant HBA attention events. This + * function is called without any lock held. It gets the hbalock to access + * and update SLI data structures. * * This function returns IRQ_HANDLED when interrupt is handled, else it * returns IRQ_NONE. **/ irqreturn_t -lpfc_intr_handler(int irq, void *dev_id) +lpfc_sli_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; irqreturn_t sp_irq_rc, fp_irq_rc; @@ -5600,15 +5877,8 @@ lpfc_intr_handler(int irq, void *dev_id) if (unlikely(!phba)) return IRQ_NONE; - /* If the pci channel is offline, ignore all the interrupts. */ - if (unlikely(pci_channel_offline(phba->pcidev))) - return IRQ_NONE; - - /* Update device level interrupt statistics */ - phba->sli.slistat.sli_intr++; - - /* Ignore all interrupts during initialization. */ - if (unlikely(phba->link_state < LPFC_LINK_DOWN)) + /* Check device state for handling interrupt */ + if (lpfc_intr_state_check(phba)) return IRQ_NONE; spin_lock(&phba->hbalock); @@ -5650,7 +5920,7 @@ lpfc_intr_handler(int irq, void *dev_id) status2 >>= (4*LPFC_ELS_RING); if (status1 || (status2 & HA_RXMASK)) - sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id); + sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); else sp_irq_rc = IRQ_NONE; @@ -5670,10 +5940,10 @@ lpfc_intr_handler(int irq, void *dev_id) status2 = 0; if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) - fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id); + fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); else fp_irq_rc = IRQ_NONE; /* Return device-level interrupt handling status */ return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; -} /* lpfc_intr_handler */ +} /* lpfc_sli_intr_handler */ -- cgit v1.2.3 From da0436e915a5c17ee79e72c1bf978a4ebb1cbf4d Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 22 May 2009 14:51:39 -0400 Subject: [SCSI] lpfc 8.3.2 : Addition of SLI4 Interface - Base Support Adds new hardware and interface definitions. Adds new interface routines - utilizing the reorganized layout of the driver. Adds SLI-4 specific functions for attachment, initialization, teardown, etc. 
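The "reorganized layout" referred to above centers on per-PCI-device-group jump tables: probe-time setup routines such as lpfc_sli_api_table_setup() store the SLI-3 or SLI-4 implementations in function pointers on the hba structure, and thin wrappers like __lpfc_sli_issue_iocb() dispatch through them so the hot path never switches on the device group again. The following is a rough, self-contained user-space sketch of that pattern only; every name and type in it is a hypothetical stand-in, not the driver's real code:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

struct fake_hba;
typedef int (*issue_iocb_fn)(struct fake_hba *, uint32_t ring);

struct fake_hba {
	uint8_t dev_grp;		/* hypothetical PCI-device group */
	issue_iocb_fn issue_iocb;	/* filled in once at probe time */
};

enum { DEV_GRP_LP = 0, DEV_GRP_OC = 1 };	/* SLI-3 vs SLI-4 style groups */

static int issue_iocb_s3(struct fake_hba *hba, uint32_t ring)
{
	(void)hba;
	printf("SLI-3 path: post iocb to ring %u\n", ring);
	return 0;
}

static int issue_iocb_s4(struct fake_hba *hba, uint32_t ring)
{
	(void)hba;
	printf("SLI-4 path: build WQE for ring %u\n", ring);
	return 0;
}

/* Analogue of an api_table_setup routine: one switch at init time. */
static int api_table_setup(struct fake_hba *hba)
{
	switch (hba->dev_grp) {
	case DEV_GRP_LP:
		hba->issue_iocb = issue_iocb_s3;
		break;
	case DEV_GRP_OC:
		hba->issue_iocb = issue_iocb_s4;
		break;
	default:
		return -ENODEV;	/* unknown device group */
	}
	return 0;
}

int main(void)
{
	struct fake_hba hba = { .dev_grp = DEV_GRP_OC };

	if (api_table_setup(&hba))
		return 1;
	/* Hot-path callers dispatch through the stored pointer. */
	return hba.issue_iocb(&hba, 0);
}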
Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 49 +- drivers/scsi/lpfc/lpfc_attr.c | 83 +- drivers/scsi/lpfc/lpfc_crtn.h | 59 +- drivers/scsi/lpfc/lpfc_ct.c | 2 + drivers/scsi/lpfc/lpfc_debugfs.c | 2 + drivers/scsi/lpfc/lpfc_els.c | 4 +- drivers/scsi/lpfc/lpfc_hbadisc.c | 34 +- drivers/scsi/lpfc/lpfc_hw.h | 140 +- drivers/scsi/lpfc/lpfc_hw4.h | 2141 ++++++++++++++++ drivers/scsi/lpfc/lpfc_init.c | 4978 ++++++++++++++++++++++++++++++------ drivers/scsi/lpfc/lpfc_mbox.c | 2 + drivers/scsi/lpfc/lpfc_mem.c | 204 +- drivers/scsi/lpfc/lpfc_nportdisc.c | 2 + drivers/scsi/lpfc/lpfc_scsi.c | 492 ++++ drivers/scsi/lpfc/lpfc_scsi.h | 2 + drivers/scsi/lpfc/lpfc_sli.c | 1455 ++++++++++- drivers/scsi/lpfc/lpfc_sli.h | 27 +- drivers/scsi/lpfc/lpfc_sli4.h | 467 ++++ drivers/scsi/lpfc/lpfc_vport.c | 23 + 19 files changed, 9170 insertions(+), 996 deletions(-) create mode 100644 drivers/scsi/lpfc/lpfc_hw4.h create mode 100644 drivers/scsi/lpfc/lpfc_sli4.h (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 6c24c9aabe7b..13ac108a244c 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -105,9 +105,11 @@ struct lpfc_dma_pool { }; struct hbq_dmabuf { + struct lpfc_dmabuf hbuf; struct lpfc_dmabuf dbuf; uint32_t size; uint32_t tag; + struct lpfc_rcqe rcqe; }; /* Priority bit. Set value to exceed low water mark in lpfc_mem. */ @@ -141,7 +143,10 @@ typedef struct lpfc_vpd { } rev; struct { #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd2 :24; /* Reserved */ + uint32_t rsvd3 :19; /* Reserved */ + uint32_t cdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd2 : 3; /* Reserved */ + uint32_t cbg : 1; /* Configure BlockGuard */ uint32_t cmv : 1; /* Configure Max VPIs */ uint32_t ccrp : 1; /* Config Command Ring Polling */ uint32_t csah : 1; /* Configure Synchronous Abort Handling */ @@ -159,7 +164,10 @@ typedef struct lpfc_vpd { uint32_t csah : 1; /* Configure Synchronous Abort Handling */ uint32_t ccrp : 1; /* Config Command Ring Polling */ uint32_t cmv : 1; /* Configure Max VPIs */ - uint32_t rsvd2 :24; /* Reserved */ + uint32_t cbg : 1; /* Configure BlockGuard */ + uint32_t rsvd2 : 3; /* Reserved */ + uint32_t cdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd3 :19; /* Reserved */ #endif } sli3Feat; } lpfc_vpd_t; @@ -280,6 +288,9 @@ struct lpfc_vport { enum discovery_state port_state; uint16_t vpi; + uint16_t vfi; + uint8_t vfi_state; +#define LPFC_VFI_REGISTERED 0x1 uint32_t fc_flag; /* FC flags */ /* Several of these flags are HBA centric and should be moved to @@ -392,6 +403,9 @@ struct lpfc_vport { #endif uint8_t stat_data_enabled; uint8_t stat_data_blocked; + struct list_head rcv_buffer_list; + uint32_t vport_flag; +#define STATIC_VPORT 1 }; struct hbq_s { @@ -494,6 +508,7 @@ struct lpfc_hba { #define LPFC_SLI3_CRP_ENABLED 0x08 #define LPFC_SLI3_INB_ENABLED 0x10 #define LPFC_SLI3_BG_ENABLED 0x20 +#define LPFC_SLI3_DSS_ENABLED 0x40 uint32_t iocb_cmd_size; uint32_t iocb_rsp_size; @@ -507,8 +522,13 @@ struct lpfc_hba { uint32_t hba_flag; /* hba generic flags */ #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ - -#define DEFER_ERATT 0x4 /* Deferred error attention in progress */ +#define DEFER_ERATT 0x2 /* Deferred error attention in progress */ +#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */ +#define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */ +#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ +#define FCP_XRI_ABORT_EVENT 
0x20 +#define ELS_XRI_ABORT_EVENT 0x40 +#define ASYNC_EVENT 0x80 struct lpfc_dmabuf slim2p; MAILBOX_t *mbox; @@ -567,6 +587,9 @@ struct lpfc_hba { uint32_t cfg_poll; uint32_t cfg_poll_tmo; uint32_t cfg_use_msi; + uint32_t cfg_fcp_imax; + uint32_t cfg_fcp_wq_count; + uint32_t cfg_fcp_eq_count; uint32_t cfg_sg_seg_cnt; uint32_t cfg_prot_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; @@ -576,6 +599,8 @@ struct lpfc_hba { uint32_t cfg_enable_hba_reset; uint32_t cfg_enable_hba_heartbeat; uint32_t cfg_enable_bg; + uint32_t cfg_enable_fip; + uint32_t cfg_log_verbose; lpfc_vpd_t vpd; /* vital product data */ @@ -659,7 +684,8 @@ struct lpfc_hba { /* pci_mem_pools */ struct pci_pool *lpfc_scsi_dma_buf_pool; struct pci_pool *lpfc_mbuf_pool; - struct pci_pool *lpfc_hbq_pool; + struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ + struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ struct lpfc_dma_pool lpfc_mbuf_safety_pool; mempool_t *mbox_mem_pool; @@ -675,6 +701,14 @@ struct lpfc_hba { struct lpfc_vport *pport; /* physical lpfc_vport pointer */ uint16_t max_vpi; /* Maximum virtual nports */ #define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */ + uint16_t max_vports; /* + * For IOV HBAs max_vpi can change + * after a reset. max_vports is max + * number of vports present. This can + * be greater than max_vpi. + */ + uint16_t vpi_base; + uint16_t vfi_base; unsigned long *vpi_bmask; /* vpi allocation table */ /* Data structure used by fabric iocb scheduler */ @@ -733,6 +767,11 @@ struct lpfc_hba { /* Maximum number of events that can be outstanding at any time*/ #define LPFC_MAX_EVT_COUNT 512 atomic_t fast_event_count; + struct lpfc_fcf fcf; + uint8_t fc_map[3]; + uint8_t valid_vlan; + uint16_t vlan_id; + struct list_head fcf_conn_rec_list; }; static inline struct Scsi_Host * diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c14f0cbdb125..82016fc672b1 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -30,8 +30,10 @@ #include #include +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -828,18 +830,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba, return 0; } - if (mrpi) - *mrpi = pmb->un.varRdConfig.max_rpi; - if (arpi) - *arpi = pmb->un.varRdConfig.avail_rpi; - if (mxri) - *mxri = pmb->un.varRdConfig.max_xri; - if (axri) - *axri = pmb->un.varRdConfig.avail_xri; - if (mvpi) - *mvpi = pmb->un.varRdConfig.max_vpi; - if (avpi) - *avpi = pmb->un.varRdConfig.avail_vpi; + if (phba->sli_rev == LPFC_SLI_REV4) { + rd_config = &pmboxq->u.mqe.un.rd_config; + if (mrpi) + *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); + if (arpi) + *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) - + phba->sli4_hba.max_cfg_param.rpi_used; + if (mxri) + *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); + if (axri) + *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) - + phba->sli4_hba.max_cfg_param.xri_used; + if (mvpi) + *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); + if (avpi) + *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - + phba->sli4_hba.max_cfg_param.vpi_used; + } else { + if (mrpi) + *mrpi = pmb->un.varRdConfig.max_rpi; + if (arpi) + *arpi = pmb->un.varRdConfig.avail_rpi; + if (mxri) + *mxri = pmb->un.varRdConfig.max_xri; + if (axri) + *axri = pmb->un.varRdConfig.avail_xri; + if (mvpi) + *mvpi = pmb->un.varRdConfig.max_vpi; + if (avpi) + *avpi = pmb->un.varRdConfig.avail_vpi; + } 
mempool_free(pmboxq, phba->mbox_mem_pool); return 1; @@ -2844,14 +2865,38 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, /* # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that # support this feature -# 0 = MSI disabled +# 0 = MSI disabled (default) # 1 = MSI enabled -# 2 = MSI-X enabled (default) -# Value range is [0,2]. Default value is 2. +# 2 = MSI-X enabled +# Value range is [0,2]. Default value is 0. */ -LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " +LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or " "MSI-X (2), if possible"); +/* +# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second +# +# Value range is [636,651042]. Default value is 10000. +*/ +LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST, + "Set the maximum number of fast-path FCP interrupts per second"); + +/* +# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues +# +# Value range is [1,31]. Default value is 4. +*/ +LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX, + "Set the number of fast-path FCP work queues, if possible"); + +/* +# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues +# +# Value range is [1,7]. Default value is 1. +*/ +LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX, + "Set the number of fast-path FCP event queues, if possible"); + /* # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. # 0 = HBA resets disabled @@ -2969,6 +3014,9 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_poll, &dev_attr_lpfc_poll_tmo, &dev_attr_lpfc_use_msi, + &dev_attr_lpfc_fcp_imax, + &dev_attr_lpfc_fcp_wq_count, + &dev_attr_lpfc_fcp_eq_count, &dev_attr_lpfc_enable_bg, &dev_attr_lpfc_soft_wwnn, &dev_attr_lpfc_soft_wwpn, @@ -4105,6 +4153,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_poll_tmo_init(phba, lpfc_poll_tmo); lpfc_enable_npiv_init(phba, lpfc_enable_npiv); lpfc_use_msi_init(phba, lpfc_use_msi); + lpfc_fcp_imax_init(phba, lpfc_fcp_imax); + lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); + lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); lpfc_enable_bg_init(phba, lpfc_enable_bg); diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index f88ce3f26190..3802e455734f 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -35,17 +35,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); -int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, - LPFC_MBOXQ_t *, uint32_t); +int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, + LPFC_MBOXQ_t *, uint32_t); void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); -void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); +void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); +void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); void 
lpfc_cleanup_rpis(struct lpfc_vport *, int); int lpfc_linkdown(struct lpfc_hba *); +void lpfc_linkdown_port(struct lpfc_vport *); void lpfc_port_link_failure(struct lpfc_vport *); void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); @@ -54,6 +56,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, @@ -149,15 +152,19 @@ int lpfc_online(struct lpfc_hba *); void lpfc_unblock_mgmt_io(struct lpfc_hba *); void lpfc_offline_prep(struct lpfc_hba *); void lpfc_offline(struct lpfc_hba *); +void lpfc_reset_hba(struct lpfc_hba *); int lpfc_sli_setup(struct lpfc_hba *); int lpfc_sli_queue_setup(struct lpfc_hba *); void lpfc_handle_eratt(struct lpfc_hba *); void lpfc_handle_latt(struct lpfc_hba *); -irqreturn_t lpfc_intr_handler(int, void *); -irqreturn_t lpfc_sp_intr_handler(int, void *); -irqreturn_t lpfc_fp_intr_handler(int, void *); +irqreturn_t lpfc_sli_intr_handler(int, void *); +irqreturn_t lpfc_sli_sp_intr_handler(int, void *); +irqreturn_t lpfc_sli_fp_intr_handler(int, void *); +irqreturn_t lpfc_sli4_intr_handler(int, void *); +irqreturn_t lpfc_sli4_sp_intr_handler(int, void *); +irqreturn_t lpfc_sli4_fp_intr_handler(int, void *); void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); @@ -165,16 +172,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); +void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_mbox_dev_check(struct lpfc_hba *); int lpfc_mbox_tmo_val(struct lpfc_hba *, int); +void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); +void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); +void lpfc_init_vpi(struct lpfcMboxq *, uint16_t); +void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t); +void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); +void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); +void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, uint32_t , LPFC_MBOXQ_t *); struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *); void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *); +struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *); +void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); +void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, + uint16_t); +void lpfc_unregister_unused_fcf(struct lpfc_hba *); -int lpfc_mem_alloc(struct lpfc_hba *); +int lpfc_mem_alloc(struct lpfc_hba *, int align); void lpfc_mem_free(struct lpfc_hba *); +void lpfc_mem_free_all(struct lpfc_hba *); void lpfc_stop_vport_timers(struct lpfc_vport *); void lpfc_poll_timeout(unsigned long ptr); @@ -198,12 +221,13 @@ int lpfc_sli_host_down(struct lpfc_vport *); int lpfc_sli_hba_down(struct 
lpfc_hba *); int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); int lpfc_sli_handle_mb_event(struct lpfc_hba *); -int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); +void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *); int lpfc_sli_check_eratt(struct lpfc_hba *); -int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, +void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, struct lpfc_sli_ring *, uint32_t); +int lpfc_sli4_handle_received_buffer(struct lpfc_hba *); void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); -int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, +int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, uint32_t); void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); @@ -237,7 +261,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *, int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); -int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *, +int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, struct lpfc_iocbq *, uint32_t); void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, @@ -254,6 +278,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *); const char* lpfc_info(struct Scsi_Host *); int lpfc_scan_finished(struct Scsi_Host *, unsigned long); +int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_api_table_setup(struct lpfc_hba *, uint8_t); + void lpfc_get_cfgparam(struct lpfc_hba *); void lpfc_get_vport_cfgparam(struct lpfc_vport *); int lpfc_alloc_sysfs_attr(struct lpfc_vport *); @@ -314,8 +344,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *); void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *); +void lpfc_create_static_vport(struct lpfc_hba *); +void lpfc_stop_hba_timers(struct lpfc_hba *); +void lpfc_stop_port(struct lpfc_hba *); +void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); +int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); +void lpfc_start_fdiscs(struct lpfc_hba *phba); #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) #define HBA_EVENT_RSCN 5 #define HBA_EVENT_LINK_UP 2 #define HBA_EVENT_LINK_DOWN 3 + diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 4164b935ea9f..51990787796f 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -32,8 +32,10 @@ #include #include +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 5dd66925f4ca..42ef258c7d52 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -33,8 +33,10 @@ #include #include +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 8c5c3aea4a19..9fe36bf6fd14 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -28,8 
+28,10 @@ #include #include +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -220,7 +222,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, icmd->un.elsreq64.myID = vport->fc_myDID; /* For ELS_REQUEST64_CR, use the VPI by default */ - icmd->ulpContext = vport->vpi; + icmd->ulpContext = vport->vpi + phba->vpi_base; icmd->ulpCt_h = 0; /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ if (elscmd == ELS_CMD_ECHO) diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 25fc96c9081f..0fc66005d545 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -29,10 +29,12 @@ #include #include +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" @@ -491,6 +493,10 @@ lpfc_work_done(struct lpfc_hba *phba) phba->work_ha = 0; spin_unlock_irq(&phba->hbalock); + /* First, try to post the next mailbox command to SLI4 device */ + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) + lpfc_sli4_post_async_mbox(phba); + if (ha_copy & HA_ERATT) /* Handle the error attention event */ lpfc_handle_eratt(phba); @@ -501,9 +507,27 @@ lpfc_work_done(struct lpfc_hba *phba) if (ha_copy & HA_LATT) lpfc_handle_latt(phba); + /* Process SLI4 events */ + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { + if (phba->hba_flag & FCP_XRI_ABORT_EVENT) + lpfc_sli4_fcp_xri_abort_event_proc(phba); + if (phba->hba_flag & ELS_XRI_ABORT_EVENT) + lpfc_sli4_els_xri_abort_event_proc(phba); + if (phba->hba_flag & ASYNC_EVENT) + lpfc_sli4_async_event_proc(phba); + if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER; + spin_unlock_irq(&phba->hbalock); + lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); + } + if (phba->hba_flag & HBA_RECEIVE_BUFFER) + lpfc_sli4_handle_received_buffer(phba); + } + vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi; i++) { + for (i = 0; i <= phba->max_vports; i++) { /* * We could have no vports in array if unloading, so if * this happens then just use the pport @@ -2556,7 +2580,8 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) * clear_la then don't send it. 
*/ if ((phba->link_state >= LPFC_CLEAR_LA) || - (vport->port_type != LPFC_PHYSICAL_PORT)) + (vport->port_type != LPFC_PHYSICAL_PORT) || + (phba->sli_rev == LPFC_SLI_REV4)) return; /* Link up discovery */ @@ -2585,7 +2610,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (regvpimbox) { - lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); + lpfc_reg_vpi(vport, regvpimbox); regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; regvpimbox->vport = vport; if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) @@ -2645,7 +2670,8 @@ lpfc_disc_start(struct lpfc_vport *vport) */ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && !(vport->fc_flag & FC_PT2PT) && - !(vport->fc_flag & FC_RSCN_MODE)) { + !(vport->fc_flag & FC_RSCN_MODE) && + (phba->sli_rev < LPFC_SLI_REV4)) { lpfc_issue_reg_vpi(phba, vport); return; } diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 4168c7b498b8..a9d64cfbe5cc 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -470,6 +470,35 @@ struct serv_parm { /* Structure is in Big Endian format */ uint8_t vendorVersion[16]; }; +/* + * Virtual Fabric Tagging Header + */ +struct fc_vft_header { + uint32_t word0; +#define fc_vft_hdr_r_ctl_SHIFT 24 +#define fc_vft_hdr_r_ctl_MASK 0xFF +#define fc_vft_hdr_r_ctl_WORD word0 +#define fc_vft_hdr_ver_SHIFT 22 +#define fc_vft_hdr_ver_MASK 0x3 +#define fc_vft_hdr_ver_WORD word0 +#define fc_vft_hdr_type_SHIFT 18 +#define fc_vft_hdr_type_MASK 0xF +#define fc_vft_hdr_type_WORD word0 +#define fc_vft_hdr_e_SHIFT 16 +#define fc_vft_hdr_e_MASK 0x1 +#define fc_vft_hdr_e_WORD word0 +#define fc_vft_hdr_priority_SHIFT 13 +#define fc_vft_hdr_priority_MASK 0x7 +#define fc_vft_hdr_priority_WORD word0 +#define fc_vft_hdr_vf_id_SHIFT 1 +#define fc_vft_hdr_vf_id_MASK 0xFFF +#define fc_vft_hdr_vf_id_WORD word0 + uint32_t word1; +#define fc_vft_hdr_hopct_SHIFT 24 +#define fc_vft_hdr_hopct_MASK 0xFF +#define fc_vft_hdr_hopct_WORD word1 +}; + /* * Extended Link Service LS_COMMAND codes (Payload Word 0) */ @@ -1152,6 +1181,9 @@ typedef struct { #define PCI_DEVICE_ID_HORNET 0xfe05 #define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 +#define PCI_VENDOR_ID_SERVERENGINE 0x19a2 +#define PCI_DEVICE_ID_TIGERSHARK 0x0704 +#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705 #define JEDEC_ID_ADDRESS 0x0080001c #define FIREFLY_JEDEC_ID 0x1ACC @@ -1342,15 +1374,21 @@ typedef struct { /* FireFly BIU registers */ #define MBX_READ_LA64 0x95 #define MBX_REG_VPI 0x96 #define MBX_UNREG_VPI 0x97 -#define MBX_REG_VNPID 0x96 -#define MBX_UNREG_VNPID 0x97 #define MBX_WRITE_WWN 0x98 #define MBX_SET_DEBUG 0x99 #define MBX_LOAD_EXP_ROM 0x9C - -#define MBX_MAX_CMDS 0x9D +#define MBX_SLI4_CONFIG 0x9B +#define MBX_SLI4_REQ_FTRS 0x9D +#define MBX_MAX_CMDS 0x9E +#define MBX_RESUME_RPI 0x9E #define MBX_SLI2_CMD_MASK 0x80 +#define MBX_REG_VFI 0x9F +#define MBX_REG_FCFI 0xA0 +#define MBX_UNREG_VFI 0xA1 +#define MBX_UNREG_FCFI 0xA2 +#define MBX_INIT_VFI 0xA3 +#define MBX_INIT_VPI 0xA4 /* IOCB Commands */ @@ -1440,6 +1478,16 @@ typedef struct { /* FireFly BIU registers */ #define CMD_IOCB_LOGENTRY_CN 0x94 #define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 +/* Unhandled Data Security SLI Commands */ +#define DSSCMD_IWRITE64_CR 0xD8 +#define DSSCMD_IWRITE64_CX 0xD9 +#define DSSCMD_IREAD64_CR 0xDA +#define DSSCMD_IREAD64_CX 0xDB +#define DSSCMD_INVALIDATE_DEK 0xDC +#define DSSCMD_SET_KEK 0xDD +#define DSSCMD_GET_KEK_ID 0xDE +#define DSSCMD_GEN_XFER 
0xDF + #define CMD_MAX_IOCB_CMD 0xE6 #define CMD_IOCB_MASK 0xff @@ -1466,6 +1514,7 @@ typedef struct { /* FireFly BIU registers */ #define MBXERR_BAD_RCV_LENGTH 14 #define MBXERR_DMA_ERROR 15 #define MBXERR_ERROR 16 +#define MBXERR_LINK_DOWN 0x33 #define MBX_NOT_FINISHED 255 #define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ @@ -1504,32 +1553,6 @@ struct ulp_bde { #endif }; -struct ulp_bde64 { /* SLI-2 */ - union ULP_BDE_TUS { - uint32_t w; - struct { -#ifdef __BIG_ENDIAN_BITFIELD - uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED - VALUE !! */ - uint32_t bdeSize:24; /* Size of buffer (in bytes) */ -#else /* __LITTLE_ENDIAN_BITFIELD */ - uint32_t bdeSize:24; /* Size of buffer (in bytes) */ - uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED - VALUE !! */ -#endif -#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */ -#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */ -#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */ -#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */ -#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */ -#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */ -#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */ - } f; - } tus; - uint32_t addrLow; - uint32_t addrHigh; -}; - typedef struct ULP_BDL { /* SLI-2 */ #ifdef __BIG_ENDIAN_BITFIELD uint32_t bdeFlags:8; /* BDL Flags */ @@ -2287,7 +2310,7 @@ typedef struct { uint32_t rsvd3; uint32_t rsvd4; uint32_t rsvd5; - uint16_t rsvd6; + uint16_t vfi; uint16_t vpi; #else /* __LITTLE_ENDIAN */ uint32_t rsvd1; @@ -2297,7 +2320,7 @@ typedef struct { uint32_t rsvd4; uint32_t rsvd5; uint16_t vpi; - uint16_t rsvd6; + uint16_t vfi; #endif } REG_VPI_VAR; @@ -2457,7 +2480,7 @@ typedef struct { uint32_t entry_index:16; #endif - uint32_t rsvd1; + uint32_t sli4_length; uint32_t word_cnt; uint32_t resp_offset; } DUMP_VAR; @@ -2470,9 +2493,32 @@ typedef struct { #define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ #define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ +#define DMP_REGION_VPORT 0x16 /* VPort info region */ +#define DMP_VPORT_REGION_SIZE 0x200 +#define DMP_MBOX_OFFSET_WORD 0x5 + +#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */ +#define DMP_FCOEPARAM_RGN_SIZE 0x400 + #define WAKE_UP_PARMS_REGION_ID 4 #define WAKE_UP_PARMS_WORD_SIZE 15 +struct vport_rec { + uint8_t wwpn[8]; + uint8_t wwnn[8]; +}; + +#define VPORT_INFO_SIG 0x32324752 +#define VPORT_INFO_REV_MASK 0xff +#define VPORT_INFO_REV 0x1 +#define MAX_STATIC_VPORT_COUNT 16 +struct static_vport_info { + uint32_t signature; + uint32_t rev; + struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT]; + uint32_t resvd[66]; +}; + /* Option rom version structure */ struct prog_id { #ifdef __BIG_ENDIAN_BITFIELD @@ -2697,7 +2743,9 @@ typedef struct { #endif #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd1 : 23; /* Reserved */ + uint32_t rsvd1 : 19; /* Reserved */ + uint32_t cdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd2 : 3; /* Reserved */ uint32_t cbg : 1; /* Configure BlockGuard */ uint32_t cmv : 1; /* Configure Max VPIs */ uint32_t ccrp : 1; /* Config Command Ring Polling */ @@ -2717,10 +2765,14 @@ typedef struct { uint32_t ccrp : 1; /* Config Command Ring Polling */ uint32_t cmv : 1; /* Configure Max VPIs */ uint32_t cbg : 1; /* Configure BlockGuard */ - uint32_t rsvd1 : 23; /* Reserved */ + uint32_t rsvd2 : 3; /* Reserved */ + uint32_t cdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd1 : 19; /* Reserved */ #endif #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd2 : 23; /* 
Reserved */ + uint32_t rsvd3 : 19; /* Reserved */ + uint32_t gdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd4 : 3; /* Reserved */ uint32_t gbg : 1; /* Grant BlockGuard */ uint32_t gmv : 1; /* Grant Max VPIs */ uint32_t gcrp : 1; /* Grant Command Ring Polling */ @@ -2740,7 +2792,9 @@ typedef struct { uint32_t gcrp : 1; /* Grant Command Ring Polling */ uint32_t gmv : 1; /* Grant Max VPIs */ uint32_t gbg : 1; /* Grant BlockGuard */ - uint32_t rsvd2 : 23; /* Reserved */ + uint32_t rsvd4 : 3; /* Reserved */ + uint32_t gdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd3 : 19; /* Reserved */ #endif #ifdef __BIG_ENDIAN_BITFIELD @@ -2753,20 +2807,20 @@ typedef struct { #ifdef __BIG_ENDIAN_BITFIELD uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ - uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ + uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */ #else /* __LITTLE_ENDIAN */ - uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ + uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */ uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ #endif - uint32_t rsvd4; /* Reserved */ + uint32_t rsvd6; /* Reserved */ #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd5 : 16; /* Reserved */ + uint32_t rsvd7 : 16; /* Reserved */ uint32_t max_vpi : 16; /* Max number of virt N-Ports */ #else /* __LITTLE_ENDIAN */ uint32_t max_vpi : 16; /* Max number of virt N-Ports */ - uint32_t rsvd5 : 16; /* Reserved */ + uint32_t rsvd7 : 16; /* Reserved */ #endif } CONFIG_PORT_VAR; @@ -3666,3 +3720,5 @@ lpfc_error_lost_link(IOCB_t *iocbp) #define MENLO_TIMEOUT 30 #define SETVAR_MLOMNT 0x103107 #define SETVAR_MLORST 0x103007 + +#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */ diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h new file mode 100644 index 000000000000..39c34b3ad29d --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -0,0 +1,2141 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2009 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +/* Macros to deal with bit fields. Each bit field must have 3 #defines + * associated with it (_SHIFT, _MASK, and _WORD). + * EG. 
For a bit field that is in the 7th bit of the "field4" field of a + * structure and is 2 bits in size the following #defines must exist: + * struct temp { + * uint32_t field1; + * uint32_t field2; + * uint32_t field3; + * uint32_t field4; + * #define example_bit_field_SHIFT 7 + * #define example_bit_field_MASK 0x03 + * #define example_bit_field_WORD field4 + * uint32_t field5; + * }; + * Then the macros below may be used to get or set the value of that field. + * EG. To get the value of the bit field from the above example: + * struct temp t1; + * value = bf_get(example_bit_field, &t1); + * And then to set that bit field: + * bf_set(example_bit_field, &t1, 2); + * Or clear that bit field: + * bf_set(example_bit_field, &t1, 0); + */ +#define bf_get(name, ptr) \ + (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK) +#define bf_set(name, ptr, value) \ + ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \ + ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))) + +struct dma_address { + uint32_t addr_lo; + uint32_t addr_hi; +}; + +#define LPFC_SLI4_BAR0 1 +#define LPFC_SLI4_BAR1 2 +#define LPFC_SLI4_BAR2 4 + +#define LPFC_SLI4_MBX_EMBED true +#define LPFC_SLI4_MBX_NEMBED false + +#define LPFC_SLI4_MB_WORD_COUNT 64 +#define LPFC_MAX_MQ_PAGE 8 +#define LPFC_MAX_WQ_PAGE 8 +#define LPFC_MAX_CQ_PAGE 4 +#define LPFC_MAX_EQ_PAGE 8 + +#define LPFC_VIR_FUNC_MAX 32 /* Maximum number of virtual functions */ +#define LPFC_PCI_FUNC_MAX 5 /* Maximum number of PCI functions */ +#define LPFC_VFR_PAGE_SIZE 0x1000 /* 4KB BAR2 per-VF register page size */ + +/* Define SLI4 Alignment requirements. */ +#define LPFC_ALIGN_16_BYTE 16 +#define LPFC_ALIGN_64_BYTE 64 + +/* Define SLI4 specific definitions. */ +#define LPFC_MQ_CQE_BYTE_OFFSET 256 +#define LPFC_MBX_CMD_HDR_LENGTH 16 +#define LPFC_MBX_ERROR_RANGE 0x4000 +#define LPFC_BMBX_BIT1_ADDR_HI 0x2 +#define LPFC_BMBX_BIT1_ADDR_LO 0 +#define LPFC_RPI_HDR_COUNT 64 +#define LPFC_HDR_TEMPLATE_SIZE 4096 +#define LPFC_RPI_ALLOC_ERROR 0xFFFF +#define LPFC_FCF_RECORD_WD_CNT 132 +#define LPFC_ENTIRE_FCF_DATABASE 0 +#define LPFC_DFLT_FCF_INDEX 0 + +/* Virtual function numbers */ +#define LPFC_VF0 0 +#define LPFC_VF1 1 +#define LPFC_VF2 2 +#define LPFC_VF3 3 +#define LPFC_VF4 4 +#define LPFC_VF5 5 +#define LPFC_VF6 6 +#define LPFC_VF7 7 +#define LPFC_VF8 8 +#define LPFC_VF9 9 +#define LPFC_VF10 10 +#define LPFC_VF11 11 +#define LPFC_VF12 12 +#define LPFC_VF13 13 +#define LPFC_VF14 14 +#define LPFC_VF15 15 +#define LPFC_VF16 16 +#define LPFC_VF17 17 +#define LPFC_VF18 18 +#define LPFC_VF19 19 +#define LPFC_VF20 20 +#define LPFC_VF21 21 +#define LPFC_VF22 22 +#define LPFC_VF23 23 +#define LPFC_VF24 24 +#define LPFC_VF25 25 +#define LPFC_VF26 26 +#define LPFC_VF27 27 +#define LPFC_VF28 28 +#define LPFC_VF29 29 +#define LPFC_VF30 30 +#define LPFC_VF31 31 + +/* PCI function numbers */ +#define LPFC_PCI_FUNC0 0 +#define LPFC_PCI_FUNC1 1 +#define LPFC_PCI_FUNC2 2 +#define LPFC_PCI_FUNC3 3 +#define LPFC_PCI_FUNC4 4 + +/* Active interrupt test count */ +#define LPFC_ACT_INTR_CNT 4 + +/* Delay Multiplier constant */ +#define LPFC_DMULT_CONST 651042 +#define LPFC_MIM_IMAX 636 +#define LPFC_FP_DEF_IMAX 10000 +#define LPFC_SP_DEF_IMAX 10000 + +struct ulp_bde64 { + union ULP_BDE_TUS { + uint32_t w; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED + VALUE !! 
*/ + uint32_t bdeSize:24; /* Size of buffer (in bytes) */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t bdeSize:24; /* Size of buffer (in bytes) */ + uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED + VALUE !! */ +#endif +#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */ +#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */ +#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */ +#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */ +#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */ +#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */ +#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */ + } f; + } tus; + uint32_t addrLow; + uint32_t addrHigh; +}; + +struct lpfc_sli4_flags { + uint32_t word0; +#define lpfc_fip_flag_SHIFT 0 +#define lpfc_fip_flag_MASK 0x00000001 +#define lpfc_fip_flag_WORD word0 +}; + +/* event queue entry structure */ +struct lpfc_eqe { + uint32_t word0; +#define lpfc_eqe_resource_id_SHIFT 16 +#define lpfc_eqe_resource_id_MASK 0x000000FF +#define lpfc_eqe_resource_id_WORD word0 +#define lpfc_eqe_minor_code_SHIFT 4 +#define lpfc_eqe_minor_code_MASK 0x00000FFF +#define lpfc_eqe_minor_code_WORD word0 +#define lpfc_eqe_major_code_SHIFT 1 +#define lpfc_eqe_major_code_MASK 0x00000007 +#define lpfc_eqe_major_code_WORD word0 +#define lpfc_eqe_valid_SHIFT 0 +#define lpfc_eqe_valid_MASK 0x00000001 +#define lpfc_eqe_valid_WORD word0 +}; + +/* completion queue entry structure (common fields for all cqe types) */ +struct lpfc_cqe { + uint32_t reserved0; + uint32_t reserved1; + uint32_t reserved2; + uint32_t word3; +#define lpfc_cqe_valid_SHIFT 31 +#define lpfc_cqe_valid_MASK 0x00000001 +#define lpfc_cqe_valid_WORD word3 +#define lpfc_cqe_code_SHIFT 16 +#define lpfc_cqe_code_MASK 0x000000FF +#define lpfc_cqe_code_WORD word3 +}; + +/* Completion Queue Entry Status Codes */ +#define CQE_STATUS_SUCCESS 0x0 +#define CQE_STATUS_FCP_RSP_FAILURE 0x1 +#define CQE_STATUS_REMOTE_STOP 0x2 +#define CQE_STATUS_LOCAL_REJECT 0x3 +#define CQE_STATUS_NPORT_RJT 0x4 +#define CQE_STATUS_FABRIC_RJT 0x5 +#define CQE_STATUS_NPORT_BSY 0x6 +#define CQE_STATUS_FABRIC_BSY 0x7 +#define CQE_STATUS_INTERMED_RSP 0x8 +#define CQE_STATUS_LS_RJT 0x9 +#define CQE_STATUS_CMD_REJECT 0xb +#define CQE_STATUS_FCP_TGT_LENCHECK 0xc +#define CQE_STATUS_NEED_BUFF_ENTRY 0xf + +/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). 
*/ +#define CQE_HW_STATUS_NO_ERR 0x0 +#define CQE_HW_STATUS_UNDERRUN 0x1 +#define CQE_HW_STATUS_OVERRUN 0x2 + +/* Completion Queue Entry Codes */ +#define CQE_CODE_COMPL_WQE 0x1 +#define CQE_CODE_RELEASE_WQE 0x2 +#define CQE_CODE_RECEIVE 0x4 +#define CQE_CODE_XRI_ABORTED 0x5 + +/* completion queue entry for wqe completions */ +struct lpfc_wcqe_complete { + uint32_t word0; +#define lpfc_wcqe_c_request_tag_SHIFT 16 +#define lpfc_wcqe_c_request_tag_MASK 0x0000FFFF +#define lpfc_wcqe_c_request_tag_WORD word0 +#define lpfc_wcqe_c_status_SHIFT 8 +#define lpfc_wcqe_c_status_MASK 0x000000FF +#define lpfc_wcqe_c_status_WORD word0 +#define lpfc_wcqe_c_hw_status_SHIFT 0 +#define lpfc_wcqe_c_hw_status_MASK 0x000000FF +#define lpfc_wcqe_c_hw_status_WORD word0 + uint32_t total_data_placed; + uint32_t parameter; + uint32_t word3; +#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_wcqe_c_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_wcqe_c_xb_SHIFT 28 +#define lpfc_wcqe_c_xb_MASK 0x00000001 +#define lpfc_wcqe_c_xb_WORD word3 +#define lpfc_wcqe_c_pv_SHIFT 27 +#define lpfc_wcqe_c_pv_MASK 0x00000001 +#define lpfc_wcqe_c_pv_WORD word3 +#define lpfc_wcqe_c_priority_SHIFT 24 +#define lpfc_wcqe_c_priority_MASK 0x00000007 +#define lpfc_wcqe_c_priority_WORD word3 +#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK +#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD +}; + +/* completion queue entry for wqe release */ +struct lpfc_wcqe_release { + uint32_t reserved0; + uint32_t reserved1; + uint32_t word2; +#define lpfc_wcqe_r_wq_id_SHIFT 16 +#define lpfc_wcqe_r_wq_id_MASK 0x0000FFFF +#define lpfc_wcqe_r_wq_id_WORD word2 +#define lpfc_wcqe_r_wqe_index_SHIFT 0 +#define lpfc_wcqe_r_wqe_index_MASK 0x0000FFFF +#define lpfc_wcqe_r_wqe_index_WORD word2 + uint32_t word3; +#define lpfc_wcqe_r_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_wcqe_r_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_wcqe_r_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_wcqe_r_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_wcqe_r_code_MASK lpfc_cqe_code_MASK +#define lpfc_wcqe_r_code_WORD lpfc_cqe_code_WORD +}; + +struct sli4_wcqe_xri_aborted { + uint32_t word0; +#define lpfc_wcqe_xa_status_SHIFT 8 +#define lpfc_wcqe_xa_status_MASK 0x000000FF +#define lpfc_wcqe_xa_status_WORD word0 + uint32_t parameter; + uint32_t word2; +#define lpfc_wcqe_xa_remote_xid_SHIFT 16 +#define lpfc_wcqe_xa_remote_xid_MASK 0x0000FFFF +#define lpfc_wcqe_xa_remote_xid_WORD word2 +#define lpfc_wcqe_xa_xri_SHIFT 0 +#define lpfc_wcqe_xa_xri_MASK 0x0000FFFF +#define lpfc_wcqe_xa_xri_WORD word2 + uint32_t word3; +#define lpfc_wcqe_xa_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_wcqe_xa_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_wcqe_xa_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_wcqe_xa_ia_SHIFT 30 +#define lpfc_wcqe_xa_ia_MASK 0x00000001 +#define lpfc_wcqe_xa_ia_WORD word3 +#define CQE_XRI_ABORTED_IA_REMOTE 0 +#define CQE_XRI_ABORTED_IA_LOCAL 1 +#define lpfc_wcqe_xa_br_SHIFT 29 +#define lpfc_wcqe_xa_br_MASK 0x00000001 +#define lpfc_wcqe_xa_br_WORD word3 +#define CQE_XRI_ABORTED_BR_BA_ACC 0 +#define CQE_XRI_ABORTED_BR_BA_RJT 1 +#define lpfc_wcqe_xa_eo_SHIFT 28 +#define lpfc_wcqe_xa_eo_MASK 0x00000001 +#define lpfc_wcqe_xa_eo_WORD word3 +#define CQE_XRI_ABORTED_EO_REMOTE 0 +#define CQE_XRI_ABORTED_EO_LOCAL 1 +#define lpfc_wcqe_xa_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_wcqe_xa_code_MASK lpfc_cqe_code_MASK +#define lpfc_wcqe_xa_code_WORD lpfc_cqe_code_WORD +}; + 
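To make the _SHIFT/_MASK/_WORD convention concrete, here is a minimal user-space sketch: the two accessor macros are copied from the header comment above, while the toy structure and its field names are invented purely for illustration of how bf_set()/bf_get() pack and unpack a CQE-style status field:

#include <stdio.h>
#include <stdint.h>

#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

/* Toy CQE-like word: status in bits 15:8, valid flag in bit 31. */
struct toy_cqe {
	uint32_t word0;
#define toy_status_SHIFT	8
#define toy_status_MASK		0x000000FFU
#define toy_status_WORD		word0
#define toy_valid_SHIFT		31
#define toy_valid_MASK		0x00000001U
#define toy_valid_WORD		word0
};

int main(void)
{
	struct toy_cqe cqe = { .word0 = 0 };

	bf_set(toy_valid, &cqe, 1U);
	bf_set(toy_status, &cqe, 0x3U);	/* e.g. a LOCAL_REJECT-style code */
	printf("word0=0x%08x valid=%u status=0x%x\n",
	       cqe.word0,
	       (unsigned)bf_get(toy_valid, &cqe),
	       (unsigned)bf_get(toy_status, &cqe));
	/* prints: word0=0x80000300 valid=1 status=0x3 */
	return 0;
}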
+/* completion queue entry structure for rqe completion */ +struct lpfc_rcqe { + uint32_t word0; +#define lpfc_rcqe_bindex_SHIFT 16 +#define lpfc_rcqe_bindex_MASK 0x0000FFF +#define lpfc_rcqe_bindex_WORD word0 +#define lpfc_rcqe_status_SHIFT 8 +#define lpfc_rcqe_status_MASK 0x000000FF +#define lpfc_rcqe_status_WORD word0 +#define FC_STATUS_RQ_SUCCESS 0x10 /* Async receive successful */ +#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */ +#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */ +#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */ + uint32_t reserved1; + uint32_t word2; +#define lpfc_rcqe_length_SHIFT 16 +#define lpfc_rcqe_length_MASK 0x0000FFFF +#define lpfc_rcqe_length_WORD word2 +#define lpfc_rcqe_rq_id_SHIFT 6 +#define lpfc_rcqe_rq_id_MASK 0x000003FF +#define lpfc_rcqe_rq_id_WORD word2 +#define lpfc_rcqe_fcf_id_SHIFT 0 +#define lpfc_rcqe_fcf_id_MASK 0x0000003F +#define lpfc_rcqe_fcf_id_WORD word2 + uint32_t word3; +#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_rcqe_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_rcqe_port_SHIFT 30 +#define lpfc_rcqe_port_MASK 0x00000001 +#define lpfc_rcqe_port_WORD word3 +#define lpfc_rcqe_hdr_length_SHIFT 24 +#define lpfc_rcqe_hdr_length_MASK 0x0000001F +#define lpfc_rcqe_hdr_length_WORD word3 +#define lpfc_rcqe_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_rcqe_code_MASK lpfc_cqe_code_MASK +#define lpfc_rcqe_code_WORD lpfc_cqe_code_WORD +#define lpfc_rcqe_eof_SHIFT 8 +#define lpfc_rcqe_eof_MASK 0x000000FF +#define lpfc_rcqe_eof_WORD word3 +#define FCOE_EOFn 0x41 +#define FCOE_EOFt 0x42 +#define FCOE_EOFni 0x49 +#define FCOE_EOFa 0x50 +#define lpfc_rcqe_sof_SHIFT 0 +#define lpfc_rcqe_sof_MASK 0x000000FF +#define lpfc_rcqe_sof_WORD word3 +#define FCOE_SOFi2 0x2d +#define FCOE_SOFi3 0x2e +#define FCOE_SOFn2 0x35 +#define FCOE_SOFn3 0x36 +}; + +struct lpfc_wqe_generic{ + struct ulp_bde64 bde; + uint32_t word3; + uint32_t word4; + uint32_t word5; + uint32_t word6; +#define lpfc_wqe_gen_context_SHIFT 16 +#define lpfc_wqe_gen_context_MASK 0x0000FFFF +#define lpfc_wqe_gen_context_WORD word6 +#define lpfc_wqe_gen_xri_SHIFT 0 +#define lpfc_wqe_gen_xri_MASK 0x0000FFFF +#define lpfc_wqe_gen_xri_WORD word6 + uint32_t word7; +#define lpfc_wqe_gen_lnk_SHIFT 23 +#define lpfc_wqe_gen_lnk_MASK 0x00000001 +#define lpfc_wqe_gen_lnk_WORD word7 +#define lpfc_wqe_gen_erp_SHIFT 22 +#define lpfc_wqe_gen_erp_MASK 0x00000001 +#define lpfc_wqe_gen_erp_WORD word7 +#define lpfc_wqe_gen_pu_SHIFT 20 +#define lpfc_wqe_gen_pu_MASK 0x00000003 +#define lpfc_wqe_gen_pu_WORD word7 +#define lpfc_wqe_gen_class_SHIFT 16 +#define lpfc_wqe_gen_class_MASK 0x00000007 +#define lpfc_wqe_gen_class_WORD word7 +#define lpfc_wqe_gen_command_SHIFT 8 +#define lpfc_wqe_gen_command_MASK 0x000000FF +#define lpfc_wqe_gen_command_WORD word7 +#define lpfc_wqe_gen_status_SHIFT 4 +#define lpfc_wqe_gen_status_MASK 0x0000000F +#define lpfc_wqe_gen_status_WORD word7 +#define lpfc_wqe_gen_ct_SHIFT 2 +#define lpfc_wqe_gen_ct_MASK 0x00000007 +#define lpfc_wqe_gen_ct_WORD word7 + uint32_t abort_tag; + uint32_t word9; +#define lpfc_wqe_gen_request_tag_SHIFT 0 +#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF +#define lpfc_wqe_gen_request_tag_WORD word9 + uint32_t word10; +#define lpfc_wqe_gen_ccp_SHIFT 24 +#define lpfc_wqe_gen_ccp_MASK 0x000000FF +#define lpfc_wqe_gen_ccp_WORD word10 +#define lpfc_wqe_gen_ccpe_SHIFT 23 +#define lpfc_wqe_gen_ccpe_MASK 0x00000001 +#define lpfc_wqe_gen_ccpe_WORD word10 
+#define lpfc_wqe_gen_pv_SHIFT 19 +#define lpfc_wqe_gen_pv_MASK 0x00000001 +#define lpfc_wqe_gen_pv_WORD word10 +#define lpfc_wqe_gen_pri_SHIFT 16 +#define lpfc_wqe_gen_pri_MASK 0x00000007 +#define lpfc_wqe_gen_pri_WORD word10 + uint32_t word11; +#define lpfc_wqe_gen_cq_id_SHIFT 16 +#define lpfc_wqe_gen_cq_id_MASK 0x000003FF +#define lpfc_wqe_gen_cq_id_WORD word11 +#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff +#define lpfc_wqe_gen_wqec_SHIFT 7 +#define lpfc_wqe_gen_wqec_MASK 0x00000001 +#define lpfc_wqe_gen_wqec_WORD word11 +#define lpfc_wqe_gen_cmd_type_SHIFT 0 +#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F +#define lpfc_wqe_gen_cmd_type_WORD word11 + uint32_t payload[4]; +}; + +struct lpfc_rqe { + uint32_t address_hi; + uint32_t address_lo; +}; + +/* buffer descriptors */ +struct lpfc_bde4 { + uint32_t addr_hi; + uint32_t addr_lo; + uint32_t word2; +#define lpfc_bde4_last_SHIFT 31 +#define lpfc_bde4_last_MASK 0x00000001 +#define lpfc_bde4_last_WORD word2 +#define lpfc_bde4_sge_offset_SHIFT 0 +#define lpfc_bde4_sge_offset_MASK 0x000003FF +#define lpfc_bde4_sge_offset_WORD word2 + uint32_t word3; +#define lpfc_bde4_length_SHIFT 0 +#define lpfc_bde4_length_MASK 0x000000FF +#define lpfc_bde4_length_WORD word3 +}; + +struct lpfc_register { + uint32_t word0; +}; + +#define LPFC_UERR_STATUS_HI 0x00A4 +#define LPFC_UERR_STATUS_LO 0x00A0 +#define LPFC_ONLINE0 0x00B0 +#define LPFC_ONLINE1 0x00B4 +#define LPFC_SCRATCHPAD 0x0058 + +/* BAR0 Registers */ +#define LPFC_HST_STATE 0x00AC +#define lpfc_hst_state_perr_SHIFT 31 +#define lpfc_hst_state_perr_MASK 0x1 +#define lpfc_hst_state_perr_WORD word0 +#define lpfc_hst_state_sfi_SHIFT 30 +#define lpfc_hst_state_sfi_MASK 0x1 +#define lpfc_hst_state_sfi_WORD word0 +#define lpfc_hst_state_nip_SHIFT 29 +#define lpfc_hst_state_nip_MASK 0x1 +#define lpfc_hst_state_nip_WORD word0 +#define lpfc_hst_state_ipc_SHIFT 28 +#define lpfc_hst_state_ipc_MASK 0x1 +#define lpfc_hst_state_ipc_WORD word0 +#define lpfc_hst_state_xrom_SHIFT 27 +#define lpfc_hst_state_xrom_MASK 0x1 +#define lpfc_hst_state_xrom_WORD word0 +#define lpfc_hst_state_dl_SHIFT 26 +#define lpfc_hst_state_dl_MASK 0x1 +#define lpfc_hst_state_dl_WORD word0 +#define lpfc_hst_state_port_status_SHIFT 0 +#define lpfc_hst_state_port_status_MASK 0xFFFF +#define lpfc_hst_state_port_status_WORD word0 + +#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000 +#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001 +#define LPFC_POST_STAGE_HOST_RDY 0x0002 +#define LPFC_POST_STAGE_BE_RESET 0x0003 +#define LPFC_POST_STAGE_SEEPROM_CS_START 0x0100 +#define LPFC_POST_STAGE_SEEPROM_CS_DONE 0x0101 +#define LPFC_POST_STAGE_DDR_CONFIG_START 0x0200 +#define LPFC_POST_STAGE_DDR_CONFIG_DONE 0x0201 +#define LPFC_POST_STAGE_DDR_CALIBRATE_START 0x0300 +#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE 0x0301 +#define LPFC_POST_STAGE_DDR_TEST_START 0x0400 +#define LPFC_POST_STAGE_DDR_TEST_DONE 0x0401 +#define LPFC_POST_STAGE_REDBOOT_INIT_START 0x0600 +#define LPFC_POST_STAGE_REDBOOT_INIT_DONE 0x0601 +#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START 0x0700 +#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE 0x0701 +#define LPFC_POST_STAGE_ARMFW_START 0x0800 +#define LPFC_POST_STAGE_DHCP_QUERY_START 0x0900 +#define LPFC_POST_STAGE_DHCP_QUERY_DONE 0x0901 +#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START 0x0A00 +#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE 0x0A01 +#define LPFC_POST_STAGE_RC_OPTION_SET 0x0B00 +#define LPFC_POST_STAGE_SWITCH_LINK 0x0B01 +#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE 0x0B02 +#define LPFC_POST_STAGE_PERFROM_TFTP 0x0B03 +#define 
LPFC_POST_STAGE_PARSE_XML 0x0B04 +#define LPFC_POST_STAGE_DOWNLOAD_IMAGE 0x0B05 +#define LPFC_POST_STAGE_FLASH_IMAGE 0x0B06 +#define LPFC_POST_STAGE_RC_DONE 0x0B07 +#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08 +#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00 +#define LPFC_POST_STAGE_ARMFW_READY 0xC000 +#define LPFC_POST_STAGE_ARMFW_UE 0xF000 + +#define lpfc_scratchpad_slirev_SHIFT 4 +#define lpfc_scratchpad_slirev_MASK 0xF +#define lpfc_scratchpad_slirev_WORD word0 +#define lpfc_scratchpad_chiptype_SHIFT 8 +#define lpfc_scratchpad_chiptype_MASK 0xFF +#define lpfc_scratchpad_chiptype_WORD word0 +#define lpfc_scratchpad_featurelevel1_SHIFT 16 +#define lpfc_scratchpad_featurelevel1_MASK 0xFF +#define lpfc_scratchpad_featurelevel1_WORD word0 +#define lpfc_scratchpad_featurelevel2_SHIFT 24 +#define lpfc_scratchpad_featurelevel2_MASK 0xFF +#define lpfc_scratchpad_featurelevel2_WORD word0 + +/* BAR1 Registers */ +#define LPFC_IMR_MASK_ALL 0xFFFFFFFF +#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF + +#define LPFC_HST_ISR0 0x0C18 +#define LPFC_HST_ISR1 0x0C1C +#define LPFC_HST_ISR2 0x0C20 +#define LPFC_HST_ISR3 0x0C24 +#define LPFC_HST_ISR4 0x0C28 + +#define LPFC_HST_IMR0 0x0C48 +#define LPFC_HST_IMR1 0x0C4C +#define LPFC_HST_IMR2 0x0C50 +#define LPFC_HST_IMR3 0x0C54 +#define LPFC_HST_IMR4 0x0C58 + +#define LPFC_HST_ISCR0 0x0C78 +#define LPFC_HST_ISCR1 0x0C7C +#define LPFC_HST_ISCR2 0x0C80 +#define LPFC_HST_ISCR3 0x0C84 +#define LPFC_HST_ISCR4 0x0C88 + +#define LPFC_SLI4_INTR0 BIT0 +#define LPFC_SLI4_INTR1 BIT1 +#define LPFC_SLI4_INTR2 BIT2 +#define LPFC_SLI4_INTR3 BIT3 +#define LPFC_SLI4_INTR4 BIT4 +#define LPFC_SLI4_INTR5 BIT5 +#define LPFC_SLI4_INTR6 BIT6 +#define LPFC_SLI4_INTR7 BIT7 +#define LPFC_SLI4_INTR8 BIT8 +#define LPFC_SLI4_INTR9 BIT9 +#define LPFC_SLI4_INTR10 BIT10 +#define LPFC_SLI4_INTR11 BIT11 +#define LPFC_SLI4_INTR12 BIT12 +#define LPFC_SLI4_INTR13 BIT13 +#define LPFC_SLI4_INTR14 BIT14 +#define LPFC_SLI4_INTR15 BIT15 +#define LPFC_SLI4_INTR16 BIT16 +#define LPFC_SLI4_INTR17 BIT17 +#define LPFC_SLI4_INTR18 BIT18 +#define LPFC_SLI4_INTR19 BIT19 +#define LPFC_SLI4_INTR20 BIT20 +#define LPFC_SLI4_INTR21 BIT21 +#define LPFC_SLI4_INTR22 BIT22 +#define LPFC_SLI4_INTR23 BIT23 +#define LPFC_SLI4_INTR24 BIT24 +#define LPFC_SLI4_INTR25 BIT25 +#define LPFC_SLI4_INTR26 BIT26 +#define LPFC_SLI4_INTR27 BIT27 +#define LPFC_SLI4_INTR28 BIT28 +#define LPFC_SLI4_INTR29 BIT29 +#define LPFC_SLI4_INTR30 BIT30 +#define LPFC_SLI4_INTR31 BIT31 + +/* BAR2 Registers */ +#define LPFC_RQ_DOORBELL 0x00A0 +#define lpfc_rq_doorbell_num_posted_SHIFT 16 +#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF +#define lpfc_rq_doorbell_num_posted_WORD word0 +#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */ +#define lpfc_rq_doorbell_id_SHIFT 0 +#define lpfc_rq_doorbell_id_MASK 0x03FF +#define lpfc_rq_doorbell_id_WORD word0 + +#define LPFC_WQ_DOORBELL 0x0040 +#define lpfc_wq_doorbell_num_posted_SHIFT 24 +#define lpfc_wq_doorbell_num_posted_MASK 0x00FF +#define lpfc_wq_doorbell_num_posted_WORD word0 +#define lpfc_wq_doorbell_index_SHIFT 16 +#define lpfc_wq_doorbell_index_MASK 0x00FF +#define lpfc_wq_doorbell_index_WORD word0 +#define lpfc_wq_doorbell_id_SHIFT 0 +#define lpfc_wq_doorbell_id_MASK 0xFFFF +#define lpfc_wq_doorbell_id_WORD word0 + +#define LPFC_EQCQ_DOORBELL 0x0120 +#define lpfc_eqcq_doorbell_arm_SHIFT 29 +#define lpfc_eqcq_doorbell_arm_MASK 0x0001 +#define lpfc_eqcq_doorbell_arm_WORD word0 +#define lpfc_eqcq_doorbell_num_released_SHIFT 16 +#define lpfc_eqcq_doorbell_num_released_MASK 0x1FFF +#define 
lpfc_eqcq_doorbell_num_released_WORD word0 +#define lpfc_eqcq_doorbell_qt_SHIFT 10 +#define lpfc_eqcq_doorbell_qt_MASK 0x0001 +#define lpfc_eqcq_doorbell_qt_WORD word0 +#define LPFC_QUEUE_TYPE_COMPLETION 0 +#define LPFC_QUEUE_TYPE_EVENT 1 +#define lpfc_eqcq_doorbell_eqci_SHIFT 9 +#define lpfc_eqcq_doorbell_eqci_MASK 0x0001 +#define lpfc_eqcq_doorbell_eqci_WORD word0 +#define lpfc_eqcq_doorbell_cqid_SHIFT 0 +#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF +#define lpfc_eqcq_doorbell_cqid_WORD word0 +#define lpfc_eqcq_doorbell_eqid_SHIFT 0 +#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF +#define lpfc_eqcq_doorbell_eqid_WORD word0 + +#define LPFC_BMBX 0x0160 +#define lpfc_bmbx_addr_SHIFT 2 +#define lpfc_bmbx_addr_MASK 0x3FFFFFFF +#define lpfc_bmbx_addr_WORD word0 +#define lpfc_bmbx_hi_SHIFT 1 +#define lpfc_bmbx_hi_MASK 0x0001 +#define lpfc_bmbx_hi_WORD word0 +#define lpfc_bmbx_rdy_SHIFT 0 +#define lpfc_bmbx_rdy_MASK 0x0001 +#define lpfc_bmbx_rdy_WORD word0 + +#define LPFC_MQ_DOORBELL 0x0140 +#define lpfc_mq_doorbell_num_posted_SHIFT 16 +#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF +#define lpfc_mq_doorbell_num_posted_WORD word0 +#define lpfc_mq_doorbell_id_SHIFT 0 +#define lpfc_mq_doorbell_id_MASK 0x03FF +#define lpfc_mq_doorbell_id_WORD word0 + +struct lpfc_sli4_cfg_mhdr { + uint32_t word1; +#define lpfc_mbox_hdr_emb_SHIFT 0 +#define lpfc_mbox_hdr_emb_MASK 0x00000001 +#define lpfc_mbox_hdr_emb_WORD word1 +#define lpfc_mbox_hdr_sge_cnt_SHIFT 3 +#define lpfc_mbox_hdr_sge_cnt_MASK 0x0000001F +#define lpfc_mbox_hdr_sge_cnt_WORD word1 + uint32_t payload_length; + uint32_t tag_lo; + uint32_t tag_hi; + uint32_t reserved5; +}; + +union lpfc_sli4_cfg_shdr { + struct { + uint32_t word6; +#define lpfc_mbox_hdr_opcode_SHIFT 0 +#define lpfc_mbox_hdr_opcode_MASK 0x000000FF +#define lpfc_mbox_hdr_opcode_WORD word6 +#define lpfc_mbox_hdr_subsystem_SHIFT 8 +#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF +#define lpfc_mbox_hdr_subsystem_WORD word6 +#define lpfc_mbox_hdr_port_number_SHIFT 16 +#define lpfc_mbox_hdr_port_number_MASK 0x000000FF +#define lpfc_mbox_hdr_port_number_WORD word6 +#define lpfc_mbox_hdr_domain_SHIFT 24 +#define lpfc_mbox_hdr_domain_MASK 0x000000FF +#define lpfc_mbox_hdr_domain_WORD word6 + uint32_t timeout; + uint32_t request_length; + uint32_t reserved9; + } request; + struct { + uint32_t word6; +#define lpfc_mbox_hdr_opcode_SHIFT 0 +#define lpfc_mbox_hdr_opcode_MASK 0x000000FF +#define lpfc_mbox_hdr_opcode_WORD word6 +#define lpfc_mbox_hdr_subsystem_SHIFT 8 +#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF +#define lpfc_mbox_hdr_subsystem_WORD word6 +#define lpfc_mbox_hdr_domain_SHIFT 24 +#define lpfc_mbox_hdr_domain_MASK 0x000000FF +#define lpfc_mbox_hdr_domain_WORD word6 + uint32_t word7; +#define lpfc_mbox_hdr_status_SHIFT 0 +#define lpfc_mbox_hdr_status_MASK 0x000000FF +#define lpfc_mbox_hdr_status_WORD word7 +#define lpfc_mbox_hdr_add_status_SHIFT 8 +#define lpfc_mbox_hdr_add_status_MASK 0x000000FF +#define lpfc_mbox_hdr_add_status_WORD word7 + uint32_t response_length; + uint32_t actual_response_length; + } response; +}; + +/* Mailbox structures */ +struct mbox_header { + struct lpfc_sli4_cfg_mhdr cfg_mhdr; + union lpfc_sli4_cfg_shdr cfg_shdr; +}; + +/* Subsystem Definitions */ +#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1 +#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC + +/* Device Specific Definitions */ + +/* The HOST ENDIAN defines are in Big Endian format. 
*/ +#define HOST_ENDIAN_LOW_WORD0 0xFF3412FF +#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF + +/* Common Opcodes */ +#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C +#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D +#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15 +#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20 +#define LPFC_MBOX_OPCODE_NOP 0x21 +#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 +#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 +#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 +#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D + +/* FCoE Opcodes */ +#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01 +#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY 0x02 +#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES 0x03 +#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES 0x04 +#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE 0x05 +#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY 0x06 +#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE 0x08 +#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09 +#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A +#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B + +/* Mailbox command structures */ +struct eq_context { + uint32_t word0; +#define lpfc_eq_context_size_SHIFT 31 +#define lpfc_eq_context_size_MASK 0x00000001 +#define lpfc_eq_context_size_WORD word0 +#define LPFC_EQE_SIZE_4 0x0 +#define LPFC_EQE_SIZE_16 0x1 +#define lpfc_eq_context_valid_SHIFT 29 +#define lpfc_eq_context_valid_MASK 0x00000001 +#define lpfc_eq_context_valid_WORD word0 + uint32_t word1; +#define lpfc_eq_context_count_SHIFT 26 +#define lpfc_eq_context_count_MASK 0x00000003 +#define lpfc_eq_context_count_WORD word1 +#define LPFC_EQ_CNT_256 0x0 +#define LPFC_EQ_CNT_512 0x1 +#define LPFC_EQ_CNT_1024 0x2 +#define LPFC_EQ_CNT_2048 0x3 +#define LPFC_EQ_CNT_4096 0x4 + uint32_t word2; +#define lpfc_eq_context_delay_multi_SHIFT 13 +#define lpfc_eq_context_delay_multi_MASK 0x000003FF +#define lpfc_eq_context_delay_multi_WORD word2 + uint32_t reserved3; +}; + +struct sgl_page_pairs { + uint32_t sgl_pg0_addr_lo; + uint32_t sgl_pg0_addr_hi; + uint32_t sgl_pg1_addr_lo; + uint32_t sgl_pg1_addr_hi; +}; + +struct lpfc_mbx_post_sgl_pages { + struct mbox_header header; + uint32_t word0; +#define lpfc_post_sgl_pages_xri_SHIFT 0 +#define lpfc_post_sgl_pages_xri_MASK 0x0000FFFF +#define lpfc_post_sgl_pages_xri_WORD word0 +#define lpfc_post_sgl_pages_xricnt_SHIFT 16 +#define lpfc_post_sgl_pages_xricnt_MASK 0x0000FFFF +#define lpfc_post_sgl_pages_xricnt_WORD word0 + struct sgl_page_pairs sgl_pg_pairs[1]; +}; + +/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */ +struct lpfc_mbx_post_uembed_sgl_page1 { + union lpfc_sli4_cfg_shdr cfg_shdr; + uint32_t word0; + struct sgl_page_pairs sgl_pg_pairs; +}; + +struct lpfc_mbx_sge { + uint32_t pa_lo; + uint32_t pa_hi; + uint32_t length; +}; + +struct lpfc_mbx_nembed_cmd { + struct lpfc_sli4_cfg_mhdr cfg_mhdr; +#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19 + struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES]; +}; + +struct lpfc_mbx_nembed_sge_virt { + void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES]; +}; + +struct lpfc_mbx_eq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_eq_create_num_pages_SHIFT 0 +#define lpfc_mbx_eq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_eq_create_num_pages_WORD word0 + struct eq_context context; + struct dma_address page[LPFC_MAX_EQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_eq_create_q_id_SHIFT 0 +#define lpfc_mbx_eq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_eq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_eq_destroy { + struct 
mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_eq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_eq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_eq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct lpfc_mbx_nop { + struct mbox_header header; + uint32_t context[2]; +}; + +struct cq_context { + uint32_t word0; +#define lpfc_cq_context_event_SHIFT 31 +#define lpfc_cq_context_event_MASK 0x00000001 +#define lpfc_cq_context_event_WORD word0 +#define lpfc_cq_context_valid_SHIFT 29 +#define lpfc_cq_context_valid_MASK 0x00000001 +#define lpfc_cq_context_valid_WORD word0 +#define lpfc_cq_context_count_SHIFT 27 +#define lpfc_cq_context_count_MASK 0x00000003 +#define lpfc_cq_context_count_WORD word0 +#define LPFC_CQ_CNT_256 0x0 +#define LPFC_CQ_CNT_512 0x1 +#define LPFC_CQ_CNT_1024 0x2 + uint32_t word1; +#define lpfc_cq_eq_id_SHIFT 22 +#define lpfc_cq_eq_id_MASK 0x000000FF +#define lpfc_cq_eq_id_WORD word1 + uint32_t reserved0; + uint32_t reserved1; +}; + +struct lpfc_mbx_cq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_cq_create_num_pages_SHIFT 0 +#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_num_pages_WORD word0 + struct cq_context context; + struct dma_address page[LPFC_MAX_CQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_cq_create_q_id_SHIFT 0 +#define lpfc_mbx_cq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_cq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_cq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_cq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_cq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct wq_context { + uint32_t reserved0; + uint32_t reserved1; + uint32_t reserved2; + uint32_t reserved3; +}; + +struct lpfc_mbx_wq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_wq_create_num_pages_SHIFT 0 +#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_num_pages_WORD word0 +#define lpfc_mbx_wq_create_cq_id_SHIFT 16 +#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_cq_id_WORD word0 + struct dma_address page[LPFC_MAX_WQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_wq_create_q_id_SHIFT 0 +#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_wq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_wq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_wq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_wq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +#define LPFC_HDR_BUF_SIZE 128 +#define LPFC_DATA_BUF_SIZE 4096 +struct rq_context { + uint32_t word0; +#define lpfc_rq_context_rq_size_SHIFT 16 +#define lpfc_rq_context_rq_size_MASK 0x0000000F +#define lpfc_rq_context_rq_size_WORD word0 +#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ +#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ +#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ +#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ + uint32_t reserved1; + uint32_t word2; +#define lpfc_rq_context_cq_id_SHIFT 16 +#define lpfc_rq_context_cq_id_MASK 0x000003FF +#define lpfc_rq_context_cq_id_WORD word2 +#define 
lpfc_rq_context_buf_size_SHIFT 0 +#define lpfc_rq_context_buf_size_MASK 0x0000FFFF +#define lpfc_rq_context_buf_size_WORD word2 + uint32_t reserved3; +}; + +struct lpfc_mbx_rq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_rq_create_num_pages_SHIFT 0 +#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_num_pages_WORD word0 + struct rq_context context; + struct dma_address page[LPFC_MAX_WQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_rq_create_q_id_SHIFT 0 +#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_rq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_rq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_rq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_rq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct mq_context { + uint32_t word0; +#define lpfc_mq_context_cq_id_SHIFT 22 +#define lpfc_mq_context_cq_id_MASK 0x000003FF +#define lpfc_mq_context_cq_id_WORD word0 +#define lpfc_mq_context_count_SHIFT 16 +#define lpfc_mq_context_count_MASK 0x0000000F +#define lpfc_mq_context_count_WORD word0 +#define LPFC_MQ_CNT_16 0x5 +#define LPFC_MQ_CNT_32 0x6 +#define LPFC_MQ_CNT_64 0x7 +#define LPFC_MQ_CNT_128 0x8 + uint32_t word1; +#define lpfc_mq_context_valid_SHIFT 31 +#define lpfc_mq_context_valid_MASK 0x00000001 +#define lpfc_mq_context_valid_WORD word1 + uint32_t reserved2; + uint32_t reserved3; +}; + +struct lpfc_mbx_mq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_mq_create_num_pages_SHIFT 0 +#define lpfc_mbx_mq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_num_pages_WORD word0 + struct mq_context context; + struct dma_address page[LPFC_MAX_MQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_mq_create_q_id_SHIFT 0 +#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_mq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_mq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_mq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_mq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct lpfc_mbx_post_hdr_tmpl { + struct mbox_header header; + uint32_t word10; +#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT 0 +#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK 0x0000FFFF +#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD word10 +#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT 16 +#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK 0x0000FFFF +#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD word10 + uint32_t rpi_paddr_lo; + uint32_t rpi_paddr_hi; +}; + +struct sli4_sge { /* SLI-4 */ + uint32_t addr_hi; + uint32_t addr_lo; + + uint32_t word2; +#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/ +#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF +#define lpfc_sli4_sge_offset_WORD word2 +#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets + this flag !! 
*/ +#define lpfc_sli4_sge_last_MASK 0x00000001 +#define lpfc_sli4_sge_last_WORD word2 + uint32_t word3; +#define lpfc_sli4_sge_len_SHIFT 0 +#define lpfc_sli4_sge_len_MASK 0x0001FFFF +#define lpfc_sli4_sge_len_WORD word3 +}; + +struct fcf_record { + uint32_t max_rcv_size; + uint32_t fka_adv_period; + uint32_t fip_priority; + uint32_t word3; +#define lpfc_fcf_record_mac_0_SHIFT 0 +#define lpfc_fcf_record_mac_0_MASK 0x000000FF +#define lpfc_fcf_record_mac_0_WORD word3 +#define lpfc_fcf_record_mac_1_SHIFT 8 +#define lpfc_fcf_record_mac_1_MASK 0x000000FF +#define lpfc_fcf_record_mac_1_WORD word3 +#define lpfc_fcf_record_mac_2_SHIFT 16 +#define lpfc_fcf_record_mac_2_MASK 0x000000FF +#define lpfc_fcf_record_mac_2_WORD word3 +#define lpfc_fcf_record_mac_3_SHIFT 24 +#define lpfc_fcf_record_mac_3_MASK 0x000000FF +#define lpfc_fcf_record_mac_3_WORD word3 + uint32_t word4; +#define lpfc_fcf_record_mac_4_SHIFT 0 +#define lpfc_fcf_record_mac_4_MASK 0x000000FF +#define lpfc_fcf_record_mac_4_WORD word4 +#define lpfc_fcf_record_mac_5_SHIFT 8 +#define lpfc_fcf_record_mac_5_MASK 0x000000FF +#define lpfc_fcf_record_mac_5_WORD word4 +#define lpfc_fcf_record_fcf_avail_SHIFT 16 +#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF +#define lpfc_fcf_record_fc_avail_WORD word4 +#define lpfc_fcf_record_mac_addr_prov_SHIFT 24 +#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF +#define lpfc_fcf_record_mac_addr_prov_WORD word4 +#define LPFC_FCF_FPMA 1 /* Fabric Provided MAC Address */ +#define LPFC_FCF_SPMA 2 /* Server Provided MAC Address */ + uint32_t word5; +#define lpfc_fcf_record_fab_name_0_SHIFT 0 +#define lpfc_fcf_record_fab_name_0_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_0_WORD word5 +#define lpfc_fcf_record_fab_name_1_SHIFT 8 +#define lpfc_fcf_record_fab_name_1_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_1_WORD word5 +#define lpfc_fcf_record_fab_name_2_SHIFT 16 +#define lpfc_fcf_record_fab_name_2_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_2_WORD word5 +#define lpfc_fcf_record_fab_name_3_SHIFT 24 +#define lpfc_fcf_record_fab_name_3_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_3_WORD word5 + uint32_t word6; +#define lpfc_fcf_record_fab_name_4_SHIFT 0 +#define lpfc_fcf_record_fab_name_4_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_4_WORD word6 +#define lpfc_fcf_record_fab_name_5_SHIFT 8 +#define lpfc_fcf_record_fab_name_5_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_5_WORD word6 +#define lpfc_fcf_record_fab_name_6_SHIFT 16 +#define lpfc_fcf_record_fab_name_6_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_6_WORD word6 +#define lpfc_fcf_record_fab_name_7_SHIFT 24 +#define lpfc_fcf_record_fab_name_7_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_7_WORD word6 + uint32_t word7; +#define lpfc_fcf_record_fc_map_0_SHIFT 0 +#define lpfc_fcf_record_fc_map_0_MASK 0x000000FF +#define lpfc_fcf_record_fc_map_0_WORD word7 +#define lpfc_fcf_record_fc_map_1_SHIFT 8 +#define lpfc_fcf_record_fc_map_1_MASK 0x000000FF +#define lpfc_fcf_record_fc_map_1_WORD word7 +#define lpfc_fcf_record_fc_map_2_SHIFT 16 +#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF +#define lpfc_fcf_record_fc_map_2_WORD word7 +#define lpfc_fcf_record_fcf_valid_SHIFT 24 +#define lpfc_fcf_record_fcf_valid_MASK 0x000000FF +#define lpfc_fcf_record_fcf_valid_WORD word7 + uint32_t word8; +#define lpfc_fcf_record_fcf_index_SHIFT 0 +#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF +#define lpfc_fcf_record_fcf_index_WORD word8 +#define lpfc_fcf_record_fcf_state_SHIFT 16 +#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF 
+#define lpfc_fcf_record_fcf_state_WORD word8 + uint8_t vlan_bitmap[512]; +}; + +struct lpfc_mbx_read_fcf_tbl { + union lpfc_sli4_cfg_shdr cfg_shdr; + union { + struct { + uint32_t word10; +#define lpfc_mbx_read_fcf_tbl_indx_SHIFT 0 +#define lpfc_mbx_read_fcf_tbl_indx_MASK 0x0000FFFF +#define lpfc_mbx_read_fcf_tbl_indx_WORD word10 + } request; + struct { + uint32_t eventag; + } response; + } u; + uint32_t word11; +#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT 0 +#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK 0x0000FFFF +#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD word11 +}; + +struct lpfc_mbx_add_fcf_tbl_entry { + union lpfc_sli4_cfg_shdr cfg_shdr; + uint32_t word10; +#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT 0 +#define lpfc_mbx_add_fcf_tbl_fcfi_MASK 0x0000FFFF +#define lpfc_mbx_add_fcf_tbl_fcfi_WORD word10 + struct lpfc_mbx_sge fcf_sge; +}; + +struct lpfc_mbx_del_fcf_tbl_entry { + struct mbox_header header; + uint32_t word10; +#define lpfc_mbx_del_fcf_tbl_count_SHIFT 0 +#define lpfc_mbx_del_fcf_tbl_count_MASK 0x0000FFFF +#define lpfc_mbx_del_fcf_tbl_count_WORD word10 +#define lpfc_mbx_del_fcf_tbl_index_SHIFT 16 +#define lpfc_mbx_del_fcf_tbl_index_MASK 0x0000FFFF +#define lpfc_mbx_del_fcf_tbl_index_WORD word10 +}; + +/* Status field for embedded SLI_CONFIG mailbox command */ +#define STATUS_SUCCESS 0x0 +#define STATUS_FAILED 0x1 +#define STATUS_ILLEGAL_REQUEST 0x2 +#define STATUS_ILLEGAL_FIELD 0x3 +#define STATUS_INSUFFICIENT_BUFFER 0x4 +#define STATUS_UNAUTHORIZED_REQUEST 0x5 +#define STATUS_FLASHROM_SAVE_FAILED 0x17 +#define STATUS_FLASHROM_RESTORE_FAILED 0x18 +#define STATUS_ICCBINDEX_ALLOC_FAILED 0x1a +#define STATUS_IOCTLHANDLE_ALLOC_FAILED 0x1b +#define STATUS_INVALID_PHY_ADDR_FROM_OSM 0x1c +#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM 0x1d +#define STATUS_ASSERT_FAILED 0x1e +#define STATUS_INVALID_SESSION 0x1f +#define STATUS_INVALID_CONNECTION 0x20 +#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT 0x21 +#define STATUS_BTL_NO_FREE_SLOT_PATH 0x24 +#define STATUS_BTL_NO_FREE_SLOT_TGTID 0x25 +#define STATUS_OSM_DEVSLOT_NOT_FOUND 0x26 +#define STATUS_FLASHROM_READ_FAILED 0x27 +#define STATUS_POLL_IOCTL_TIMEOUT 0x28 +#define STATUS_ERROR_ACITMAIN 0x2a +#define STATUS_REBOOT_REQUIRED 0x2c +#define STATUS_FCF_IN_USE 0x3a + +struct lpfc_mbx_sli4_config { + struct mbox_header header; +}; + +struct lpfc_mbx_init_vfi { + uint32_t word1; +#define lpfc_init_vfi_vr_SHIFT 31 +#define lpfc_init_vfi_vr_MASK 0x00000001 +#define lpfc_init_vfi_vr_WORD word1 +#define lpfc_init_vfi_vt_SHIFT 30 +#define lpfc_init_vfi_vt_MASK 0x00000001 +#define lpfc_init_vfi_vt_WORD word1 +#define lpfc_init_vfi_vf_SHIFT 29 +#define lpfc_init_vfi_vf_MASK 0x00000001 +#define lpfc_init_vfi_vf_WORD word1 +#define lpfc_init_vfi_vfi_SHIFT 0 +#define lpfc_init_vfi_vfi_MASK 0x0000FFFF +#define lpfc_init_vfi_vfi_WORD word1 + uint32_t word2; +#define lpfc_init_vfi_fcfi_SHIFT 0 +#define lpfc_init_vfi_fcfi_MASK 0x0000FFFF +#define lpfc_init_vfi_fcfi_WORD word2 + uint32_t word3; +#define lpfc_init_vfi_pri_SHIFT 13 +#define lpfc_init_vfi_pri_MASK 0x00000007 +#define lpfc_init_vfi_pri_WORD word3 +#define lpfc_init_vfi_vf_id_SHIFT 1 +#define lpfc_init_vfi_vf_id_MASK 0x00000FFF +#define lpfc_init_vfi_vf_id_WORD word3 + uint32_t word4; +#define lpfc_init_vfi_hop_count_SHIFT 24 +#define lpfc_init_vfi_hop_count_MASK 0x000000FF +#define lpfc_init_vfi_hop_count_WORD word4 +}; + +struct lpfc_mbx_reg_vfi { + uint32_t word1; +#define lpfc_reg_vfi_vp_SHIFT 28 +#define lpfc_reg_vfi_vp_MASK 0x00000001 +#define lpfc_reg_vfi_vp_WORD word1 +#define 
lpfc_reg_vfi_vfi_SHIFT 0 +#define lpfc_reg_vfi_vfi_MASK 0x0000FFFF +#define lpfc_reg_vfi_vfi_WORD word1 + uint32_t word2; +#define lpfc_reg_vfi_vpi_SHIFT 16 +#define lpfc_reg_vfi_vpi_MASK 0x0000FFFF +#define lpfc_reg_vfi_vpi_WORD word2 +#define lpfc_reg_vfi_fcfi_SHIFT 0 +#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF +#define lpfc_reg_vfi_fcfi_WORD word2 + uint32_t word3_rsvd; + uint32_t word4_rsvd; + struct ulp_bde64 bde; + uint32_t word8_rsvd; + uint32_t word9_rsvd; + uint32_t word10; +#define lpfc_reg_vfi_nport_id_SHIFT 0 +#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF +#define lpfc_reg_vfi_nport_id_WORD word10 +}; + +struct lpfc_mbx_init_vpi { + uint32_t word1; +#define lpfc_init_vpi_vfi_SHIFT 16 +#define lpfc_init_vpi_vfi_MASK 0x0000FFFF +#define lpfc_init_vpi_vfi_WORD word1 +#define lpfc_init_vpi_vpi_SHIFT 0 +#define lpfc_init_vpi_vpi_MASK 0x0000FFFF +#define lpfc_init_vpi_vpi_WORD word1 +}; + +struct lpfc_mbx_read_vpi { + uint32_t word1_rsvd; + uint32_t word2; +#define lpfc_mbx_read_vpi_vnportid_SHIFT 0 +#define lpfc_mbx_read_vpi_vnportid_MASK 0x00FFFFFF +#define lpfc_mbx_read_vpi_vnportid_WORD word2 + uint32_t word3_rsvd; + uint32_t word4; +#define lpfc_mbx_read_vpi_acq_alpa_SHIFT 0 +#define lpfc_mbx_read_vpi_acq_alpa_MASK 0x000000FF +#define lpfc_mbx_read_vpi_acq_alpa_WORD word4 +#define lpfc_mbx_read_vpi_pb_SHIFT 15 +#define lpfc_mbx_read_vpi_pb_MASK 0x00000001 +#define lpfc_mbx_read_vpi_pb_WORD word4 +#define lpfc_mbx_read_vpi_spec_alpa_SHIFT 16 +#define lpfc_mbx_read_vpi_spec_alpa_MASK 0x000000FF +#define lpfc_mbx_read_vpi_spec_alpa_WORD word4 +#define lpfc_mbx_read_vpi_ns_SHIFT 30 +#define lpfc_mbx_read_vpi_ns_MASK 0x00000001 +#define lpfc_mbx_read_vpi_ns_WORD word4 +#define lpfc_mbx_read_vpi_hl_SHIFT 31 +#define lpfc_mbx_read_vpi_hl_MASK 0x00000001 +#define lpfc_mbx_read_vpi_hl_WORD word4 + uint32_t word5_rsvd; + uint32_t word6; +#define lpfc_mbx_read_vpi_vpi_SHIFT 0 +#define lpfc_mbx_read_vpi_vpi_MASK 0x0000FFFF +#define lpfc_mbx_read_vpi_vpi_WORD word6 + uint32_t word7; +#define lpfc_mbx_read_vpi_mac_0_SHIFT 0 +#define lpfc_mbx_read_vpi_mac_0_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_0_WORD word7 +#define lpfc_mbx_read_vpi_mac_1_SHIFT 8 +#define lpfc_mbx_read_vpi_mac_1_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_1_WORD word7 +#define lpfc_mbx_read_vpi_mac_2_SHIFT 16 +#define lpfc_mbx_read_vpi_mac_2_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_2_WORD word7 +#define lpfc_mbx_read_vpi_mac_3_SHIFT 24 +#define lpfc_mbx_read_vpi_mac_3_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_3_WORD word7 + uint32_t word8; +#define lpfc_mbx_read_vpi_mac_4_SHIFT 0 +#define lpfc_mbx_read_vpi_mac_4_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_4_WORD word8 +#define lpfc_mbx_read_vpi_mac_5_SHIFT 8 +#define lpfc_mbx_read_vpi_mac_5_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_5_WORD word8 +#define lpfc_mbx_read_vpi_vlan_tag_SHIFT 16 +#define lpfc_mbx_read_vpi_vlan_tag_MASK 0x00000FFF +#define lpfc_mbx_read_vpi_vlan_tag_WORD word8 +#define lpfc_mbx_read_vpi_vv_SHIFT 28 +#define lpfc_mbx_read_vpi_vv_MASK 0x0000001 +#define lpfc_mbx_read_vpi_vv_WORD word8 +}; + +struct lpfc_mbx_unreg_vfi { + uint32_t word1_rsvd; + uint32_t word2; +#define lpfc_unreg_vfi_vfi_SHIFT 0 +#define lpfc_unreg_vfi_vfi_MASK 0x0000FFFF +#define lpfc_unreg_vfi_vfi_WORD word2 +}; + +struct lpfc_mbx_resume_rpi { + uint32_t word1; +#define lpfc_resume_rpi_rpi_SHIFT 0 +#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF +#define lpfc_resume_rpi_rpi_WORD word1 + uint32_t event_tag; + uint32_t word3_rsvd; + uint32_t word4_rsvd; + 
uint32_t word5_rsvd; + uint32_t word6; +#define lpfc_resume_rpi_vpi_SHIFT 0 +#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF +#define lpfc_resume_rpi_vpi_WORD word6 +#define lpfc_resume_rpi_vfi_SHIFT 16 +#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF +#define lpfc_resume_rpi_vfi_WORD word6 +}; + +#define REG_FCF_INVALID_QID 0xFFFF +struct lpfc_mbx_reg_fcfi { + uint32_t word1; +#define lpfc_reg_fcfi_info_index_SHIFT 0 +#define lpfc_reg_fcfi_info_index_MASK 0x0000FFFF +#define lpfc_reg_fcfi_info_index_WORD word1 +#define lpfc_reg_fcfi_fcfi_SHIFT 16 +#define lpfc_reg_fcfi_fcfi_MASK 0x0000FFFF +#define lpfc_reg_fcfi_fcfi_WORD word1 + uint32_t word2; +#define lpfc_reg_fcfi_rq_id1_SHIFT 0 +#define lpfc_reg_fcfi_rq_id1_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id1_WORD word2 +#define lpfc_reg_fcfi_rq_id0_SHIFT 16 +#define lpfc_reg_fcfi_rq_id0_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id0_WORD word2 + uint32_t word3; +#define lpfc_reg_fcfi_rq_id3_SHIFT 0 +#define lpfc_reg_fcfi_rq_id3_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id3_WORD word3 +#define lpfc_reg_fcfi_rq_id2_SHIFT 16 +#define lpfc_reg_fcfi_rq_id2_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id2_WORD word3 + uint32_t word4; +#define lpfc_reg_fcfi_type_match0_SHIFT 24 +#define lpfc_reg_fcfi_type_match0_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match0_WORD word4 +#define lpfc_reg_fcfi_type_mask0_SHIFT 16 +#define lpfc_reg_fcfi_type_mask0_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask0_WORD word4 +#define lpfc_reg_fcfi_rctl_match0_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match0_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match0_WORD word4 +#define lpfc_reg_fcfi_rctl_mask0_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask0_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask0_WORD word4 + uint32_t word5; +#define lpfc_reg_fcfi_type_match1_SHIFT 24 +#define lpfc_reg_fcfi_type_match1_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match1_WORD word5 +#define lpfc_reg_fcfi_type_mask1_SHIFT 16 +#define lpfc_reg_fcfi_type_mask1_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask1_WORD word5 +#define lpfc_reg_fcfi_rctl_match1_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match1_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match1_WORD word5 +#define lpfc_reg_fcfi_rctl_mask1_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask1_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask1_WORD word5 + uint32_t word6; +#define lpfc_reg_fcfi_type_match2_SHIFT 24 +#define lpfc_reg_fcfi_type_match2_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match2_WORD word6 +#define lpfc_reg_fcfi_type_mask2_SHIFT 16 +#define lpfc_reg_fcfi_type_mask2_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask2_WORD word6 +#define lpfc_reg_fcfi_rctl_match2_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match2_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match2_WORD word6 +#define lpfc_reg_fcfi_rctl_mask2_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask2_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask2_WORD word6 + uint32_t word7; +#define lpfc_reg_fcfi_type_match3_SHIFT 24 +#define lpfc_reg_fcfi_type_match3_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match3_WORD word7 +#define lpfc_reg_fcfi_type_mask3_SHIFT 16 +#define lpfc_reg_fcfi_type_mask3_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask3_WORD word7 +#define lpfc_reg_fcfi_rctl_match3_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match3_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match3_WORD word7 +#define lpfc_reg_fcfi_rctl_mask3_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask3_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask3_WORD word7 + uint32_t word8; +#define lpfc_reg_fcfi_mam_SHIFT 13 +#define lpfc_reg_fcfi_mam_MASK 
0x00000003 +#define lpfc_reg_fcfi_mam_WORD word8 +#define LPFC_MAM_BOTH 0 /* Both SPMA and FPMA */ +#define LPFC_MAM_SPMA 1 /* Server Provided MAC Address */ +#define LPFC_MAM_FPMA 2 /* Fabric Provided MAC Address */ +#define lpfc_reg_fcfi_vv_SHIFT 12 +#define lpfc_reg_fcfi_vv_MASK 0x00000001 +#define lpfc_reg_fcfi_vv_WORD word8 +#define lpfc_reg_fcfi_vlan_tag_SHIFT 0 +#define lpfc_reg_fcfi_vlan_tag_MASK 0x00000FFF +#define lpfc_reg_fcfi_vlan_tag_WORD word8 +}; + +struct lpfc_mbx_unreg_fcfi { + uint32_t word1_rsv; + uint32_t word2; +#define lpfc_unreg_fcfi_SHIFT 0 +#define lpfc_unreg_fcfi_MASK 0x0000FFFF +#define lpfc_unreg_fcfi_WORD word2 +}; + +struct lpfc_mbx_read_rev { + uint32_t word1; +#define lpfc_mbx_rd_rev_sli_lvl_SHIFT 16 +#define lpfc_mbx_rd_rev_sli_lvl_MASK 0x0000000F +#define lpfc_mbx_rd_rev_sli_lvl_WORD word1 +#define lpfc_mbx_rd_rev_fcoe_SHIFT 20 +#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001 +#define lpfc_mbx_rd_rev_fcoe_WORD word1 +#define lpfc_mbx_rd_rev_vpd_SHIFT 29 +#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001 +#define lpfc_mbx_rd_rev_vpd_WORD word1 + uint32_t first_hw_rev; + uint32_t second_hw_rev; + uint32_t word4_rsvd; + uint32_t third_hw_rev; + uint32_t word6; +#define lpfc_mbx_rd_rev_fcph_low_SHIFT 0 +#define lpfc_mbx_rd_rev_fcph_low_MASK 0x000000FF +#define lpfc_mbx_rd_rev_fcph_low_WORD word6 +#define lpfc_mbx_rd_rev_fcph_high_SHIFT 8 +#define lpfc_mbx_rd_rev_fcph_high_MASK 0x000000FF +#define lpfc_mbx_rd_rev_fcph_high_WORD word6 +#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT 16 +#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK 0x000000FF +#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD word6 +#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT 24 +#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK 0x000000FF +#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD word6 + uint32_t word7_rsvd; + uint32_t fw_id_rev; + uint8_t fw_name[16]; + uint32_t ulp_fw_id_rev; + uint8_t ulp_fw_name[16]; + uint32_t word18_47_rsvd[30]; + uint32_t word48; +#define lpfc_mbx_rd_rev_avail_len_SHIFT 0 +#define lpfc_mbx_rd_rev_avail_len_MASK 0x00FFFFFF +#define lpfc_mbx_rd_rev_avail_len_WORD word48 + uint32_t vpd_paddr_low; + uint32_t vpd_paddr_high; + uint32_t avail_vpd_len; + uint32_t rsvd_52_63[12]; +}; + +struct lpfc_mbx_read_config { + uint32_t word1; +#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0 +#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF +#define lpfc_mbx_rd_conf_max_bbc_WORD word1 +#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8 +#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF +#define lpfc_mbx_rd_conf_init_bbc_WORD word1 + uint32_t word2; +#define lpfc_mbx_rd_conf_nport_did_SHIFT 0 +#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF +#define lpfc_mbx_rd_conf_nport_did_WORD word2 +#define lpfc_mbx_rd_conf_topology_SHIFT 24 +#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF +#define lpfc_mbx_rd_conf_topology_WORD word2 + uint32_t word3; +#define lpfc_mbx_rd_conf_ao_SHIFT 0 +#define lpfc_mbx_rd_conf_ao_MASK 0x00000001 +#define lpfc_mbx_rd_conf_ao_WORD word3 +#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8 +#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F +#define lpfc_mbx_rd_conf_bb_scn_WORD word3 +#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12 +#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F +#define lpfc_mbx_rd_conf_cbb_scn_WORD word3 +#define lpfc_mbx_rd_conf_mc_SHIFT 29 +#define lpfc_mbx_rd_conf_mc_MASK 0x00000001 +#define lpfc_mbx_rd_conf_mc_WORD word3 + uint32_t word4; +#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_e_d_tov_WORD word4 + uint32_t word5; +#define 
lpfc_mbx_rd_conf_lp_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_lp_tov_WORD word5 + uint32_t word6; +#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_r_a_tov_WORD word6 + uint32_t word7; +#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF +#define lpfc_mbx_rd_conf_r_t_tov_WORD word7 + uint32_t word8; +#define lpfc_mbx_rd_conf_al_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F +#define lpfc_mbx_rd_conf_al_tov_WORD word8 + uint32_t word9; +#define lpfc_mbx_rd_conf_lmt_SHIFT 0 +#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_lmt_WORD word9 + uint32_t word10; +#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0 +#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF +#define lpfc_mbx_rd_conf_max_alpa_WORD word10 + uint32_t word11_rsvd; + uint32_t word12; +#define lpfc_mbx_rd_conf_xri_base_SHIFT 0 +#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_xri_base_WORD word12 +#define lpfc_mbx_rd_conf_xri_count_SHIFT 16 +#define lpfc_mbx_rd_conf_xri_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_xri_count_WORD word12 + uint32_t word13; +#define lpfc_mbx_rd_conf_rpi_base_SHIFT 0 +#define lpfc_mbx_rd_conf_rpi_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_rpi_base_WORD word13 +#define lpfc_mbx_rd_conf_rpi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_rpi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_rpi_count_WORD word13 + uint32_t word14; +#define lpfc_mbx_rd_conf_vpi_base_SHIFT 0 +#define lpfc_mbx_rd_conf_vpi_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vpi_base_WORD word14 +#define lpfc_mbx_rd_conf_vpi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_vpi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vpi_count_WORD word14 + uint32_t word15; +#define lpfc_mbx_rd_conf_vfi_base_SHIFT 0 +#define lpfc_mbx_rd_conf_vfi_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vfi_base_WORD word15 +#define lpfc_mbx_rd_conf_vfi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vfi_count_WORD word15 + uint32_t word16; +#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0 +#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_fcfi_base_WORD word16 +#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_fcfi_count_WORD word16 + uint32_t word17; +#define lpfc_mbx_rd_conf_rq_count_SHIFT 0 +#define lpfc_mbx_rd_conf_rq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_rq_count_WORD word17 +#define lpfc_mbx_rd_conf_eq_count_SHIFT 16 +#define lpfc_mbx_rd_conf_eq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_eq_count_WORD word17 + uint32_t word18; +#define lpfc_mbx_rd_conf_wq_count_SHIFT 0 +#define lpfc_mbx_rd_conf_wq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_wq_count_WORD word18 +#define lpfc_mbx_rd_conf_cq_count_SHIFT 16 +#define lpfc_mbx_rd_conf_cq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_cq_count_WORD word18 +}; + +struct lpfc_mbx_request_features { + uint32_t word1; +#define lpfc_mbx_rq_ftr_qry_SHIFT 0 +#define lpfc_mbx_rq_ftr_qry_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_qry_WORD word1 + uint32_t word2; +#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT 0 +#define lpfc_mbx_rq_ftr_rq_iaab_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_iaab_WORD word2 +#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT 1 +#define lpfc_mbx_rq_ftr_rq_npiv_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_npiv_WORD word2 +#define lpfc_mbx_rq_ftr_rq_dif_SHIFT 
2 +#define lpfc_mbx_rq_ftr_rq_dif_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_dif_WORD word2 +#define lpfc_mbx_rq_ftr_rq_vf_SHIFT 3 +#define lpfc_mbx_rq_ftr_rq_vf_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_vf_WORD word2 +#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT 4 +#define lpfc_mbx_rq_ftr_rq_fcpi_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_fcpi_WORD word2 +#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT 5 +#define lpfc_mbx_rq_ftr_rq_fcpt_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_fcpt_WORD word2 +#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT 6 +#define lpfc_mbx_rq_ftr_rq_fcpc_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_fcpc_WORD word2 +#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7 +#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2 + uint32_t word3; +#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0 +#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_iaab_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT 1 +#define lpfc_mbx_rq_ftr_rsp_npiv_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_npiv_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT 2 +#define lpfc_mbx_rq_ftr_rsp_dif_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_dif_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT 3 +#define lpfc_mbx_rq_ftr_rsp_vf__MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_vf_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT 4 +#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT 5 +#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT 6 +#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7 +#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3 +}; + +/* Mailbox Completion Queue Error Messages */ +#define MB_CQE_STATUS_SUCCESS 0x0 +#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 +#define MB_CQE_STATUS_INVALID_PARAMETER 0x2 +#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3 +#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 +#define MB_CQE_STATUS_DMA_FAILED 0x5 + +/* mailbox queue entry structure */ +struct lpfc_mqe { + uint32_t word0; +#define lpfc_mqe_status_SHIFT 16 +#define lpfc_mqe_status_MASK 0x0000FFFF +#define lpfc_mqe_status_WORD word0 +#define lpfc_mqe_command_SHIFT 8 +#define lpfc_mqe_command_MASK 0x000000FF +#define lpfc_mqe_command_WORD word0 + union { + uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1]; + /* sli4 mailbox commands */ + struct lpfc_mbx_sli4_config sli4_config; + struct lpfc_mbx_init_vfi init_vfi; + struct lpfc_mbx_reg_vfi reg_vfi; + struct lpfc_mbx_reg_vfi unreg_vfi; + struct lpfc_mbx_init_vpi init_vpi; + struct lpfc_mbx_resume_rpi resume_rpi; + struct lpfc_mbx_read_fcf_tbl read_fcf_tbl; + struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry; + struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry; + struct lpfc_mbx_reg_fcfi reg_fcfi; + struct lpfc_mbx_unreg_fcfi unreg_fcfi; + struct lpfc_mbx_mq_create mq_create; + struct lpfc_mbx_eq_create eq_create; + struct lpfc_mbx_cq_create cq_create; + struct lpfc_mbx_wq_create wq_create; + struct lpfc_mbx_rq_create rq_create; + struct lpfc_mbx_mq_destroy mq_destroy; + struct lpfc_mbx_eq_destroy eq_destroy; + struct lpfc_mbx_cq_destroy cq_destroy; + struct lpfc_mbx_wq_destroy wq_destroy; + struct lpfc_mbx_rq_destroy rq_destroy; + struct lpfc_mbx_post_sgl_pages post_sgl_pages; + struct lpfc_mbx_nembed_cmd nembed_cmd; + struct lpfc_mbx_read_rev read_rev; + 
struct lpfc_mbx_read_vpi read_vpi; + struct lpfc_mbx_read_config rd_config; + struct lpfc_mbx_request_features req_ftrs; + struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; + struct lpfc_mbx_nop nop; + } un; +}; + +struct lpfc_mcqe { + uint32_t word0; +#define lpfc_mcqe_status_SHIFT 0 +#define lpfc_mcqe_status_MASK 0x0000FFFF +#define lpfc_mcqe_status_WORD word0 +#define lpfc_mcqe_ext_status_SHIFT 16 +#define lpfc_mcqe_ext_status_MASK 0x0000FFFF +#define lpfc_mcqe_ext_status_WORD word0 + uint32_t mcqe_tag0; + uint32_t mcqe_tag1; + uint32_t trailer; +#define lpfc_trailer_valid_SHIFT 31 +#define lpfc_trailer_valid_MASK 0x00000001 +#define lpfc_trailer_valid_WORD trailer +#define lpfc_trailer_async_SHIFT 30 +#define lpfc_trailer_async_MASK 0x00000001 +#define lpfc_trailer_async_WORD trailer +#define lpfc_trailer_hpi_SHIFT 29 +#define lpfc_trailer_hpi_MASK 0x00000001 +#define lpfc_trailer_hpi_WORD trailer +#define lpfc_trailer_completed_SHIFT 28 +#define lpfc_trailer_completed_MASK 0x00000001 +#define lpfc_trailer_completed_WORD trailer +#define lpfc_trailer_consumed_SHIFT 27 +#define lpfc_trailer_consumed_MASK 0x00000001 +#define lpfc_trailer_consumed_WORD trailer +#define lpfc_trailer_type_SHIFT 16 +#define lpfc_trailer_type_MASK 0x000000FF +#define lpfc_trailer_type_WORD trailer +#define lpfc_trailer_code_SHIFT 8 +#define lpfc_trailer_code_MASK 0x000000FF +#define lpfc_trailer_code_WORD trailer +#define LPFC_TRAILER_CODE_LINK 0x1 +#define LPFC_TRAILER_CODE_FCOE 0x2 +#define LPFC_TRAILER_CODE_DCBX 0x3 +}; + +struct lpfc_acqe_link { + uint32_t word0; +#define lpfc_acqe_link_speed_SHIFT 24 +#define lpfc_acqe_link_speed_MASK 0x000000FF +#define lpfc_acqe_link_speed_WORD word0 +#define LPFC_ASYNC_LINK_SPEED_ZERO 0x0 +#define LPFC_ASYNC_LINK_SPEED_10MBPS 0x1 +#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2 +#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3 +#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4 +#define lpfc_acqe_link_duplex_SHIFT 16 +#define lpfc_acqe_link_duplex_MASK 0x000000FF +#define lpfc_acqe_link_duplex_WORD word0 +#define LPFC_ASYNC_LINK_DUPLEX_NONE 0x0 +#define LPFC_ASYNC_LINK_DUPLEX_HALF 0x1 +#define LPFC_ASYNC_LINK_DUPLEX_FULL 0x2 +#define lpfc_acqe_link_status_SHIFT 8 +#define lpfc_acqe_link_status_MASK 0x000000FF +#define lpfc_acqe_link_status_WORD word0 +#define LPFC_ASYNC_LINK_STATUS_DOWN 0x0 +#define LPFC_ASYNC_LINK_STATUS_UP 0x1 +#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2 +#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3 +#define lpfc_acqe_link_physical_SHIFT 0 +#define lpfc_acqe_link_physical_MASK 0x000000FF +#define lpfc_acqe_link_physical_WORD word0 +#define LPFC_ASYNC_LINK_PORT_A 0x0 +#define LPFC_ASYNC_LINK_PORT_B 0x1 + uint32_t word1; +#define lpfc_acqe_link_fault_SHIFT 0 +#define lpfc_acqe_link_fault_MASK 0x000000FF +#define lpfc_acqe_link_fault_WORD word1 +#define LPFC_ASYNC_LINK_FAULT_NONE 0x0 +#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1 +#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2 + uint32_t event_tag; + uint32_t trailer; +}; + +struct lpfc_acqe_fcoe { + uint32_t fcf_index; + uint32_t word1; +#define lpfc_acqe_fcoe_fcf_count_SHIFT 0 +#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF +#define lpfc_acqe_fcoe_fcf_count_WORD word1 +#define lpfc_acqe_fcoe_event_type_SHIFT 16 +#define lpfc_acqe_fcoe_event_type_MASK 0x0000FFFF +#define lpfc_acqe_fcoe_event_type_WORD word1 +#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1 +#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2 +#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3 + uint32_t event_tag; + uint32_t trailer; +}; + +struct lpfc_acqe_dcbx { + uint32_t tlv_ttl; + uint32_t 
reserved; + uint32_t event_tag; + uint32_t trailer; +}; + +/* + * Define the bootstrap mailbox (bmbx) region used to communicate + * mailbox command between the host and port. The mailbox consists + * of a payload area of 256 bytes and a completion queue of length + * 16 bytes. + */ +struct lpfc_bmbx_create { + struct lpfc_mqe mqe; + struct lpfc_mcqe mcqe; +}; + +#define SGL_ALIGN_SZ 64 +#define SGL_PAGE_SIZE 4096 +/* align SGL addr on a size boundary - adjust address up */ +#define NO_XRI ((uint16_t)-1) +struct wqe_common { + uint32_t word6; +#define wqe_xri_SHIFT 0 +#define wqe_xri_MASK 0x0000FFFF +#define wqe_xri_WORD word6 +#define wqe_ctxt_tag_SHIFT 16 +#define wqe_ctxt_tag_MASK 0x0000FFFF +#define wqe_ctxt_tag_WORD word6 + uint32_t word7; +#define wqe_ct_SHIFT 2 +#define wqe_ct_MASK 0x00000003 +#define wqe_ct_WORD word7 +#define wqe_status_SHIFT 4 +#define wqe_status_MASK 0x0000000f +#define wqe_status_WORD word7 +#define wqe_cmnd_SHIFT 8 +#define wqe_cmnd_MASK 0x000000ff +#define wqe_cmnd_WORD word7 +#define wqe_class_SHIFT 16 +#define wqe_class_MASK 0x00000007 +#define wqe_class_WORD word7 +#define wqe_pu_SHIFT 20 +#define wqe_pu_MASK 0x00000003 +#define wqe_pu_WORD word7 +#define wqe_erp_SHIFT 22 +#define wqe_erp_MASK 0x00000001 +#define wqe_erp_WORD word7 +#define wqe_lnk_SHIFT 23 +#define wqe_lnk_MASK 0x00000001 +#define wqe_lnk_WORD word7 +#define wqe_tmo_SHIFT 24 +#define wqe_tmo_MASK 0x000000ff +#define wqe_tmo_WORD word7 + uint32_t abort_tag; /* word 8 in WQE */ + uint32_t word9; +#define wqe_reqtag_SHIFT 0 +#define wqe_reqtag_MASK 0x0000FFFF +#define wqe_reqtag_WORD word9 +#define wqe_rcvoxid_SHIFT 16 +#define wqe_rcvoxid_MASK 0x0000FFFF +#define wqe_rcvoxid_WORD word9 + uint32_t word10; +#define wqe_pri_SHIFT 16 +#define wqe_pri_MASK 0x00000007 +#define wqe_pri_WORD word10 +#define wqe_pv_SHIFT 19 +#define wqe_pv_MASK 0x00000001 +#define wqe_pv_WORD word10 +#define wqe_xc_SHIFT 21 +#define wqe_xc_MASK 0x00000001 +#define wqe_xc_WORD word10 +#define wqe_ccpe_SHIFT 23 +#define wqe_ccpe_MASK 0x00000001 +#define wqe_ccpe_WORD word10 +#define wqe_ccp_SHIFT 24 +#define wqe_ccp_MASK 0x000000ff +#define wqe_ccp_WORD word10 + uint32_t word11; +#define wqe_cmd_type_SHIFT 0 +#define wqe_cmd_type_MASK 0x0000000f +#define wqe_cmd_type_WORD word11 +#define wqe_wqec_SHIFT 7 +#define wqe_wqec_MASK 0x00000001 +#define wqe_wqec_WORD word11 +#define wqe_cqid_SHIFT 16 +#define wqe_cqid_MASK 0x000003ff +#define wqe_cqid_WORD word11 +}; + +struct wqe_did { + uint32_t word5; +#define wqe_els_did_SHIFT 0 +#define wqe_els_did_MASK 0x00FFFFFF +#define wqe_els_did_WORD word5 +#define wqe_xmit_bls_ar_SHIFT 30 +#define wqe_xmit_bls_ar_MASK 0x00000001 +#define wqe_xmit_bls_ar_WORD word5 +#define wqe_xmit_bls_xo_SHIFT 31 +#define wqe_xmit_bls_xo_MASK 0x00000001 +#define wqe_xmit_bls_xo_WORD word5 +}; + +struct els_request64_wqe { + struct ulp_bde64 bde; + uint32_t payload_len; + uint32_t word4; +#define els_req64_sid_SHIFT 0 +#define els_req64_sid_MASK 0x00FFFFFF +#define els_req64_sid_WORD word4 +#define els_req64_sp_SHIFT 24 +#define els_req64_sp_MASK 0x00000001 +#define els_req64_sp_WORD word4 +#define els_req64_vf_SHIFT 25 +#define els_req64_vf_MASK 0x00000001 +#define els_req64_vf_WORD word4 + struct wqe_did wqe_dest; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t word12; +#define els_req64_vfid_SHIFT 1 +#define els_req64_vfid_MASK 0x00000FFF +#define els_req64_vfid_WORD word12 +#define els_req64_pri_SHIFT 13 +#define els_req64_pri_MASK 0x00000007 +#define els_req64_pri_WORD word12 + 
uint32_t word13; +#define els_req64_hopcnt_SHIFT 24 +#define els_req64_hopcnt_MASK 0x000000ff +#define els_req64_hopcnt_WORD word13 + uint32_t reserved[2]; +}; + +struct xmit_els_rsp64_wqe { + struct ulp_bde64 bde; + uint32_t rsvd3; + uint32_t rsvd4; + struct wqe_did wqe_dest; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; +}; + +struct xmit_bls_rsp64_wqe { + uint32_t payload0; + uint32_t word1; +#define xmit_bls_rsp64_rxid_SHIFT 0 +#define xmit_bls_rsp64_rxid_MASK 0x0000ffff +#define xmit_bls_rsp64_rxid_WORD word1 +#define xmit_bls_rsp64_oxid_SHIFT 16 +#define xmit_bls_rsp64_oxid_MASK 0x0000ffff +#define xmit_bls_rsp64_oxid_WORD word1 + uint32_t word2; +#define xmit_bls_rsp64_seqcntlo_SHIFT 0 +#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff +#define xmit_bls_rsp64_seqcntlo_WORD word2 +#define xmit_bls_rsp64_seqcnthi_SHIFT 16 +#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff +#define xmit_bls_rsp64_seqcnthi_WORD word2 + uint32_t rsrvd3; + uint32_t rsrvd4; + struct wqe_did wqe_dest; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; +}; +struct wqe_rctl_dfctl { + uint32_t word5; +#define wqe_si_SHIFT 2 +#define wqe_si_MASK 0x000000001 +#define wqe_si_WORD word5 +#define wqe_la_SHIFT 3 +#define wqe_la_MASK 0x000000001 +#define wqe_la_WORD word5 +#define wqe_ls_SHIFT 7 +#define wqe_ls_MASK 0x000000001 +#define wqe_ls_WORD word5 +#define wqe_dfctl_SHIFT 8 +#define wqe_dfctl_MASK 0x0000000ff +#define wqe_dfctl_WORD word5 +#define wqe_type_SHIFT 16 +#define wqe_type_MASK 0x0000000ff +#define wqe_type_WORD word5 +#define wqe_rctl_SHIFT 24 +#define wqe_rctl_MASK 0x0000000ff +#define wqe_rctl_WORD word5 +}; + +struct xmit_seq64_wqe { + struct ulp_bde64 bde; + uint32_t paylaod_offset; + uint32_t relative_offset; + struct wqe_rctl_dfctl wge_ctl; + struct wqe_common wqe_com; /* words 6-11 */ + /* Note: word10 different REVISIT */ + uint32_t xmit_len; + uint32_t rsvd_12_15[3]; +}; +struct xmit_bcast64_wqe { + struct ulp_bde64 bde; + uint32_t paylaod_len; + uint32_t rsvd4; + struct wqe_rctl_dfctl wge_ctl; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; +}; + +struct gen_req64_wqe { + struct ulp_bde64 bde; + uint32_t command_len; + uint32_t payload_len; + struct wqe_rctl_dfctl wge_ctl; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; +}; + +struct create_xri_wqe { + uint32_t rsrvd[5]; /* words 0-4 */ + struct wqe_did wqe_dest; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +#define T_REQUEST_TAG 3 +#define T_XRI_TAG 1 + +struct abort_cmd_wqe { + uint32_t rsrvd[3]; + uint32_t word3; +#define abort_cmd_ia_SHIFT 0 +#define abort_cmd_ia_MASK 0x000000001 +#define abort_cmd_ia_WORD word3 +#define abort_cmd_criteria_SHIFT 8 +#define abort_cmd_criteria_MASK 0x0000000ff +#define abort_cmd_criteria_WORD word3 + uint32_t rsrvd4; + uint32_t rsrvd5; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +struct fcp_iwrite64_wqe { + struct ulp_bde64 bde; + uint32_t payload_len; + uint32_t total_xfer_len; + uint32_t initial_xfer_len; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +struct fcp_iread64_wqe { + struct ulp_bde64 bde; + uint32_t payload_len; /* word 3 */ + uint32_t total_xfer_len; /* word 4 */ + uint32_t rsrvd5; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +struct fcp_icmnd64_wqe { + 
struct ulp_bde64 bde; /* words 0-2 */ + uint32_t rsrvd[3]; /* words 3-5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + + +union lpfc_wqe { + uint32_t words[16]; + struct lpfc_wqe_generic generic; + struct fcp_icmnd64_wqe fcp_icmd; + struct fcp_iread64_wqe fcp_iread; + struct fcp_iwrite64_wqe fcp_iwrite; + struct abort_cmd_wqe abort_cmd; + struct create_xri_wqe create_xri; + struct xmit_bcast64_wqe xmit_bcast64; + struct xmit_seq64_wqe xmit_sequence; + struct xmit_bls_rsp64_wqe xmit_bls_rsp; + struct xmit_els_rsp64_wqe xmit_els_rsp; + struct els_request64_wqe els_req; + struct gen_req64_wqe gen_req; +}; + +#define FCP_COMMAND 0x0 +#define FCP_COMMAND_DATA_OUT 0x1 +#define ELS_COMMAND_NON_FIP 0xC +#define ELS_COMMAND_FIP 0xD +#define OTHER_COMMAND 0x8 +
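Every field in the SLI-4 structures above is described by a _SHIFT/_MASK/_WORD macro triplet rather than by C bitfields, which keeps the register layout explicit and lets one generic accessor serve every field. A minimal sketch of that accessor pattern follows; the bf_get()/bf_set() helpers live in lpfc_sli4.h rather than in this hunk, so they are reproduced here for illustration only, and the example_rcqe_ok() wrapper is hypothetical.

/* Generic bit-field accessors: token-paste the field name onto its
 * _SHIFT/_MASK/_WORD companions defined above. */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

/* Hypothetical example: decode a receive-queue CQE using the
 * lpfc_rcqe triplets defined near the top of this header. */
static inline int example_rcqe_ok(struct lpfc_rcqe *rcqe)
{
	/* bf_get(lpfc_rcqe_status, rcqe) expands to
	 * (rcqe->word0 >> 8) & 0xFF */
	return bf_get(lpfc_rcqe_status, rcqe) == FC_STATUS_RQ_SUCCESS &&
	       bf_get(lpfc_rcqe_length, rcqe) > 0;
}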
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 3f06ce2becf5..e9e4a1df8989 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -34,8 +34,10 @@ #include #include +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -51,9 +53,23 @@ char *_dump_buf_dif; unsigned long _dump_buf_dif_order; spinlock_t _dump_buf_lock; -static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); +static int lpfc_sli4_queue_create(struct lpfc_hba *); +static void lpfc_sli4_queue_destroy(struct lpfc_hba *); +static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); +static int lpfc_setup_endian_order(struct lpfc_hba *); +static int lpfc_sli4_read_config(struct lpfc_hba *); +static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); +static void lpfc_free_sgl_list(struct lpfc_hba *); +static int lpfc_init_sgl_list(struct lpfc_hba *); +static int lpfc_init_active_sgl_array(struct lpfc_hba *); +static void lpfc_free_active_sgl(struct lpfc_hba *); +static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); +static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); +static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); +static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); +static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; @@ -646,6 +662,77 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba) return 0; } +/** + * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will do uninitialization after the HBA is reset when bringing + * down the SLI Layer. + * + * Return codes + * 0 - success. + * Any other value - error. + **/ +static int +lpfc_hba_down_post_s4(struct lpfc_hba *phba) +{ + struct lpfc_scsi_buf *psb, *psb_next; + LIST_HEAD(aborts); + int ret; + unsigned long iflag = 0; + ret = lpfc_hba_down_post_s3(phba); + if (ret) + return ret; + /* At this point in time the HBA is either reset or DOA. Either + * way, nothing should be on lpfc_abts_els_sgl_list; it needs to be + * on the lpfc_sgl_list so that it can either be freed if the + * driver is unloading or reposted if the driver is restarting + * the port. + */ + spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */ + /* scsi_buf_list */ + /* abts_sgl_list_lock required because worker thread uses this + * list. + */ + spin_lock(&phba->sli4_hba.abts_sgl_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, + &phba->sli4_hba.lpfc_sgl_list); + spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + /* abts_scsi_buf_list_lock required because worker thread uses this + * list. + */ + spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, + &aborts); + spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); + spin_unlock_irq(&phba->hbalock); + + list_for_each_entry_safe(psb, psb_next, &aborts, list) { + psb->pCmd = NULL; + psb->status = IOSTAT_SUCCESS; + } + spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); + list_splice(&aborts, &phba->lpfc_scsi_buf_list); + spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + return 0; +} + +/** + * lpfc_hba_down_post - Wrapper func for hba down post routine + * @phba: pointer to lpfc HBA data structure. + * + * This routine wraps the actual SLI3 or SLI4 routine for performing + * uninitialization after the HBA is reset when bringing down the SLI Layer. + * + * Return codes + * 0 - success. + * Any other value - error. + **/ +int +lpfc_hba_down_post(struct lpfc_hba *phba) +{ + return (*phba->lpfc_hba_down_post)(phba); +} /** * lpfc_hb_timeout - The HBA-timer timeout handler @@ -852,6 +939,25 @@ lpfc_offline_eratt(struct lpfc_hba *phba) return; } +/** + * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to bring an SLI4 HBA offline when HBA hardware error + * other than Port Error 6 has been detected. + **/ +static void +lpfc_sli4_offline_eratt(struct lpfc_hba *phba) +{ + lpfc_offline_prep(phba); + lpfc_offline(phba); + lpfc_sli4_brdreset(phba); + lpfc_hba_down_post(phba); + lpfc_sli4_post_status_check(phba); + lpfc_unblock_mgmt_io(phba); + phba->link_state = LPFC_HBA_ERROR; +} + /** * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler * @phba: pointer to lpfc hba data structure. @@ -1056,6 +1162,65 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) return; } +/** + * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to handle the SLI4 HBA hardware error attention + * conditions. + **/ +static void +lpfc_handle_eratt_s4(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + uint32_t event_data; + struct Scsi_Host *shost; + + /* If the pci channel is offline, ignore possible errors, since + * we cannot communicate with the pci card anyway. + */ + if (pci_channel_offline(phba->pcidev)) + return; + /* If resets are disabled then leave the HBA alone and return */ + if (!phba->cfg_enable_hba_reset) + return; + + /* Send an internal error event to mgmt application */ + lpfc_board_errevt_to_mgmt(phba); + + /* For now, the actual action for SLI4 device handling is not + * specified yet; just treat it as an adapter hardware failure. + */ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n", + phba->work_status[0], phba->work_status[1]); + + event_data = FC_REG_DUMP_EVENT; + shost = lpfc_shost_from_vport(vport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(event_data), (char *) &event_data, + SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); + + lpfc_sli4_offline_eratt(phba); +} +
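The _s3/_s4 pairs defined above are never called directly; like lpfc_hba_down_post() and the lpfc_handle_eratt() wrapper that follows, callers reach them through per-revision function pointers held in struct lpfc_hba. A hypothetical sketch of how such a jump table gets populated (the actual wiring happens elsewhere in this patch; the helper name below is illustrative only):

/* Illustrative only: bind the SLI-rev-specific handlers to the
 * jump-table pointers that the wrapper routines dereference. */
static void example_init_api_table(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4) {
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
	} else {
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
	}
}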
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba error attention handling
+ * routine from the API jump table function pointer from the lpfc_hba struct.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
+void
+lpfc_handle_eratt(struct lpfc_hba *phba)
+{
+ (*phba->lpfc_handle_eratt)(phba);
+}
+
 /**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
@@ -1312,6 +1477,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 uint16_t dev_id = phba->pcidev->device;
 int max_speed;
 int GE = 0;
+ int oneConnect = 0; /* default is not a oneConnect */
 struct {
 char * name;
 int max_speed;
@@ -1457,6 +1623,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 case PCI_DEVICE_ID_PROTEUS_S:
 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
 break;
+ case PCI_DEVICE_ID_TIGERSHARK:
+ oneConnect = 1;
+ m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
+ break;
+ case PCI_DEVICE_ID_TIGERSHARK_S:
+ oneConnect = 1;
+ m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
+ break;
 default:
 m = (typeof(m)){ NULL };
 break;
@@ -1464,13 +1638,24 @@
 if (mdp && mdp[0] == '\0')
 snprintf(mdp, 79,"%s", m.name);
- if (descp && descp[0] == '\0')
- snprintf(descp, 255,
- "Emulex %s %d%s %s %s",
- m.name, m.max_speed,
- (GE) ? "GE" : "Gb",
- m.bus,
- (GE) ? "FCoE Adapter" : "Fibre Channel Adapter");
+ /* oneConnect HBAs require special processing; they are all initiators
+ * and we put the port number on the end.
+ */
+ if (descp && descp[0] == '\0') {
+ if (oneConnect)
+ snprintf(descp, 255,
+ "Emulex OneConnect %s, FCoE Initiator, Port %s",
+ m.name,
+ phba->Port);
+ else
+ snprintf(descp, 255,
+ "Emulex %s %d%s %s %s",
+ m.name, m.max_speed,
+ (GE) ? "GE" : "Gb",
+ m.bus,
+ (GE) ? "FCoE Adapter" :
"FCoE Adapter" : + "Fibre Channel Adapter"); + } } /** @@ -1911,14 +2096,21 @@ lpfc_online(struct lpfc_hba *phba) return 1; } - if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ - lpfc_unblock_mgmt_io(phba); - return 1; + if (phba->sli_rev == LPFC_SLI_REV4) { + if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ + lpfc_unblock_mgmt_io(phba); + return 1; + } + } else { + if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ + lpfc_unblock_mgmt_io(phba); + return 1; + } } vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { struct Scsi_Host *shost; shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); @@ -1980,11 +2172,12 @@ lpfc_offline_prep(struct lpfc_hba * phba) /* Issue an unreg_login to all nodes on all vports */ vports = lpfc_create_vport_work_array(phba); if (vports != NULL) { - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { struct Scsi_Host *shost; if (vports[i]->load_flag & FC_UNLOADING) continue; + vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; shost = lpfc_shost_from_vport(vports[i]); list_for_each_entry_safe(ndlp, next_ndlp, &vports[i]->fc_nodes, @@ -2029,11 +2222,11 @@ lpfc_offline(struct lpfc_hba *phba) if (phba->pport->fc_flag & FC_OFFLINE_MODE) return; - /* stop all timers associated with this hba */ - lpfc_stop_phba_timers(phba); + /* stop port and all timers associated with this hba */ + lpfc_stop_port(phba); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_stop_vport_timers(vports[i]); lpfc_destroy_vport_work_array(phba, vports); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, @@ -2046,7 +2239,7 @@ lpfc_offline(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->work_port_events = 0; @@ -2139,6 +2332,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_lun = vport->cfg_max_luns; shost->this_id = -1; shost->max_cmd_len = 16; + if (phba->sli_rev == LPFC_SLI_REV4) { + shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE; + shost->sg_tablesize = phba->cfg_sg_seg_cnt; + } /* * Set initial can_queue value since 0 is no longer supported and @@ -2156,6 +2353,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) /* Initialize all internally managed lists. */ INIT_LIST_HEAD(&vport->fc_nodes); + INIT_LIST_HEAD(&vport->rcv_buffer_list); spin_lock_init(&vport->work_port_lock); init_timer(&vport->fc_disctmo); @@ -2347,192 +2545,501 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost) } /** - * lpfc_enable_msix - Enable MSI-X interrupt mode + * lpfc_stop_port_s3 - Stop SLI3 device port * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to enable the MSI-X interrupt vectors. The kernel - * function pci_enable_msix() is called to enable the MSI-X vectors. Note that - * pci_enable_msix(), once invoked, enables either all or nothing, depending - * on the current availability of PCI vector resources. 
- * responsible for calling the individual request_irq() to register each MSI-X
- * vector with a interrupt handler, which is done in this function. Note that
- * later when device is unloading, the driver should always call free_irq()
- * on all MSI-X vectors it has done request_irq() on before calling
- * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
- * will be left with MSI-X enabled and leaks its vectors.
- *
- * Return codes
- * 0 - sucessful
- * other values - error
+ * This routine is invoked to stop an SLI3 device port; it stops the device
+ * from generating interrupts and stops the device driver's timers for the
+ * device.
 **/
-static int
-lpfc_enable_msix(struct lpfc_hba *phba)
+static void
+lpfc_stop_port_s3(struct lpfc_hba *phba)
 {
- int rc, i;
- LPFC_MBOXQ_t *pmb;
+ /* Clear all interrupt enable conditions */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ /* Clear all pending interrupts */
+ writel(0xffffffff, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
- /* Set up MSI-X multi-message vectors */
- for (i = 0; i < LPFC_MSIX_VECTORS; i++)
- phba->msix_entries[i].entry = i;
+ /* Reset some HBA SLI setup states */
+ lpfc_stop_hba_timers(phba);
+ phba->pport->work_port_events = 0;
+}
- /* Configure MSI-X capability structure */
- rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
- ARRAY_SIZE(phba->msix_entries));
- if (rc) {
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0420 PCI enable MSI-X failed (%d)\n", rc);
- goto msi_fail_out;
- } else
- for (i = 0; i < LPFC_MSIX_VECTORS; i++)
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0477 MSI-X entry[%d]: vector=x%x "
- "message=%d\n", i,
- phba->msix_entries[i].vector,
- phba->msix_entries[i].entry);
- /*
- * Assign MSI-X vectors to interrupt handlers
- */
+/**
+ * lpfc_stop_port_s4 - Stop SLI4 device port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to stop an SLI4 device port; it stops the device
+ * from generating interrupts and stops the device driver's timers for the
+ * device.
+ **/
+static void
+lpfc_stop_port_s4(struct lpfc_hba *phba)
+{
+ /* Reset some HBA SLI4 setup states */
+ lpfc_stop_hba_timers(phba);
+ phba->pport->work_port_events = 0;
+ phba->sli4_hba.intr_enable = 0;
+ /* Hard-clear it for now; a more graceful wait can be added later */
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+}
- /* vector-0 is associated to slow-path handler */
- rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
- IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
- if (rc) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "0421 MSI-X slow-path request_irq failed "
- "(%d)\n", rc);
- goto msi_fail_out;
- }
+/**
+ * lpfc_stop_port - Wrapper function for stopping hba port
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
+ * the API jump table function pointer from the lpfc_hba struct.
+ **/
+void
+lpfc_stop_port(struct lpfc_hba *phba)
+{
+ phba->lpfc_stop_port(phba);
+}
- /* vector-1 is associated to fast-path handler */
- rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler,
- IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
+/**
+ * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to remove the driver default fcf record from
+ * the port. This routine currently acts on FCF Index 0.
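+ * The body below also illustrates the general SLI4 mailbox pattern used
+ * throughout this patch: allocate a mboxq, frame it with lpfc_sli4_config(),
+ * issue it (polled when interrupts are disabled, waited otherwise), then
+ * read the completion status back from the cfg_shdr response words.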
+ *
+ **/
+void
+lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
+{
+ int rc = 0;
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
+ uint32_t mbox_tmo, req_len;
+ uint32_t shdr_status, shdr_add_status;
- if (rc) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "0429 MSI-X fast-path request_irq failed "
- "(%d)\n", rc);
- goto irq_fail_out;
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2020 Failed to allocate mbox for DEL_FCF cmd\n");
+ return;
 }
+ req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
+ sizeof(struct lpfc_sli4_cfg_mhdr);
+ rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
+ req_len, LPFC_SLI4_MBX_EMBED);
 /*
- * Configure HBA MSI-X attention conditions to messages
+ * In phase 1, there is a single FCF index, 0. In phase 2, the driver
+ * supports multiple FCF indices.
 */
- pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
+ bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
+ bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
+ phba->fcf.fcf_indx);
- if (!pmb) {
- rc = -ENOMEM;
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0474 Unable to allocate memory for issuing "
- "MBOX_CONFIG_MSI command\n");
- goto mem_fail_out;
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
 }
- rc = lpfc_config_msi(phba, pmb);
- if (rc)
- goto mbx_fail_out;
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
- if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
- "0351 Config MSI mailbox command failed, "
- "mbxCmd x%x, mbxStatus x%x\n",
- pmb->mb.mbxCommand, pmb->mb.mbxStatus);
- goto mbx_fail_out;
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr_status = bf_get(lpfc_mbox_hdr_status,
+ &del_fcf_record->header.cfg_shdr.response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &del_fcf_record->header.cfg_shdr.response);
+ if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2516 DEL FCF of default FCF Index failed "
+ "mbx status x%x, status x%x add_status x%x\n",
+ rc, shdr_status, shdr_add_status);
 }
-
- /* Free memory allocated for mailbox command */
- mempool_free(pmb, phba->mbox_mem_pool);
- return rc;
-
-mbx_fail_out:
- /* Free memory allocated for mailbox command */
- mempool_free(pmb, phba->mbox_mem_pool);
-
-mem_fail_out:
- /* free the irq already requested */
- free_irq(phba->msix_entries[1].vector, phba);
-
-irq_fail_out:
- /* free the irq already requested */
- free_irq(phba->msix_entries[0].vector, phba);
-
-msi_fail_out:
- /* Unconfigure MSI-X capability structure */
- pci_disable_msix(phba->pcidev);
- return rc;
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
 }

 /**
- * lpfc_disable_msix - Disable MSI-X interrupt mode
+ * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
 *
- * This routine is invoked to release the MSI-X vectors and then disable the
- * MSI-X interrupt mode.
+ * This routine is to parse the SLI4 link-attention link fault code and
+ * translate it into the base driver's read link attention mailbox command
+ * status.
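+ * All recognized fault codes (none, local, remote) map to status 0;
+ * anything else is logged and mapped to MBXERR_ERROR, as the switch
+ * statement below shows.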
+ *
+ * Return: Link-attention status in terms of base driver's coding.
 **/
-static void
-lpfc_disable_msix(struct lpfc_hba *phba)
+static uint16_t
+lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
+ struct lpfc_acqe_link *acqe_link)
 {
- int i;
+ uint16_t latt_fault;
- /* Free up MSI-X multi-message vectors */
- for (i = 0; i < LPFC_MSIX_VECTORS; i++)
- free_irq(phba->msix_entries[i].vector, phba);
- /* Disable MSI-X */
- pci_disable_msix(phba->pcidev);
+ switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
+ case LPFC_ASYNC_LINK_FAULT_NONE:
+ case LPFC_ASYNC_LINK_FAULT_LOCAL:
+ case LPFC_ASYNC_LINK_FAULT_REMOTE:
+ latt_fault = 0;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0398 Invalid link fault code: x%x\n",
+ bf_get(lpfc_acqe_link_fault, acqe_link));
+ latt_fault = MBXERR_ERROR;
+ break;
+ }
+ return latt_fault;
 }

 /**
- * lpfc_enable_msi - Enable MSI interrupt mode
+ * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
 *
- * This routine is invoked to enable the MSI interrupt mode. The kernel
- * function pci_enable_msi() is called to enable the MSI vector. The
- * device driver is responsible for calling the request_irq() to register
- * MSI vector with a interrupt the handler, which is done in this function.
+ * This routine is to parse the SLI4 link attention type and translate it
+ * into the base driver's link attention type coding.
 *
- * Return codes
- * 0 - sucessful
- * other values - error
- */
-static int
-lpfc_enable_msi(struct lpfc_hba *phba)
+ * Return: Link attention type in terms of base driver's coding.
+ **/
+static uint8_t
+lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
+ struct lpfc_acqe_link *acqe_link)
 {
- int rc;
-
- rc = pci_enable_msi(phba->pcidev);
- if (!rc)
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0462 PCI enable MSI mode success.\n");
- else {
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0471 PCI enable MSI mode failed (%d)\n", rc);
- return rc;
- }
+ uint8_t att_type;
- rc = request_irq(phba->pcidev->irq, lpfc_intr_handler,
- IRQF_SHARED, LPFC_DRIVER_NAME, phba);
- if (rc) {
- pci_disable_msi(phba->pcidev);
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "0478 MSI request_irq failed (%d)\n", rc);
+ switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
+ case LPFC_ASYNC_LINK_STATUS_DOWN:
+ case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
+ att_type = AT_LINK_DOWN;
+ break;
+ case LPFC_ASYNC_LINK_STATUS_UP:
+ /* Ignore physical link up events - wait for logical link up */
+ att_type = AT_RESERVED;
+ break;
+ case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
+ att_type = AT_LINK_UP;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0399 Invalid link attention type: x%x\n",
+ bf_get(lpfc_acqe_link_status, acqe_link));
+ att_type = AT_RESERVED;
+ break;
 }
- return rc;
+ return att_type;
 }

 /**
- * lpfc_disable_msi - Disable MSI interrupt mode
+ * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
 * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
 *
- * This routine is invoked to disable the MSI interrupt mode. The driver
- * calls free_irq() on MSI vector it has done request_irq() on before
- * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and
- * a device will be left with MSI enabled and leaks its vector.
- */
-
-static void
-lpfc_disable_msi(struct lpfc_hba *phba)
+ * This routine is to parse the SLI4 link-attention link speed and translate
+ * it into the base driver's link-attention link speed coding.
+ *
+ * Return: Link-attention link speed in terms of base driver's coding.
+ **/
+static uint8_t
+lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
+ struct lpfc_acqe_link *acqe_link)
 {
- free_irq(phba->pcidev->irq, phba);
- pci_disable_msi(phba->pcidev);
+ uint8_t link_speed;
+
+ switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
+ case LPFC_ASYNC_LINK_SPEED_ZERO:
+ link_speed = LA_UNKNW_LINK;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_10MBPS:
+ link_speed = LA_UNKNW_LINK;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_100MBPS:
+ link_speed = LA_UNKNW_LINK;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_1GBPS:
+ link_speed = LA_1GHZ_LINK;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_10GBPS:
+ link_speed = LA_10GHZ_LINK;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0483 Invalid link-attention link speed: x%x\n",
+ bf_get(lpfc_acqe_link_speed, acqe_link));
+ link_speed = LA_UNKNW_LINK;
+ break;
+ }
+ return link_speed;
+}
+
+/**
+ * lpfc_sli4_async_link_evt - Process the asynchronous link event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous link event.
+ **/
+static void
+lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
+ struct lpfc_acqe_link *acqe_link)
+{
+ struct lpfc_dmabuf *mp;
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+ READ_LA_VAR *la;
+ uint8_t att_type;
+
+ att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
+ if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
+ return;
+ pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0395 The mboxq allocation failed\n");
+ return;
+ }
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!mp) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0396 The lpfc_dmabuf allocation failed\n");
+ goto out_free_pmb;
+ }
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp->virt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0397 The mbuf allocation failed\n");
+ goto out_free_dmabuf;
+ }
+
+ /* Cleanup any outstanding ELS commands */
+ lpfc_els_flush_all_cmd(phba);
+
+ /* Block ELS IOCBs until we are done processing the link event */
+ phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+
+ /* Update link event statistics */
+ phba->sli.slistat.link_event++;
+
+ /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
+ lpfc_read_la(phba, pmb, mp);
+ pmb->vport = phba->pport;
+
+ /* Parse and translate status field */
+ mb = &pmb->u.mb;
+ mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
+
+ /* Parse and translate link attention fields */
+ la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
+ la->eventTag = acqe_link->event_tag;
+ la->attType = att_type;
+ la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
+
+ /* Fake the following irrelevant fields */
+ la->topology = TOPOLOGY_PT_PT;
+ la->granted_AL_PA = 0;
+ la->il = 0;
+ la->pb = 0;
+ la->fa = 0;
+ la->mm = 0;
+
+ /* Keep the link status for extra SLI4 state machine reference */
+ phba->sli4_hba.link_state.speed =
+ bf_get(lpfc_acqe_link_speed, acqe_link);
+ phba->sli4_hba.link_state.duplex =
+ bf_get(lpfc_acqe_link_duplex, acqe_link);
+ phba->sli4_hba.link_state.status =
+ bf_get(lpfc_acqe_link_status, acqe_link);
+ phba->sli4_hba.link_state.physical =
+ bf_get(lpfc_acqe_link_physical, acqe_link);
+ phba->sli4_hba.link_state.fault =
+ bf_get(lpfc_acqe_link_fault, acqe_link);
+
+ /* Invoke the lpfc_handle_latt mailbox command callback function */
+ lpfc_mbx_cmpl_read_la(phba, pmb);
+ return;
+
+out_free_dmabuf:
+ kfree(mp);
+out_free_pmb:
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_fcoe: pointer to the async fcoe completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous fcoe event.
+ **/
+static void
+lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
+ struct lpfc_acqe_fcoe *acqe_fcoe)
+{
+ uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
+ int rc;
+
+ switch (event_type) {
+ case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "2546 New FCF found index 0x%x tag 0x%x \n",
+ acqe_fcoe->fcf_index,
+ acqe_fcoe->event_tag);
+ /*
+ * If the current FCF is in discovered state,
+ * do nothing.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
+ spin_unlock_irq(&phba->hbalock);
+ break;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Read the FCF table and re-discover SAN. */
+ rc = lpfc_sli4_read_fcf_record(phba,
+ LPFC_FCOE_FCF_GET_FIRST);
+ if (rc)
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "2547 Read FCF record failed 0x%x\n",
+ rc);
+ break;
+
+ case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2548 FCF Table full count 0x%x tag 0x%x \n",
+ bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
+ acqe_fcoe->event_tag);
+ break;
+
+ case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "2549 FCF disconnected from network index 0x%x"
+ " tag 0x%x \n", acqe_fcoe->fcf_index,
+ acqe_fcoe->event_tag);
+ /* If the event is not for the currently used fcf, do nothing */
+ if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
+ break;
+ /*
+ * Currently, the driver supports only one FCF - so treat
+ * this as a link down.
+ */
+ lpfc_linkdown(phba);
+ /* Unregister FCF if no devices connected to it */
+ lpfc_unregister_unused_fcf(phba);
+ break;
+
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0288 Unknown FCoE event type 0x%x event tag "
+ "0x%x\n", event_type, acqe_fcoe->event_tag);
+ break;
+ }
+}
+
+/**
+ * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_dcbx: pointer to the async dcbx completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous dcbx event.
+ **/
+static void
+lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
+ struct lpfc_acqe_dcbx *acqe_dcbx)
+{
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0290 The SLI4 DCBX asynchronous event is not "
+ "handled yet\n");
+}
+
+/**
+ * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 asynchronous events.
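+ *
+ * Typical flow (illustrative): events are queued elsewhere in the driver
+ * as lpfc_cq_event entries on sp_asynce_work_queue with ASYNC_EVENT set
+ * in hba_flag; this routine clears the flag, drains the queue, and
+ * dispatches each entry by its MCQE trailer code.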
+ **/
+void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* First, declare the async event has been handled */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~ASYNC_EVENT;
+ spin_unlock_irq(&phba->hbalock);
+ /* Now, handle all the async events */
+ while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
+ /* Get the first event from the head of the event queue */
+ spin_lock_irq(&phba->hbalock);
+ list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
+ cq_event, struct lpfc_cq_event, list);
+ spin_unlock_irq(&phba->hbalock);
+ /* Process the asynchronous event */
+ switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
+ case LPFC_TRAILER_CODE_LINK:
+ lpfc_sli4_async_link_evt(phba,
+ &cq_event->cqe.acqe_link);
+ break;
+ case LPFC_TRAILER_CODE_FCOE:
+ lpfc_sli4_async_fcoe_evt(phba,
+ &cq_event->cqe.acqe_fcoe);
+ break;
+ case LPFC_TRAILER_CODE_DCBX:
+ lpfc_sli4_async_dcbx_evt(phba,
+ &cq_event->cqe.acqe_dcbx);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "1804 Invalid asynchronous event code: "
+ "x%x\n", bf_get(lpfc_trailer_code,
+ &cq_event->cqe.mcqe_cmpl));
+ break;
+ }
+ /* Free the completion event processed to the free pool */
+ lpfc_sli4_cq_event_release(phba, cq_event);
+ }
+}
+
+/**
+ * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
+ * @phba: pointer to lpfc hba data structure.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine is invoked to set up the per HBA PCI-Device group function
+ * API jump table entries.
+ *
+ * Return: 0 if success, otherwise -ENODEV
+ **/
+int
+lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+ int rc;
+
+ /* Set up lpfc PCI-device group */
+ phba->pci_dev_grp = dev_grp;
+
+ /* The LPFC_PCI_DEV_OC uses SLI4 */
+ if (dev_grp == LPFC_PCI_DEV_OC)
+ phba->sli_rev = LPFC_SLI_REV4;
+
+ /* Set up device INIT API function jump table */
+ rc = lpfc_init_api_table_setup(phba, dev_grp);
+ if (rc)
+ return -ENODEV;
+ /* Set up SCSI API function jump table */
+ rc = lpfc_scsi_api_table_setup(phba, dev_grp);
+ if (rc)
+ return -ENODEV;
+ /* Set up SLI API function jump table */
+ rc = lpfc_sli_api_table_setup(phba, dev_grp);
+ if (rc)
+ return -ENODEV;
+ /* Set up MBOX API function jump table */
+ rc = lpfc_mbox_api_table_setup(phba, dev_grp);
+ if (rc)
+ return -ENODEV;
+
+ return 0;
}

 /**
@@ -2764,102 +3271,393 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
 }

 /**
- * lpfc_init_api_table_setup - Set up init api fucntion jump table
- * @phba: The hba struct for which this call is being executed.
- * @dev_grp: The HBA PCI-Device group number.
- *
- * This routine sets up the device INIT interface API function jump table
- * in @phba struct.
- *
- * Returns: 0 - success, -ENODEV - failure.
- **/
-int
-lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
-{
- switch (dev_grp) {
- case LPFC_PCI_DEV_LP:
- phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
- phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
- phba->lpfc_stop_port = lpfc_stop_port_s3;
- break;
- default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1431 Invalid HBA PCI-device group: 0x%x\n",
- dev_grp);
- return -ENODEV;
- break;
- }
- return 0;
-}
-
-/**
- * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources.
+ * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
- * This routine is invoked to set up the driver internal resources before the
- * device specific resource setup to support the HBA device it attached to.
+ * This routine is invoked to set up the driver internal resources specific to
+ * support the SLI-4 HBA device it is attached to.
 *
 * Return codes
- * 0 - sucessful
- * other values - error
+ * 0 - successful
+ * other values - error
 **/
 static int
-lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
+lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 {
+ struct lpfc_sli *psli;
+ int rc;
+ int i, hbq_count;
+
+ /* Before proceeding, wait for POST done and device ready */
+ rc = lpfc_sli4_post_status_check(phba);
+ if (rc)
+ return -ENODEV;
+ /*
- * Driver resources common to all SLI revisions
+ * Initialize timers used by driver
 */
- atomic_set(&phba->fast_event_count, 0);
- spin_lock_init(&phba->hbalock);
- /* Initialize ndlp management spinlock */
- spin_lock_init(&phba->ndlp_lock);
+ /* Heartbeat timer */
+ init_timer(&phba->hb_tmofunc);
+ phba->hb_tmofunc.function = lpfc_hb_timeout;
+ phba->hb_tmofunc.data = (unsigned long)phba;
- INIT_LIST_HEAD(&phba->port_list);
- INIT_LIST_HEAD(&phba->work_list);
- init_waitqueue_head(&phba->wait_4_mlo_m_q);
+ psli = &phba->sli;
+ /* MBOX heartbeat timer */
+ init_timer(&psli->mbox_tmo);
+ psli->mbox_tmo.function = lpfc_mbox_timeout;
+ psli->mbox_tmo.data = (unsigned long) phba;
+ /* Fabric block timer */
+ init_timer(&phba->fabric_block_timer);
+ phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
+ phba->fabric_block_timer.data = (unsigned long) phba;
+ /* EA polling mode timer */
+ init_timer(&phba->eratt_poll);
+ phba->eratt_poll.function = lpfc_poll_eratt;
+ phba->eratt_poll.data = (unsigned long) phba;
+ /*
+ * We need to do a READ_CONFIG mailbox command here before
+ * calling lpfc_get_cfgparam. For VFs this will report the
+ * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
+ * All of the resources allocated
+ * for this Port are tied to these values.
+ */
+ /* Get all the module params for configuring this host */
+ lpfc_get_cfgparam(phba);
+ phba->max_vpi = LPFC_MAX_VPI;
+ /* This will be set to correct value after the read_config mbox */
+ phba->max_vports = 0;
- /* Initialize the wait queue head for the kernel thread */
- init_waitqueue_head(&phba->work_waitq);
+ /* Program the default value of vlan_id and fc_map */
+ phba->valid_vlan = 0;
+ phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
+ phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
+ phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
- /* Initialize the scsi buffer list used by driver for scsi IO */
- spin_lock_init(&phba->scsi_buf_list_lock);
- INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+ /*
+ * Since the sg_tablesize is module parameter, the sg_dma_buf_size
+ * used to create the sg_dma_buf_pool must be dynamically calculated.
+ * 2 segments are added since the IOCB needs a command and response bde.
+ * To ensure that the scsi sgl does not cross a 4k page boundary, only
+ * sgl sizes of 1k, 2k, 4k, and 8k are supported.
+ * Table of sgl sizes and seg_cnt:
+ * sgl size, sg_seg_cnt total seg
+ * 1k 50 52
+ * 2k 114 116
+ * 4k 242 244
+ * 8k 498 500
+ * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
+ * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
+ * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
+ * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
+ */
+ if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
+ phba->cfg_sg_seg_cnt = 50;
+ else if (phba->cfg_sg_seg_cnt <= 114)
+ phba->cfg_sg_seg_cnt = 114;
+ else if (phba->cfg_sg_seg_cnt <= 242)
+ phba->cfg_sg_seg_cnt = 242;
+ else
+ phba->cfg_sg_seg_cnt = 498;
- /* Initialize the fabric iocb list */
- INIT_LIST_HEAD(&phba->fabric_iocb_list);
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp);
+ phba->cfg_sg_dma_buf_size +=
+ ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
- /* Initialize list to save ELS buffers */
- INIT_LIST_HEAD(&phba->elsbuf);
+ /* Initialize buffer queue management fields */
+ hbq_count = lpfc_sli_hbq_count();
+ for (i = 0; i < hbq_count; ++i)
+ INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
+ INIT_LIST_HEAD(&phba->rb_pend_list);
+ phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
+ phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
- /* Initialize FCF connection rec list */
- INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
+ /*
+ * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
+ */
+ /* Initialize the Abort scsi buffer list used by driver */
+ spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ /* This abort list used by worker thread */
+ spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
- return 0;
-}
+ /*
+ * Initialize driver internal slow-path work queues
+ */
-/**
- * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to set up the driver internal resources after the
- * device specific resource setup to support the HBA device it attached to.
- *
- * Return codes
- * 0 - sucessful
- * other values - error
- **/
-static int
-lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
-{
- int error;
+ /* Driver internal slow-path CQ Event pool */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
+ /* Response IOCB work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
+ /* Asynchronous event CQ Event work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
+ /* Fast-path XRI aborted CQ Event work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
+ /* Slow-path XRI aborted CQ Event work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
+ /* Receive queue CQ Event work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
+
+ /* Initialize the driver internal SLI layer lists. */
+ lpfc_sli_setup(phba);
+ lpfc_sli_queue_setup(phba);
- /* Startup the kernel thread for this host adapter. */
- phba->worker_thread = kthread_run(lpfc_do_work, phba,
- "lpfc_worker_%d", phba->brd_no);
- if (IS_ERR(phba->worker_thread)) {
- error = PTR_ERR(phba->worker_thread);
- return error;
+ /* Allocate device driver memory */
+ rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
+ if (rc)
+ return -ENOMEM;
+
+ /* Create the bootstrap mailbox command */
+ rc = lpfc_create_bootstrap_mbox(phba);
+ if (unlikely(rc))
+ goto out_free_mem;
+
+ /* Set up the host's endian order with the device.
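+ * (Ordering note: at this stage the bootstrap mailbox created above is,
+ * presumably, the only channel available for mailbox traffic, so the
+ * endian-order command must follow lpfc_create_bootstrap_mbox().)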
+ */
+ rc = lpfc_setup_endian_order(phba);
+ if (unlikely(rc))
+ goto out_free_bsmbx;
+
+ /* Set up the hba's configuration parameters. */
+ rc = lpfc_sli4_read_config(phba);
+ if (unlikely(rc))
+ goto out_free_bsmbx;
+
+ /* Perform a function reset */
+ rc = lpfc_pci_function_reset(phba);
+ if (unlikely(rc))
+ goto out_free_bsmbx;
+
+ /* Create all the SLI4 queues */
+ rc = lpfc_sli4_queue_create(phba);
+ if (rc)
+ goto out_free_bsmbx;
+
+ /* Create driver internal CQE event pool */
+ rc = lpfc_sli4_cq_event_pool_create(phba);
+ if (rc)
+ goto out_destroy_queue;
+
+ /* Initialize and populate the iocb list per host */
+ rc = lpfc_init_sgl_list(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1400 Failed to initialize sgl list.\n");
+ goto out_destroy_cq_event_pool;
+ }
+ rc = lpfc_init_active_sgl_array(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1430 Failed to initialize active sgl array.\n");
+ goto out_free_sgl_list;
+ }
+
+ rc = lpfc_sli4_init_rpi_hdrs(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1432 Failed to initialize rpi headers.\n");
+ goto out_free_active_sgl;
+ }
+
+ phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
+ phba->cfg_fcp_eq_count), GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_eq_hdl) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2572 Failed to allocate memory for fast-path "
+ "per-EQ handle array\n");
+ goto out_remove_rpi_hdrs;
+ }
+
+ phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
+ phba->sli4_hba.cfg_eqn), GFP_KERNEL);
+ if (!phba->sli4_hba.msix_entries) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2573 Failed to allocate memory for msi-x "
+ "interrupt vector entries\n");
+ goto out_free_fcp_eq_hdl;
+ }
+
+ return rc;
+
+out_free_fcp_eq_hdl:
+ kfree(phba->sli4_hba.fcp_eq_hdl);
+out_remove_rpi_hdrs:
+ lpfc_sli4_remove_rpi_hdrs(phba);
+out_free_active_sgl:
+ lpfc_free_active_sgl(phba);
+out_free_sgl_list:
+ lpfc_free_sgl_list(phba);
+out_destroy_cq_event_pool:
+ lpfc_sli4_cq_event_pool_destroy(phba);
+out_destroy_queue:
+ lpfc_sli4_queue_destroy(phba);
+out_free_bsmbx:
+ lpfc_destroy_bootstrap_mbox(phba);
+out_free_mem:
+ lpfc_mem_free(phba);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up
+ * specifically to support the SLI-4 HBA device it is attached to.
+ **/
+static void
+lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
+{
+ struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
+
+ /* unregister default FCFI from the HBA */
+ lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
+
+ /* Free the default FCF table */
+ lpfc_sli_remove_dflt_fcf(phba);
+
+ /* Free memory allocated for msi-x interrupt vector entries */
+ kfree(phba->sli4_hba.msix_entries);
+
+ /* Free memory allocated for fast-path work queue handles */
+ kfree(phba->sli4_hba.fcp_eq_hdl);
+
+ /* Free the allocated rpi headers. */
+ lpfc_sli4_remove_rpi_hdrs(phba);
+
+ /* Free the ELS sgl list */
+ lpfc_free_active_sgl(phba);
+ lpfc_free_sgl_list(phba);
+
+ /* Free the SCSI sgl management array */
+ kfree(phba->sli4_hba.lpfc_scsi_psb_array);
+
+ /* Free the SLI4 queues */
+ lpfc_sli4_queue_destroy(phba);
+
+ /* Free the completion queue EQ event pool */
+ lpfc_sli4_cq_event_release_all(phba);
+ lpfc_sli4_cq_event_pool_destroy(phba);
+
+ /* Reset SLI4 HBA FCoE function */
+ lpfc_pci_function_reset(phba);
+
+ /* Free the bsmbx region.
+ */
+ lpfc_destroy_bootstrap_mbox(phba);
+
+ /* Free the SLI Layer memory with SLI4 HBAs */
+ lpfc_mem_free_all(phba);
+
+ /* Free the current connect table */
+ list_for_each_entry_safe(conn_entry, next_conn_entry,
+ &phba->fcf_conn_rec_list, list)
+ kfree(conn_entry);
+
+ return;
+}
+
+/**
+ * lpfc_init_api_table_setup - Set up init api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the device INIT interface API function jump table
+ * in @phba struct.
+ *
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+ switch (dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
+ phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
+ phba->lpfc_stop_port = lpfc_stop_port_s3;
+ break;
+ case LPFC_PCI_DEV_OC:
+ phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
+ phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
+ phba->lpfc_stop_port = lpfc_stop_port_s4;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1431 Invalid HBA PCI-device group: 0x%x\n",
+ dev_grp);
+ return -ENODEV;
+ break;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources before the
+ * device specific resource setup to support the HBA device it is attached to.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
+{
+ /*
+ * Driver resources common to all SLI revisions
+ */
+ atomic_set(&phba->fast_event_count, 0);
+ spin_lock_init(&phba->hbalock);
+
+ /* Initialize ndlp management spinlock */
+ spin_lock_init(&phba->ndlp_lock);
+
+ INIT_LIST_HEAD(&phba->port_list);
+ INIT_LIST_HEAD(&phba->work_list);
+ init_waitqueue_head(&phba->wait_4_mlo_m_q);
+
+ /* Initialize the wait queue head for the kernel thread */
+ init_waitqueue_head(&phba->work_waitq);
+
+ /* Initialize the scsi buffer list used by driver for scsi IO */
+ spin_lock_init(&phba->scsi_buf_list_lock);
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+
+ /* Initialize the fabric iocb list */
+ INIT_LIST_HEAD(&phba->fabric_iocb_list);
+
+ /* Initialize list to save ELS buffers */
+ INIT_LIST_HEAD(&phba->elsbuf);
+
+ /* Initialize FCF connection rec list */
+ INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
+
+ return 0;
+}
+
+/**
+ * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources after the
+ * device specific resource setup to support the HBA device it is attached to.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
+{
+ int error;
+
+ /* Startup the kernel thread for this host adapter. */
+ phba->worker_thread = kthread_run(lpfc_do_work, phba,
+ "lpfc_worker_%d", phba->brd_no);
+ if (IS_ERR(phba->worker_thread)) {
+ error = PTR_ERR(phba->worker_thread);
+ return error;
 }
 return 0;
@@ -2956,92 +3754,432 @@ out_free_iocbq:
 }

 /**
- * lpfc_hba_alloc - Allocate driver hba data structure for a device.
- * @pdev: pointer to pci device data structure.
- *
- * This routine is invoked to allocate the driver hba data structure for an
- * HBA device. If the allocation is successful, the phba reference to the
- * PCI device data structure is set.
+ * lpfc_free_sgl_list - Free sgl list.
+ * @phba: pointer to lpfc hba data structure.
 *
- * Return codes
- * pointer to @phba - sucessful
- * NULL - error
+ * This routine is invoked to free the driver's sgl list and memory.
 **/
-static struct lpfc_hba *
-lpfc_hba_alloc(struct pci_dev *pdev)
+static void
+lpfc_free_sgl_list(struct lpfc_hba *phba)
 {
- struct lpfc_hba *phba;
-
- /* Allocate memory for HBA structure */
- phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
- if (!phba) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1417 Failed to allocate hba struct.\n");
- return NULL;
- }
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+ LIST_HEAD(sglq_list);
+ int rc = 0;
- /* Set reference to PCI device in HBA structure */
- phba->pcidev = pdev;
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
+ spin_unlock_irq(&phba->hbalock);
- /* Assign an unused board number */
- phba->brd_no = lpfc_get_instance();
- if (phba->brd_no < 0) {
- kfree(phba);
- return NULL;
+ list_for_each_entry_safe(sglq_entry, sglq_next,
+ &sglq_list, list) {
+ list_del(&sglq_entry->list);
+ lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
+ kfree(sglq_entry);
+ phba->sli4_hba.total_sglq_bufs--;
+ }
+ rc = lpfc_sli4_remove_all_sgl_pages(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2005 Unable to deregister pages from HBA: %x", rc);
 }
+ kfree(phba->sli4_hba.lpfc_els_sgl_array);
+}
- return phba;
+/**
+ * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate the driver's active sgl memory.
+ * This array will hold the sglq_entry's for active IOs.
+ **/
+static int
+lpfc_init_active_sgl_array(struct lpfc_hba *phba)
+{
+ int size;
+ size = sizeof(struct lpfc_sglq *);
+ size *= phba->sli4_hba.max_cfg_param.max_xri;
+
+ phba->sli4_hba.lpfc_sglq_active_list =
+ kzalloc(size, GFP_KERNEL);
+ if (!phba->sli4_hba.lpfc_sglq_active_list)
+ return -ENOMEM;
+ return 0;
 }

 /**
- * lpfc_hba_free - Free driver hba data structure with a device.
+ * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
- * This routine is invoked to free the driver hba data structure with an
- * HBA device.
+ * This routine is invoked to walk through the array of active sglq entries
+ * and free all of the resources.
+ * This is just a placeholder for now.
 **/
 static void
-lpfc_hba_free(struct lpfc_hba *phba)
+lpfc_free_active_sgl(struct lpfc_hba *phba)
 {
- /* Release the driver assigned board number */
- idr_remove(&lpfc_hba_index, phba->brd_no);
-
- kfree(phba);
- return;
+ kfree(phba->sli4_hba.lpfc_sglq_active_list);
 }

 /**
- * lpfc_create_shost - Create hba physical port with associated scsi host.
+ * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
- * This routine is invoked to create HBA physical port and associate a SCSI
- * host with it.
+ * This routine is invoked to allocate and initialize the driver's sgl
+ * list and set up the sgl xritag tag array accordingly.
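+ * For example (illustrative numbers only): with max_xri of 1024 and an
+ * ELS XRI count of 64, the remaining 960 XRIs are set aside for SCSI
+ * buffers through scsi_xri_max below.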
 *
 * Return codes
- * 0 - sucessful
- * other values - error
+ * 0 - successful
+ * other values - error
 **/
 static int
-lpfc_create_shost(struct lpfc_hba *phba)
+lpfc_init_sgl_list(struct lpfc_hba *phba)
 {
- struct lpfc_vport *vport;
- struct Scsi_Host *shost;
+ struct lpfc_sglq *sglq_entry = NULL;
+ int i;
+ int els_xri_cnt;
+
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2400 lpfc_init_sgl_list els %d.\n",
+ els_xri_cnt);
+ /* Initialize and populate the sglq list per host/VF. */
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+ /* Sanity check on XRI management */
+ if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2562 No room left for SCSI XRI allocation: "
+ "max_xri=%d, els_xri=%d\n",
+ phba->sli4_hba.max_cfg_param.max_xri,
+ els_xri_cnt);
+ return -ENOMEM;
+ }
- /* Initialize HBA FC structure */
- phba->fc_edtov = FF_DEF_EDTOV;
- phba->fc_ratov = FF_DEF_RATOV;
- phba->fc_altov = FF_DEF_ALTOV;
- phba->fc_arbtov = FF_DEF_ARBTOV;
+ /* Allocate memory for the ELS XRI management array */
+ phba->sli4_hba.lpfc_els_sgl_array =
+ kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
+ GFP_KERNEL);
- vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
- if (!vport)
- return -ENODEV;
+ if (!phba->sli4_hba.lpfc_els_sgl_array) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2401 Failed to allocate memory for ELS "
+ "XRI management array of size %d.\n",
+ els_xri_cnt);
+ return -ENOMEM;
+ }
- shost = lpfc_shost_from_vport(vport);
- phba->pport = vport;
- lpfc_debugfs_initialize(vport);
- /* Put reference to SCSI host to driver's device private data */
- pci_set_drvdata(phba->pcidev, shost);
+ /* Keep the SCSI XRI into the XRI management array */
+ phba->sli4_hba.scsi_xri_max =
+ phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+ phba->sli4_hba.scsi_xri_cnt = 0;
+
+ phba->sli4_hba.lpfc_scsi_psb_array =
+ kzalloc((sizeof(struct lpfc_scsi_buf *) *
+ phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
+
+ if (!phba->sli4_hba.lpfc_scsi_psb_array) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2563 Failed to allocate memory for SCSI "
+ "XRI management array of size %d.\n",
+ phba->sli4_hba.scsi_xri_max);
+ kfree(phba->sli4_hba.lpfc_els_sgl_array);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < els_xri_cnt; i++) {
+ sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
+ if (sglq_entry == NULL) {
+ printk(KERN_ERR "%s: only allocated %d sgls of "
+ "expected %d count. Unloading driver.\n",
+ __func__, i, els_xri_cnt);
+ goto out_free_mem;
+ }
+
+ sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
+ if (sglq_entry->sli4_xritag == NO_XRI) {
+ kfree(sglq_entry);
+ printk(KERN_ERR "%s: failed to allocate XRI.\n"
+ "Unloading driver.\n", __func__);
+ goto out_free_mem;
+ }
+ sglq_entry->buff_type = GEN_BUFF_TYPE;
+ sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
+ if (sglq_entry->virt == NULL) {
+ kfree(sglq_entry);
+ printk(KERN_ERR "%s: failed to allocate mbuf.\n"
+ "Unloading driver.\n", __func__);
+ goto out_free_mem;
+ }
+ sglq_entry->sgl = sglq_entry->virt;
+ memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
+
+ /* The list order is used by later block SGL registration */
+ spin_lock_irq(&phba->hbalock);
+ list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
+ phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
+ phba->sli4_hba.total_sglq_bufs++;
+ spin_unlock_irq(&phba->hbalock);
+ }
+ return 0;
+
+out_free_mem:
+ kfree(phba->sli4_hba.lpfc_scsi_psb_array);
+ lpfc_free_sgl_list(phba);
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * HBA consistent with the SLI-4 interface spec. This routine
+ * posts a PAGE_SIZE memory region to the port to hold up to
+ * PAGE_SIZE / 64 rpi context headers.
+ * No locks are held here because this is an initialization routine
+ * called only from probe or lpfc_online when interrupts are not
+ * enabled and the driver is reinitializing the device.
+ *
+ * Return codes
+ * 0 - successful
+ * ENOMEM - No available memory
+ * EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
+{
+ int rc = 0;
+ int longs;
+ uint16_t rpi_count;
+ struct lpfc_rpi_hdr *rpi_hdr;
+
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
+
+ /*
+ * Provision an rpi bitmask range for discovery. The total count
+ * is the difference between max and base + 1.
+ */
+ rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
+ phba->sli4_hba.max_cfg_param.max_rpi - 1;
+
+ longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.rpi_bmask)
+ return -ENOMEM;
+
+ rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
+ if (!rpi_hdr) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0391 Error during rpi post operation\n");
+ lpfc_sli4_remove_rpis(phba);
+ rc = -ENODEV;
+ }
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate a single 4KB memory region to
+ * support rpis and stores them in the phba. This single region
+ * provides support for up to 64 rpis. The region is used globally
+ * by the device.
+ *
+ * Returns:
+ * A valid rpi hdr on success.
+ * A NULL pointer on any failure.
+ **/
+struct lpfc_rpi_hdr *
+lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
+{
+ uint16_t rpi_limit, curr_rpi_range;
+ struct lpfc_dmabuf *dmabuf;
+ struct lpfc_rpi_hdr *rpi_hdr;
+
+ rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
+ phba->sli4_hba.max_cfg_param.max_rpi - 1;
+
+ spin_lock_irq(&phba->hbalock);
+ curr_rpi_range = phba->sli4_hba.next_rpi;
+ spin_unlock_irq(&phba->hbalock);
+
+ /*
+ * The port has a limited number of rpis.
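+ * (Concretely, assuming LPFC_RPI_HDR_COUNT is 64: with rpi_base 0 and
+ * max_rpi 64, rpi_limit is 63, so the check below creates a new region
+ * only while next_rpi + 63 stays at or below that limit.)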
+ * The increment here is LPFC_RPI_HDR_COUNT - 1 to account for the
+ * starting value and to allow the full max_rpi range per port.
+ */
+ if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
+ return NULL;
+
+ /*
+ * First allocate the protocol header region for the port. The
+ * port expects a 4KB DMA-mapped memory region that is 4K aligned.
+ */
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ return NULL;
+
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ LPFC_HDR_TEMPLATE_SIZE,
+ &dmabuf->phys,
+ GFP_KERNEL);
+ if (!dmabuf->virt) {
+ rpi_hdr = NULL;
+ goto err_free_dmabuf;
+ }
+
+ memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
+ if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
+ rpi_hdr = NULL;
+ goto err_free_coherent;
+ }
+
+ /* Save the rpi header data for cleanup later. */
+ rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
+ if (!rpi_hdr)
+ goto err_free_coherent;
+
+ rpi_hdr->dmabuf = dmabuf;
+ rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
+ rpi_hdr->page_count = 1;
+ spin_lock_irq(&phba->hbalock);
+ rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
+ list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
+
+ /*
+ * The next_rpi stores the next modulo-64 rpi value to post
+ * in any subsequent rpi memory region postings.
+ */
+ phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
+ spin_unlock_irq(&phba->hbalock);
+ return rpi_hdr;
+
+ err_free_coherent:
+ dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ err_free_dmabuf:
+ kfree(dmabuf);
+ return NULL;
+}
+
+/**
+ * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to remove all memory resources allocated
+ * to support rpis. This routine presumes the caller has released all
+ * rpis consumed by fabric or port logins and is prepared to have
+ * the header pages removed.
+ **/
+void
+lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
+{
+ struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
+
+ list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
+ &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+ list_del(&rpi_hdr->list);
+ dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
+ rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
+ kfree(rpi_hdr->dmabuf);
+ kfree(rpi_hdr);
+ }
+
+ phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
+ memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
+}
+
+/**
+ * lpfc_hba_alloc - Allocate driver hba data structure for a device.
+ * @pdev: pointer to pci device data structure.
+ *
+ * This routine is invoked to allocate the driver hba data structure for an
+ * HBA device. If the allocation is successful, the phba reference to the
+ * PCI device data structure is set.
+ *
+ * Return codes
+ * pointer to @phba - successful
+ * NULL - error
+ **/
+static struct lpfc_hba *
+lpfc_hba_alloc(struct pci_dev *pdev)
+{
+ struct lpfc_hba *phba;
+
+ /* Allocate memory for HBA structure */
+ phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
+ if (!phba) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1417 Failed to allocate hba struct.\n");
+ return NULL;
+ }
+
+ /* Set reference to PCI device in HBA structure */
+ phba->pcidev = pdev;
+
+ /* Assign an unused board number */
+ phba->brd_no = lpfc_get_instance();
+ if (phba->brd_no < 0) {
+ kfree(phba);
+ return NULL;
+ }
+
+ return phba;
+}
+
+/**
+ * lpfc_hba_free - Free driver hba data structure with a device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver hba data structure with an
+ * HBA device.
+ **/
+static void
+lpfc_hba_free(struct lpfc_hba *phba)
+{
+ /* Release the driver assigned board number */
+ idr_remove(&lpfc_hba_index, phba->brd_no);
+
+ kfree(phba);
+ return;
+}
+
+/**
+ * lpfc_create_shost - Create hba physical port with associated scsi host.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to create HBA physical port and associate a SCSI
+ * host with it.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_create_shost(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct Scsi_Host *shost;
+
+ /* Initialize HBA FC structure */
+ phba->fc_edtov = FF_DEF_EDTOV;
+ phba->fc_ratov = FF_DEF_RATOV;
+ phba->fc_altov = FF_DEF_ALTOV;
+ phba->fc_arbtov = FF_DEF_ARBTOV;
+
+ vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
+ if (!vport)
+ return -ENODEV;
+
+ shost = lpfc_shost_from_vport(vport);
+ phba->pport = vport;
+ lpfc_debugfs_initialize(vport);
+ /* Put reference to SCSI host to driver's device private data */
+ pci_set_drvdata(phba->pcidev, shost);
 return 0;
 }
@@ -3316,340 +4454,2755 @@ lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
 }

 /**
- * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
+ * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
- * This routine is invoked to enable the MSI-X interrupt vectors to device
- * with SLI-3 interface specs. The kernel function pci_enable_msix() is
- * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
- * invoked, enables either all or nothing, depending on the current
- * availability of PCI vector resources. The device driver is responsible
- * for calling the individual request_irq() to register each MSI-X vector
- * with a interrupt handler, which is done in this function. Note that
- * later when device is unloading, the driver should always call free_irq()
- * on all MSI-X vectors it has done request_irq() on before calling
- * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
- * will be left with MSI-X enabled and leaks its vectors.
+ * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
+ * done and check status.
 *
- * Return codes
- * 0 - sucessful
- * other values - error
+ * Return 0 if successful, otherwise -ENODEV.
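+ *
+ * Example call site (sketch; the SLI4 setup path in this patch uses it
+ * the same way):
+ *
+ *	rc = lpfc_sli4_post_status_check(phba);
+ *	if (rc)
+ *		return -ENODEV;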
 **/
-static int
-lpfc_sli_enable_msix(struct lpfc_hba *phba)
+int
+lpfc_sli4_post_status_check(struct lpfc_hba *phba)
 {
- int rc, i;
- LPFC_MBOXQ_t *pmb;
+ struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
+ uint32_t onlnreg0, onlnreg1;
+ int i, port_error = -ENODEV;
- /* Set up MSI-X multi-message vectors */
- for (i = 0; i < LPFC_MSIX_VECTORS; i++)
- phba->msix_entries[i].entry = i;
+ if (!phba->sli4_hba.STAregaddr)
+ return -ENODEV;
- /* Configure MSI-X capability structure */
- rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
- ARRAY_SIZE(phba->msix_entries));
- if (rc) {
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0420 PCI enable MSI-X failed (%d)\n", rc);
- goto msi_fail_out;
+ /* With an unrecoverable error, log the error message and return error */
+ onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
+ onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
+ if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
+ uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
+ uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
+ if (uerrlo_reg.word0 || uerrhi_reg.word0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1422 HBA Unrecoverable error: "
+ "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+ "online0_reg=0x%x, online1_reg=0x%x\n",
+ uerrlo_reg.word0, uerrhi_reg.word0,
+ onlnreg0, onlnreg1);
+ }
+ return -ENODEV;
 }
- for (i = 0; i < LPFC_MSIX_VECTORS; i++)
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0477 MSI-X entry[%d]: vector=x%x "
- "message=%d\n", i,
- phba->msix_entries[i].vector,
- phba->msix_entries[i].entry);
- /*
- * Assign MSI-X vectors to interrupt handlers
- */
- /* vector-0 is associated to slow-path handler */
- rc = request_irq(phba->msix_entries[0].vector,
- &lpfc_sli_sp_intr_handler, IRQF_SHARED,
- LPFC_SP_DRIVER_HANDLER_NAME, phba);
- if (rc) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "0421 MSI-X slow-path request_irq failed "
- "(%d)\n", rc);
- goto msi_fail_out;
+ /* Wait up to 30 seconds for the SLI Port POST done and ready */
+ for (i = 0; i < 3000; i++) {
+ sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
+ /* Encounter fatal POST error, break out */
+ if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
+ port_error = -ENODEV;
+ break;
+ }
+ if (LPFC_POST_STAGE_ARMFW_READY ==
+ bf_get(lpfc_hst_state_port_status, &sta_reg)) {
+ port_error = 0;
+ break;
+ }
+ msleep(10);
 }
- /* vector-1 is associated to fast-path handler */
- rc = request_irq(phba->msix_entries[1].vector,
- &lpfc_sli_fp_intr_handler, IRQF_SHARED,
- LPFC_FP_DRIVER_HANDLER_NAME, phba);
+ if (port_error)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1408 Failure HBA POST Status: sta_reg=0x%x, "
+ "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
+ "dl=x%x, pstatus=x%x\n", sta_reg.word0,
+ bf_get(lpfc_hst_state_perr, &sta_reg),
+ bf_get(lpfc_hst_state_sfi, &sta_reg),
+ bf_get(lpfc_hst_state_nip, &sta_reg),
+ bf_get(lpfc_hst_state_ipc, &sta_reg),
+ bf_get(lpfc_hst_state_xrom, &sta_reg),
+ bf_get(lpfc_hst_state_dl, &sta_reg),
+ bf_get(lpfc_hst_state_port_status, &sta_reg));
+
+ /* Log device information */
+ scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
+ "FeatureL1=0x%x, FeatureL2=0x%x\n",
+ bf_get(lpfc_scratchpad_chiptype, &scratchpad),
+ bf_get(lpfc_scratchpad_slirev, &scratchpad),
+ bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
+ bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
+
+ return port_error;
+}
- if (rc) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"0429 MSI-X fast-path request_irq failed "
-				"(%d)\n", rc);
-		goto irq_fail_out;
-	}
+/**
+ * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up SLI4 BAR0 PCI config space register
+ * memory map.
+ **/
+static void
+lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
+{
+	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_UERR_STATUS_LO;
+	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_UERR_STATUS_HI;
+	phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_ONLINE0;
+	phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_ONLINE1;
+	phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_SCRATCHPAD;
+}
 
-	/*
-	 * Configure HBA MSI-X attention conditions to messages
-	 */
-	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+/**
+ * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
+ * memory map.
+ **/
+static void
+lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
+{
-	if (!pmb) {
-		rc = -ENOMEM;
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0474 Unable to allocate memory for issuing "
-				"MBOX_CONFIG_MSI command\n");
-		goto mem_fail_out;
-	}
-	rc = lpfc_config_msi(phba, pmb);
-	if (rc)
-		goto mbx_fail_out;
-	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
-	if (rc != MBX_SUCCESS) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
-				"0351 Config MSI mailbox command failed, "
-				"mbxCmd x%x, mbxStatus x%x\n",
-				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
-		goto mbx_fail_out;
-	}
-
-	/* Free memory allocated for mailbox command */
-	mempool_free(pmb, phba->mbox_mem_pool);
-	return rc;
-
-mbx_fail_out:
-	/* Free memory allocated for mailbox command */
-	mempool_free(pmb, phba->mbox_mem_pool);
-
-mem_fail_out:
-	/* free the irq already requested */
-	free_irq(phba->msix_entries[1].vector, phba);
-
-irq_fail_out:
-	/* free the irq already requested */
-	free_irq(phba->msix_entries[0].vector, phba);
-
-msi_fail_out:
-	/* Unconfigure MSI-X capability structure */
-	pci_disable_msix(phba->pcidev);
-	return rc;
+	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+					LPFC_HST_STATE;
+	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+					LPFC_HST_ISR0;
+	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+					LPFC_HST_IMR0;
+	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+					LPFC_HST_ISCR0;
+	return;
 }
 
 /**
- * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
+ * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
  * @phba: pointer to lpfc hba data structure.
+ * @vf: virtual function number
  *
- * This routine is invoked to release the MSI-X vectors and then disable the
- * MSI-X interrupt mode to device with SLI-3 interface spec.
+ * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
+ * based on the given virtual function number, @vf.
+ *
+ * Return 0 if successful, otherwise -ENODEV.
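+ *
+ * As a worked illustration (the constants' byte values are invented for
+ * the example, not taken from this patch): if LPFC_VFR_PAGE_SIZE were
+ * 0x1000 and LPFC_RQ_DOORBELL were 0x00A0, then for @vf = 2 the RQ
+ * doorbell would resolve to
+ *
+ *	RQDBregaddr = drbl_regs_memmap_p + 2 * 0x1000 + 0x00A0
+ *	            = drbl_regs_memmap_p + 0x20A0
+ *
+ * i.e. each virtual function gets its own page-sized doorbell window.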
**/ -static void -lpfc_sli_disable_msix(struct lpfc_hba *phba) +static int +lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) { - int i; - - /* Free up MSI-X multi-message vectors */ - for (i = 0; i < LPFC_MSIX_VECTORS; i++) - free_irq(phba->msix_entries[i].vector, phba); - /* Disable MSI-X */ - pci_disable_msix(phba->pcidev); + if (vf > LPFC_VIR_FUNC_MAX) + return -ENODEV; - return; + phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); + phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); + phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); + phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); + phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); + return 0; } /** - * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. + * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to enable the MSI interrupt mode to device with - * SLI-3 interface spec. The kernel function pci_enable_msi() is called to - * enable the MSI vector. The device driver is responsible for calling the - * request_irq() to register MSI vector with a interrupt the handler, which - * is done in this function. + * This routine is invoked to create the bootstrap mailbox + * region consistent with the SLI-4 interface spec. This + * routine allocates all memory necessary to communicate + * mailbox commands to the port and sets up all alignment + * needs. No locks are expected to be held when calling + * this routine. * * Return codes * 0 - sucessful - * other values - error - */ + * ENOMEM - could not allocated memory. + **/ static int -lpfc_sli_enable_msi(struct lpfc_hba *phba) +lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) { - int rc; + uint32_t bmbx_size; + struct lpfc_dmabuf *dmabuf; + struct dma_address *dma_address; + uint32_t pa_addr; + uint64_t phys_addr; + + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; - rc = pci_enable_msi(phba->pcidev); - if (!rc) - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0462 PCI enable MSI mode success.\n"); - else { - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0471 PCI enable MSI mode failed (%d)\n", rc); - return rc; + /* + * The bootstrap mailbox region is comprised of 2 parts + * plus an alignment restriction of 16 bytes. + */ + bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + bmbx_size, + &dmabuf->phys, + GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + return -ENOMEM; } + memset(dmabuf->virt, 0, bmbx_size); - rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, - IRQF_SHARED, LPFC_DRIVER_NAME, phba); - if (rc) { - pci_disable_msi(phba->pcidev); - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "0478 MSI request_irq failed (%d)\n", rc); - } - return rc; + /* + * Initialize the bootstrap mailbox pointers now so that the register + * operations are simple later. The mailbox dma address is required + * to be 16-byte aligned. Also align the virtual memory as each + * maibox is copied into the bmbx mailbox region before issuing the + * command to the port. 
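+	 * As a worked example (the address is invented for illustration):
+	 * a dmabuf->phys of 0x7f001008 is rounded up by ALIGN() to an
+	 * aphys of 0x7f001010; the port is then given bits 63:34 of aphys
+	 * in addr_hi and bits 33:4 in addr_lo, each shifted left by two
+	 * with a marker in the low bits saying which half it carries.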
+	 */
+	phba->sli4_hba.bmbx.dmabuf = dmabuf;
+	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
+
+	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
+					      LPFC_ALIGN_16_BYTE);
+	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
+					      LPFC_ALIGN_16_BYTE);
+
+	/*
+	 * Set the high and low physical addresses now. The SLI4 alignment
+	 * requirement is 16 bytes and the mailbox is posted to the port
+	 * as two 30-bit addresses. The other data is a bit marking whether
+	 * the 30-bit address is the high or low address.
+	 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
+	 * cleanly on 32 bit machines.
+	 */
+	dma_address = &phba->sli4_hba.bmbx.dma_address;
+	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
+	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
+	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
+					   LPFC_BMBX_BIT1_ADDR_HI);
+
+	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
+	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
+					   LPFC_BMBX_BIT1_ADDR_LO);
+	return 0;
+}
+
+/**
+ * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to tear down the bootstrap mailbox
+ * region and release all host resources. This routine requires
+ * the caller to ensure all mailbox commands have been recovered, no
+ * additional mailbox commands are sent, and interrupts are disabled
+ * before calling this routine.
+ *
+ **/
+static void
+lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
+{
+	dma_free_coherent(&phba->pcidev->dev,
+			  phba->sli4_hba.bmbx.bmbx_size,
+			  phba->sli4_hba.bmbx.dmabuf->virt,
+			  phba->sli4_hba.bmbx.dmabuf->phys);
+
+	kfree(phba->sli4_hba.bmbx.dmabuf);
+	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
+}
+
+/**
+ * lpfc_sli4_read_config - Get the config parameters.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to read the configuration parameters from the HBA.
+ * The configuration parameters are used to set the base and maximum values
+ * for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect the resource
+ * allocation for the port.
+ *
+ * Return codes
+ *      0 - successful
+ *      ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
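+ *
+ * As an illustration (numbers invented for the example): a returned
+ * rpi_base of 0 with an rpi count of 64 means RPIs 0-63 belong to this
+ * function, and next_rpi starts handing them out from that base.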
**/ -static uint32_t -lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) +static int +lpfc_sli4_read_config(struct lpfc_hba *phba) { - uint32_t intr_mode = LPFC_INTR_ERROR; - int retval; + LPFC_MBOXQ_t *pmb; + struct lpfc_mbx_read_config *rd_config; + uint32_t rc = 0; - if (cfg_mode == 2) { - /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ - retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); - if (!retval) { - /* Now, try to enable MSI-X interrupt mode */ - retval = lpfc_sli_enable_msix(phba); - if (!retval) { - /* Indicate initialization to MSI-X mode */ - phba->intr_type = MSIX; - intr_mode = 2; - } - } + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2011 Unable to allocate memory for issuing " + "SLI_CONFIG_SPECIAL mailbox command\n"); + return -ENOMEM; } - /* Fallback to MSI if MSI-X initialization failed */ - if (cfg_mode >= 1 && phba->intr_type == NONE) { - retval = lpfc_sli_enable_msi(phba); - if (!retval) { - /* Indicate initialization to MSI mode */ - phba->intr_type = MSI; - intr_mode = 1; - } - } + lpfc_read_config(phba, pmb); - /* Fallback to INTx if both MSI-X/MSI initalization failed */ - if (phba->intr_type == NONE) { - retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, - IRQF_SHARED, LPFC_DRIVER_NAME, phba); - if (!retval) { - /* Indicate initialization to INTx mode */ - phba->intr_type = INTx; - intr_mode = 0; - } + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2012 Mailbox failed , mbxCmd x%x " + "READ_CONFIG, mbxStatus x%x\n", + bf_get(lpfc_mqe_command, &pmb->u.mqe), + bf_get(lpfc_mqe_status, &pmb->u.mqe)); + rc = -EIO; + } else { + rd_config = &pmb->u.mqe.un.rd_config; + phba->sli4_hba.max_cfg_param.max_xri = + bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); + phba->sli4_hba.max_cfg_param.xri_base = + bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); + phba->sli4_hba.max_cfg_param.max_vpi = + bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); + phba->sli4_hba.max_cfg_param.vpi_base = + bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); + phba->sli4_hba.max_cfg_param.max_rpi = + bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); + phba->sli4_hba.max_cfg_param.rpi_base = + bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); + phba->sli4_hba.max_cfg_param.max_vfi = + bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); + phba->sli4_hba.max_cfg_param.vfi_base = + bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); + phba->sli4_hba.max_cfg_param.max_fcfi = + bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); + phba->sli4_hba.max_cfg_param.fcfi_base = + bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config); + phba->sli4_hba.max_cfg_param.max_eq = + bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); + phba->sli4_hba.max_cfg_param.max_rq = + bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); + phba->sli4_hba.max_cfg_param.max_wq = + bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); + phba->sli4_hba.max_cfg_param.max_cq = + bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); + phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); + phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; + phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; + phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; + phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; + phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi; + phba->max_vports = phba->max_vpi; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2003 cfg params XRI(B:%d M:%d), " + 
"VPI(B:%d M:%d) " + "VFI(B:%d M:%d) " + "RPI(B:%d M:%d) " + "FCFI(B:%d M:%d)\n", + phba->sli4_hba.max_cfg_param.xri_base, + phba->sli4_hba.max_cfg_param.max_xri, + phba->sli4_hba.max_cfg_param.vpi_base, + phba->sli4_hba.max_cfg_param.max_vpi, + phba->sli4_hba.max_cfg_param.vfi_base, + phba->sli4_hba.max_cfg_param.max_vfi, + phba->sli4_hba.max_cfg_param.rpi_base, + phba->sli4_hba.max_cfg_param.max_rpi, + phba->sli4_hba.max_cfg_param.fcfi_base, + phba->sli4_hba.max_cfg_param.max_fcfi); } - return intr_mode; + mempool_free(pmb, phba->mbox_mem_pool); + + /* Reset the DFT_HBA_Q_DEPTH to the max xri */ + if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri)) + phba->cfg_hba_queue_depth = + phba->sli4_hba.max_cfg_param.max_xri; + return rc; } /** - * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. + * lpfc_dev_endian_order_setup - Notify the port of the host's endian order. * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to disable device interrupt and disassociate the - * driver's interrupt handler(s) from interrupt vector(s) to device with - * SLI-3 interface spec. Depending on the interrupt mode, the driver will - * release the interrupt vector(s) for the message signaled interrupt. + * This routine is invoked to setup the host-side endian order to the + * HBA consistent with the SLI-4 interface spec. + * + * Return codes + * 0 - sucessful + * ENOMEM - No availble memory + * EIO - The mailbox failed to complete successfully. **/ -static void -lpfc_sli_disable_intr(struct lpfc_hba *phba) +static int +lpfc_setup_endian_order(struct lpfc_hba *phba) { - /* Disable the currently initialized interrupt mode */ - if (phba->intr_type == MSIX) - lpfc_sli_disable_msix(phba); - else if (phba->intr_type == MSI) - lpfc_sli_disable_msi(phba); - else if (phba->intr_type == INTx) - free_irq(phba->pcidev->irq, phba); + LPFC_MBOXQ_t *mboxq; + uint32_t rc = 0; + uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, + HOST_ENDIAN_HIGH_WORD1}; - /* Reset interrupt management states */ - phba->intr_type = NONE; - phba->sli.slistat.sli_intr = 0; + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0492 Unable to allocate memory for issuing " + "SLI_CONFIG_SPECIAL mailbox command\n"); + return -ENOMEM; + } - return; + /* + * The SLI4_CONFIG_SPECIAL mailbox command requires the first two + * words to contain special data values and no other data. + */ + memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); + memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0493 SLI_CONFIG_SPECIAL mailbox failed with " + "status x%x\n", + rc); + rc = -EIO; + } + + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; } /** - * lpfc_unset_hba - Unset SLI3 hba device initialization + * lpfc_sli4_queue_create - Create all the SLI4 queues * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to unset the HBA device initialization steps to - * a device with SLI-3 interface spec. + * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA + * operation. For each SLI4 queue type, the parameters such as queue entry + * count (queue depth) shall be taken from the module parameter. For now, + * we just use some constant number as place holder. 
+ * + * Return codes + * 0 - sucessful + * ENOMEM - No availble memory + * EIO - The mailbox failed to complete successfully. **/ -static void -lpfc_unset_hba(struct lpfc_hba *phba) +static int +lpfc_sli4_queue_create(struct lpfc_hba *phba) { - struct lpfc_vport *vport = phba->pport; - struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_queue *qdesc; + int fcp_eqidx, fcp_cqidx, fcp_wqidx; + int cfg_fcp_wq_count; + int cfg_fcp_eq_count; - spin_lock_irq(shost->host_lock); - vport->load_flag |= FC_UNLOADING; - spin_unlock_irq(shost->host_lock); + /* + * Sanity check for confiugred queue parameters against the run-time + * device parameters + */ - lpfc_stop_hba_timers(phba); + /* Sanity check on FCP fast-path WQ parameters */ + cfg_fcp_wq_count = phba->cfg_fcp_wq_count; + if (cfg_fcp_wq_count > + (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { + cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - + LPFC_SP_WQN_DEF; + if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2581 Not enough WQs (%d) from " + "the pci function for supporting " + "FCP WQs (%d)\n", + phba->sli4_hba.max_cfg_param.max_wq, + phba->cfg_fcp_wq_count); + goto out_error; + } + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "2582 Not enough WQs (%d) from the pci " + "function for supporting the requested " + "FCP WQs (%d), the actual FCP WQs can " + "be supported: %d\n", + phba->sli4_hba.max_cfg_param.max_wq, + phba->cfg_fcp_wq_count, cfg_fcp_wq_count); + } + /* The actual number of FCP work queues adopted */ + phba->cfg_fcp_wq_count = cfg_fcp_wq_count; + + /* Sanity check on FCP fast-path EQ parameters */ + cfg_fcp_eq_count = phba->cfg_fcp_eq_count; + if (cfg_fcp_eq_count > + (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { + cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - + LPFC_SP_EQN_DEF; + if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2574 Not enough EQs (%d) from the " + "pci function for supporting FCP " + "EQs (%d)\n", + phba->sli4_hba.max_cfg_param.max_eq, + phba->cfg_fcp_eq_count); + goto out_error; + } + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "2575 Not enough EQs (%d) from the pci " + "function for supporting the requested " + "FCP EQs (%d), the actual FCP EQs can " + "be supported: %d\n", + phba->sli4_hba.max_cfg_param.max_eq, + phba->cfg_fcp_eq_count, cfg_fcp_eq_count); + } + /* It does not make sense to have more EQs than WQs */ + if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "2593 The number of FCP EQs (%d) is more " + "than the number of FCP WQs (%d), take " + "the number of FCP EQs same as than of " + "WQs (%d)\n", cfg_fcp_eq_count, + phba->cfg_fcp_wq_count, + phba->cfg_fcp_wq_count); + cfg_fcp_eq_count = phba->cfg_fcp_wq_count; + } + /* The actual number of FCP event queues adopted */ + phba->cfg_fcp_eq_count = cfg_fcp_eq_count; + /* The overall number of event queues used */ + phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; - phba->pport->work_port_events = 0; + /* + * Create Event Queues (EQs) + */ - lpfc_sli_hba_down(phba); + /* Get EQ depth from module parameter, fake the default for now */ + phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; + phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; - lpfc_sli_brdrestart(phba); + /* Create slow path event queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, + phba->sli4_hba.eq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, 
LOG_INIT, + "0496 Failed allocate slow-path EQ\n"); + goto out_error; + } + phba->sli4_hba.sp_eq = qdesc; + + /* Create fast-path FCP Event Queue(s) */ + phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * + phba->cfg_fcp_eq_count), GFP_KERNEL); + if (!phba->sli4_hba.fp_eq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2576 Failed allocate memory for fast-path " + "EQ record array\n"); + goto out_free_sp_eq; + } + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, + phba->sli4_hba.eq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0497 Failed allocate fast-path EQ\n"); + goto out_free_fp_eq; + } + phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; + } + + /* + * Create Complete Queues (CQs) + */ + + /* Get CQ depth from module parameter, fake the default for now */ + phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; + phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; + + /* Create slow-path Mailbox Command Complete Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0500 Failed allocate slow-path mailbox CQ\n"); + goto out_free_fp_eq; + } + phba->sli4_hba.mbx_cq = qdesc; + + /* Create slow-path ELS Complete Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0501 Failed allocate slow-path ELS CQ\n"); + goto out_free_mbx_cq; + } + phba->sli4_hba.els_cq = qdesc; + + /* Create slow-path Unsolicited Receive Complete Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0502 Failed allocate slow-path USOL RX CQ\n"); + goto out_free_els_cq; + } + phba->sli4_hba.rxq_cq = qdesc; + + /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ + phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * + phba->cfg_fcp_eq_count), GFP_KERNEL); + if (!phba->sli4_hba.fcp_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2577 Failed allocate memory for fast-path " + "CQ record array\n"); + goto out_free_rxq_cq; + } + for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0499 Failed allocate fast-path FCP " + "CQ (%d)\n", fcp_cqidx); + goto out_free_fcp_cq; + } + phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; + } + + /* Create Mailbox Command Queue */ + phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; + phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; + + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, + phba->sli4_hba.mq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0505 Failed allocate slow-path MQ\n"); + goto out_free_fcp_cq; + } + phba->sli4_hba.mbx_wq = qdesc; + + /* + * Create all the Work Queues (WQs) + */ + phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; + phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; + + /* Create slow-path ELS Work Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, + phba->sli4_hba.wq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0504 Failed allocate slow-path ELS WQ\n"); + goto out_free_mbx_wq; + } + phba->sli4_hba.els_wq = qdesc; + + /* Create fast-path FCP Work Queue(s) */ + phba->sli4_hba.fcp_wq = 
kzalloc((sizeof(struct lpfc_queue *) * + phba->cfg_fcp_wq_count), GFP_KERNEL); + if (!phba->sli4_hba.fcp_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2578 Failed allocate memory for fast-path " + "WQ record array\n"); + goto out_free_els_wq; + } + for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, + phba->sli4_hba.wq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0503 Failed allocate fast-path FCP " + "WQ (%d)\n", fcp_wqidx); + goto out_free_fcp_wq; + } + phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; + } + + /* + * Create Receive Queue (RQ) + */ + phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; + phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; + + /* Create Receive Queue for header */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, + phba->sli4_hba.rq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0506 Failed allocate receive HRQ\n"); + goto out_free_fcp_wq; + } + phba->sli4_hba.hdr_rq = qdesc; + + /* Create Receive Queue for data */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, + phba->sli4_hba.rq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0507 Failed allocate receive DRQ\n"); + goto out_free_hdr_rq; + } + phba->sli4_hba.dat_rq = qdesc; + + return 0; + +out_free_hdr_rq: + lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); + phba->sli4_hba.hdr_rq = NULL; +out_free_fcp_wq: + for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { + lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); + phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; + } + kfree(phba->sli4_hba.fcp_wq); +out_free_els_wq: + lpfc_sli4_queue_free(phba->sli4_hba.els_wq); + phba->sli4_hba.els_wq = NULL; +out_free_mbx_wq: + lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); + phba->sli4_hba.mbx_wq = NULL; +out_free_fcp_cq: + for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { + lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); + phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; + } + kfree(phba->sli4_hba.fcp_cq); +out_free_rxq_cq: + lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq); + phba->sli4_hba.rxq_cq = NULL; +out_free_els_cq: + lpfc_sli4_queue_free(phba->sli4_hba.els_cq); + phba->sli4_hba.els_cq = NULL; +out_free_mbx_cq: + lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); + phba->sli4_hba.mbx_cq = NULL; +out_free_fp_eq: + for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { + lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); + phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; + } + kfree(phba->sli4_hba.fp_eq); +out_free_sp_eq: + lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); + phba->sli4_hba.sp_eq = NULL; +out_error: + return -ENOMEM; +} + +/** + * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to release all the SLI4 queues with the FCoE HBA + * operation. + * + * Return codes + * 0 - sucessful + * ENOMEM - No availble memory + * EIO - The mailbox failed to complete successfully. 
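+ *
+ * Note that this routine only frees host memory for the queue
+ * structures (the inverse of lpfc_sli4_queue_create() above); tearing
+ * the queues down on the port itself is done with mailbox commands by
+ * lpfc_sli4_queue_unset() further below.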
+ **/ +static void +lpfc_sli4_queue_destroy(struct lpfc_hba *phba) +{ + int fcp_qidx; + + /* Release mailbox command work queue */ + lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); + phba->sli4_hba.mbx_wq = NULL; + + /* Release ELS work queue */ + lpfc_sli4_queue_free(phba->sli4_hba.els_wq); + phba->sli4_hba.els_wq = NULL; + + /* Release FCP work queue */ + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) + lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); + kfree(phba->sli4_hba.fcp_wq); + phba->sli4_hba.fcp_wq = NULL; + + /* Release unsolicited receive queue */ + lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); + phba->sli4_hba.hdr_rq = NULL; + lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); + phba->sli4_hba.dat_rq = NULL; + + /* Release unsolicited receive complete queue */ + lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq); + phba->sli4_hba.rxq_cq = NULL; + + /* Release ELS complete queue */ + lpfc_sli4_queue_free(phba->sli4_hba.els_cq); + phba->sli4_hba.els_cq = NULL; + + /* Release mailbox command complete queue */ + lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); + phba->sli4_hba.mbx_cq = NULL; + + /* Release FCP response complete queue */ + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) + lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); + kfree(phba->sli4_hba.fcp_cq); + phba->sli4_hba.fcp_cq = NULL; + + /* Release fast-path event queue */ + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) + lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); + kfree(phba->sli4_hba.fp_eq); + phba->sli4_hba.fp_eq = NULL; + + /* Release slow-path event queue */ + lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); + phba->sli4_hba.sp_eq = NULL; + + return; +} + +/** + * lpfc_sli4_queue_setup - Set up all the SLI4 queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up all the SLI4 queues for the FCoE HBA + * operation. + * + * Return codes + * 0 - sucessful + * ENOMEM - No availble memory + * EIO - The mailbox failed to complete successfully. 
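+ *
+ * One detail worth noting: FCP WQs are bound to FCP CQs round-robin,
+ * so with an illustrative four WQs over two CQs the pairing becomes
+ * WQ0->CQ0, WQ1->CQ1, WQ2->CQ0, WQ3->CQ1 (see fcp_cq_index below).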
+ **/ +int +lpfc_sli4_queue_setup(struct lpfc_hba *phba) +{ + int rc = -ENOMEM; + int fcp_eqidx, fcp_cqidx, fcp_wqidx; + int fcp_cq_index = 0; + + /* + * Set up Event Queues (EQs) + */ + + /* Set up slow-path event queue */ + if (!phba->sli4_hba.sp_eq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0520 Slow-path EQ not allocated\n"); + goto out_error; + } + rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, + LPFC_SP_DEF_IMAX); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0521 Failed setup of slow-path EQ: " + "rc = 0x%x\n", rc); + goto out_error; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2583 Slow-path EQ setup: queue-id=%d\n", + phba->sli4_hba.sp_eq->queue_id); + + /* Set up fast-path event queue */ + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { + if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0522 Fast-path EQ (%d) not " + "allocated\n", fcp_eqidx); + goto out_destroy_fp_eq; + } + rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], + phba->cfg_fcp_imax); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0523 Failed setup of fast-path EQ " + "(%d), rc = 0x%x\n", fcp_eqidx, rc); + goto out_destroy_fp_eq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2584 Fast-path EQ setup: " + "queue[%d]-id=%d\n", fcp_eqidx, + phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); + } + + /* + * Set up Complete Queues (CQs) + */ + + /* Set up slow-path MBOX Complete Queue as the first CQ */ + if (!phba->sli4_hba.mbx_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0528 Mailbox CQ not allocated\n"); + goto out_destroy_fp_eq; + } + rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, + LPFC_MCQ, LPFC_MBOX); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0529 Failed setup of slow-path mailbox CQ: " + "rc = 0x%x\n", rc); + goto out_destroy_fp_eq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", + phba->sli4_hba.mbx_cq->queue_id, + phba->sli4_hba.sp_eq->queue_id); + + /* Set up slow-path ELS Complete Queue */ + if (!phba->sli4_hba.els_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0530 ELS CQ not allocated\n"); + goto out_destroy_mbx_cq; + } + rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, + LPFC_WCQ, LPFC_ELS); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0531 Failed setup of slow-path ELS CQ: " + "rc = 0x%x\n", rc); + goto out_destroy_mbx_cq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", + phba->sli4_hba.els_cq->queue_id, + phba->sli4_hba.sp_eq->queue_id); + + /* Set up slow-path Unsolicited Receive Complete Queue */ + if (!phba->sli4_hba.rxq_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0532 USOL RX CQ not allocated\n"); + goto out_destroy_els_cq; + } + rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq, + LPFC_RCQ, LPFC_USOL); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0533 Failed setup of slow-path USOL RX CQ: " + "rc = 0x%x\n", rc); + goto out_destroy_els_cq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n", + phba->sli4_hba.rxq_cq->queue_id, + phba->sli4_hba.sp_eq->queue_id); + + /* Set up fast-path FCP Response Complete Queue */ + for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { + if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0526 Fast-path FCP CQ (%d) not " + 
"allocated\n", fcp_cqidx); + goto out_destroy_fcp_cq; + } + rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], + phba->sli4_hba.fp_eq[fcp_cqidx], + LPFC_WCQ, LPFC_FCP); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0527 Failed setup of fast-path FCP " + "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); + goto out_destroy_fcp_cq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2588 FCP CQ setup: cq[%d]-id=%d, " + "parent eq[%d]-id=%d\n", + fcp_cqidx, + phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, + fcp_cqidx, + phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); + } + + /* + * Set up all the Work Queues (WQs) + */ + + /* Set up Mailbox Command Queue */ + if (!phba->sli4_hba.mbx_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0538 Slow-path MQ not allocated\n"); + goto out_destroy_fcp_cq; + } + rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, + phba->sli4_hba.mbx_cq, LPFC_MBOX); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0539 Failed setup of slow-path MQ: " + "rc = 0x%x\n", rc); + goto out_destroy_fcp_cq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", + phba->sli4_hba.mbx_wq->queue_id, + phba->sli4_hba.mbx_cq->queue_id); + + /* Set up slow-path ELS Work Queue */ + if (!phba->sli4_hba.els_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0536 Slow-path ELS WQ not allocated\n"); + goto out_destroy_mbx_wq; + } + rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, + phba->sli4_hba.els_cq, LPFC_ELS); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0537 Failed setup of slow-path ELS WQ: " + "rc = 0x%x\n", rc); + goto out_destroy_mbx_wq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", + phba->sli4_hba.els_wq->queue_id, + phba->sli4_hba.els_cq->queue_id); + + /* Set up fast-path FCP Work Queue */ + for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { + if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0534 Fast-path FCP WQ (%d) not " + "allocated\n", fcp_wqidx); + goto out_destroy_fcp_wq; + } + rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], + phba->sli4_hba.fcp_cq[fcp_cq_index], + LPFC_FCP); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0535 Failed setup of fast-path FCP " + "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); + goto out_destroy_fcp_wq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2591 FCP WQ setup: wq[%d]-id=%d, " + "parent cq[%d]-id=%d\n", + fcp_wqidx, + phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, + fcp_cq_index, + phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); + /* Round robin FCP Work Queue's Completion Queue assignment */ + fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); + } + + /* + * Create Receive Queue (RQ) + */ + if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0540 Receive Queue not allocated\n"); + goto out_destroy_fcp_wq; + } + rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, + phba->sli4_hba.rxq_cq, LPFC_USOL); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0541 Failed setup of Receive Queue: " + "rc = 0x%x\n", rc); + goto out_destroy_fcp_wq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " + "parent cq-id=%d\n", + phba->sli4_hba.hdr_rq->queue_id, + phba->sli4_hba.dat_rq->queue_id, + phba->sli4_hba.rxq_cq->queue_id); + return 0; + +out_destroy_fcp_wq: + for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 
+ lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); + lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); +out_destroy_mbx_wq: + lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); +out_destroy_fcp_cq: + for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) + lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); + lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq); +out_destroy_els_cq: + lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); +out_destroy_mbx_cq: + lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); +out_destroy_fp_eq: + for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) + lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); + lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); +out_error: + return rc; +} + +/** + * lpfc_sli4_queue_unset - Unset all the SLI4 queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset all the SLI4 queues with the FCoE HBA + * operation. + * + * Return codes + * 0 - sucessful + * ENOMEM - No availble memory + * EIO - The mailbox failed to complete successfully. + **/ +void +lpfc_sli4_queue_unset(struct lpfc_hba *phba) +{ + int fcp_qidx; + + /* Unset mailbox command work queue */ + lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); + /* Unset ELS work queue */ + lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); + /* Unset unsolicited receive queue */ + lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); + /* Unset FCP work queue */ + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) + lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); + /* Unset mailbox command complete queue */ + lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); + /* Unset ELS complete queue */ + lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); + /* Unset unsolicited receive complete queue */ + lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq); + /* Unset FCP response complete queue */ + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) + lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); + /* Unset fast-path event queue */ + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) + lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); + /* Unset slow-path event queue */ + lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); +} + +/** + * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate and set up a pool of completion queue + * events. The body of the completion queue event is a completion queue entry + * CQE. For now, this pool is used for the interrupt service routine to queue + * the following HBA completion queue events for the worker thread to process: + * - Mailbox asynchronous events + * - Receive queue completion unsolicited events + * Later, this can be used for all the slow-path events. + * + * Return codes + * 0 - sucessful + * -ENOMEM - No availble memory + **/ +static int +lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event; + int i; + + for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { + cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); + if (!cq_event) + goto out_pool_create_fail; + list_add_tail(&cq_event->list, + &phba->sli4_hba.sp_cqe_event_pool); + } + return 0; + +out_pool_create_fail: + lpfc_sli4_cq_event_pool_destroy(phba); + return -ENOMEM; +} + +/** + * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool + * @phba: pointer to lpfc hba data structure. 
+ * + * This routine is invoked to free the pool of completion queue events at + * driver unload time. Note that, it is the responsibility of the driver + * cleanup routine to free all the outstanding completion-queue events + * allocated from this pool back into the pool before invoking this routine + * to destroy the pool. + **/ +static void +lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event, *next_cq_event; + + list_for_each_entry_safe(cq_event, next_cq_event, + &phba->sli4_hba.sp_cqe_event_pool, list) { + list_del(&cq_event->list); + kfree(cq_event); + } +} + +/** + * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool + * @phba: pointer to lpfc hba data structure. + * + * This routine is the lock free version of the API invoked to allocate a + * completion-queue event from the free pool. + * + * Return: Pointer to the newly allocated completion-queue event if successful + * NULL otherwise. + **/ +struct lpfc_cq_event * +__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event = NULL; + + list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, + struct lpfc_cq_event, list); + return cq_event; +} + +/** + * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool + * @phba: pointer to lpfc hba data structure. + * + * This routine is the lock version of the API invoked to allocate a + * completion-queue event from the free pool. + * + * Return: Pointer to the newly allocated completion-queue event if successful + * NULL otherwise. + **/ +struct lpfc_cq_event * +lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event; + unsigned long iflags; + + spin_lock_irqsave(&phba->hbalock, iflags); + cq_event = __lpfc_sli4_cq_event_alloc(phba); + spin_unlock_irqrestore(&phba->hbalock, iflags); + return cq_event; +} + +/** + * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool + * @phba: pointer to lpfc hba data structure. + * @cq_event: pointer to the completion queue event to be freed. + * + * This routine is the lock free version of the API invoked to release a + * completion-queue event back into the free pool. + **/ +void +__lpfc_sli4_cq_event_release(struct lpfc_hba *phba, + struct lpfc_cq_event *cq_event) +{ + list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); +} + +/** + * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool + * @phba: pointer to lpfc hba data structure. + * @cq_event: pointer to the completion queue event to be freed. + * + * This routine is the lock version of the API invoked to release a + * completion-queue event back into the free pool. + **/ +void +lpfc_sli4_cq_event_release(struct lpfc_hba *phba, + struct lpfc_cq_event *cq_event) +{ + unsigned long iflags; + spin_lock_irqsave(&phba->hbalock, iflags); + __lpfc_sli4_cq_event_release(phba, cq_event); + spin_unlock_irqrestore(&phba->hbalock, iflags); +} + +/** + * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool + * @phba: pointer to lpfc hba data structure. + * + * This routine is to free all the pending completion-queue events to the + * back into the free pool for device reset. 
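+ *
+ * Note the locking pattern below: all three pending lists are spliced
+ * onto one local list in a single hbalock critical section, after which
+ * each event is returned to the pool via lpfc_sli4_cq_event_release(),
+ * which briefly re-takes the lock per event.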
+ **/
+static void
+lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
+{
+	LIST_HEAD(cqelist);
+	struct lpfc_cq_event *cqe;
+	unsigned long iflags;
+
+	/* Retrieve all the pending WCQEs from pending WCQE lists */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	/* Pending FCP XRI abort events */
+	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
+			 &cqelist);
+	/* Pending ELS XRI abort events */
+	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+			 &cqelist);
+	/* Pending async events */
+	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
+			 &cqelist);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	while (!list_empty(&cqelist)) {
+		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
+		lpfc_sli4_cq_event_release(phba, cqe);
+	}
+}
+
+/**
+ * lpfc_pci_function_reset - Reset pci function.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to request a PCI function reset. It destroys
+ * all resources assigned to the PCI function which originates this request.
+ *
+ * Return codes
+ *      0 - successful
+ *      ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_pci_function_reset(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mboxq;
+	uint32_t rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0494 Unable to allocate memory for issuing "
+				"SLI_FUNCTION_RESET mailbox command\n");
+		return -ENOMEM;
+	}
+
+	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
+			 LPFC_SLI4_MBX_EMBED);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0495 SLI_FUNCTION_RESET mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
+ * @phba: pointer to lpfc hba data structure.
+ * @cnt: number of nop mailbox commands to send.
+ *
+ * This routine is invoked to send @cnt NOP mailbox commands and wait for
+ * each command to complete.
+ *
+ * Return: the number of NOP mailbox commands completed.
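+ *
+ * A caller can treat a return value smaller than @cnt as a transport
+ * problem; e.g. (illustrative usage) requesting 16 NOPs and getting 10
+ * back means the eleventh command timed out or failed.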
+ **/ +static int +lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) +{ + LPFC_MBOXQ_t *mboxq; + int length, cmdsent; + uint32_t mbox_tmo; + uint32_t rc = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + if (cnt == 0) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "2518 Requested to send 0 NOP mailbox cmd\n"); + return cnt; + } + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2519 Unable to allocate memory for issuing " + "NOP mailbox command\n"); + return 0; + } + + /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ + length = (sizeof(struct lpfc_mbx_nop) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); + + mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + for (cmdsent = 0; cmdsent < cnt; cmdsent++) { + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + else + rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); + if (rc == MBX_TIMEOUT) + break; + /* Check return status */ + shdr = (union lpfc_sli4_cfg_shdr *) + &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, + &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "2520 NOP mailbox command failed " + "status x%x add_status x%x mbx " + "status x%x\n", shdr_status, + shdr_add_status, rc); + break; + } + } + + if (rc != MBX_TIMEOUT) + mempool_free(mboxq, phba->mbox_mem_pool); + + return cmdsent; +} + +/** + * lpfc_sli4_fcfi_unreg - Unregister fcfi to device + * @phba: pointer to lpfc hba data structure. + * @fcfi: fcf index. + * + * This routine is invoked to unregister a FCFI from device. + **/ +void +lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) +{ + LPFC_MBOXQ_t *mbox; + uint32_t mbox_tmo; + int rc; + unsigned long flags; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + + if (!mbox) + return; + + lpfc_unreg_fcfi(mbox, fcfi); + + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + if (rc != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + if (rc != MBX_SUCCESS) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2517 Unregister FCFI command failed " + "status %d, mbxStatus x%x\n", rc, + bf_get(lpfc_mqe_status, &mbox->u.mqe)); + else { + spin_lock_irqsave(&phba->hbalock, flags); + /* Mark the FCFI is no longer registered */ + phba->fcf.fcf_flag &= + ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED); + spin_unlock_irqrestore(&phba->hbalock, flags); + } +} + +/** + * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the PCI device memory space for device + * with SLI-4 interface spec. 
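+ *
+ * Note that "Bar0/Bar1/Bar2" are the driver's logical names; as the
+ * comment in the body points out, they actually correspond to PCI BAR
+ * regions 1, 2 and 4 of the SLI4 device, and the three ioremap()ed
+ * regions are unwound in reverse order on any failure.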
+ * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) +{ + struct pci_dev *pdev; + unsigned long bar0map_len, bar1map_len, bar2map_len; + int error = -ENODEV; + + /* Obtain PCI device reference */ + if (!phba->pcidev) + return error; + else + pdev = phba->pcidev; + + /* Set the device DMA mask size */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) + return error; + + /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the + * number of bytes required by each mapping. They are actually + * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device. + */ + phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0); + bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0); + + phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1); + bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1); + + phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2); + bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2); + + /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ + phba->sli4_hba.conf_regs_memmap_p = + ioremap(phba->pci_bar0_map, bar0map_len); + if (!phba->sli4_hba.conf_regs_memmap_p) { + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for SLI4 PCI config registers.\n"); + goto out; + } + + /* Map SLI4 HBA Control Register base to a kernel virtual address. */ + phba->sli4_hba.ctrl_regs_memmap_p = + ioremap(phba->pci_bar1_map, bar1map_len); + if (!phba->sli4_hba.ctrl_regs_memmap_p) { + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for SLI4 HBA control registers.\n"); + goto out_iounmap_conf; + } + + /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */ + phba->sli4_hba.drbl_regs_memmap_p = + ioremap(phba->pci_bar2_map, bar2map_len); + if (!phba->sli4_hba.drbl_regs_memmap_p) { + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for SLI4 HBA doorbell registers.\n"); + goto out_iounmap_ctrl; + } + + /* Set up BAR0 PCI config space register memory map */ + lpfc_sli4_bar0_register_memmap(phba); + + /* Set up BAR1 register memory map */ + lpfc_sli4_bar1_register_memmap(phba); + + /* Set up BAR2 register memory map */ + error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); + if (error) + goto out_iounmap_all; + + return 0; + +out_iounmap_all: + iounmap(phba->sli4_hba.drbl_regs_memmap_p); +out_iounmap_ctrl: + iounmap(phba->sli4_hba.ctrl_regs_memmap_p); +out_iounmap_conf: + iounmap(phba->sli4_hba.conf_regs_memmap_p); +out: + return error; +} + +/** + * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the PCI device memory space for device + * with SLI-4 interface spec. + **/ +static void +lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) +{ + struct pci_dev *pdev; + + /* Obtain PCI device reference */ + if (!phba->pcidev) + return; + else + pdev = phba->pcidev; + + /* Free coherent DMA memory allocated */ + + /* Unmap I/O memory space */ + iounmap(phba->sli4_hba.drbl_regs_memmap_p); + iounmap(phba->sli4_hba.ctrl_regs_memmap_p); + iounmap(phba->sli4_hba.conf_regs_memmap_p); + + return; +} + +/** + * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to enable the MSI-X interrupt vectors to device + * with SLI-3 interface specs. The kernel function pci_enable_msix() is + * called to enable the MSI-X vectors. 
Note that pci_enable_msix(), once + * invoked, enables either all or nothing, depending on the current + * availability of PCI vector resources. The device driver is responsible + * for calling the individual request_irq() to register each MSI-X vector + * with a interrupt handler, which is done in this function. Note that + * later when device is unloading, the driver should always call free_irq() + * on all MSI-X vectors it has done request_irq() on before calling + * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device + * will be left with MSI-X enabled and leaks its vectors. + * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_sli_enable_msix(struct lpfc_hba *phba) +{ + int rc, i; + LPFC_MBOXQ_t *pmb; + + /* Set up MSI-X multi-message vectors */ + for (i = 0; i < LPFC_MSIX_VECTORS; i++) + phba->msix_entries[i].entry = i; + + /* Configure MSI-X capability structure */ + rc = pci_enable_msix(phba->pcidev, phba->msix_entries, + ARRAY_SIZE(phba->msix_entries)); + if (rc) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0420 PCI enable MSI-X failed (%d)\n", rc); + goto msi_fail_out; + } + for (i = 0; i < LPFC_MSIX_VECTORS; i++) + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0477 MSI-X entry[%d]: vector=x%x " + "message=%d\n", i, + phba->msix_entries[i].vector, + phba->msix_entries[i].entry); + /* + * Assign MSI-X vectors to interrupt handlers + */ + + /* vector-0 is associated to slow-path handler */ + rc = request_irq(phba->msix_entries[0].vector, + &lpfc_sli_sp_intr_handler, IRQF_SHARED, + LPFC_SP_DRIVER_HANDLER_NAME, phba); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0421 MSI-X slow-path request_irq failed " + "(%d)\n", rc); + goto msi_fail_out; + } + + /* vector-1 is associated to fast-path handler */ + rc = request_irq(phba->msix_entries[1].vector, + &lpfc_sli_fp_intr_handler, IRQF_SHARED, + LPFC_FP_DRIVER_HANDLER_NAME, phba); + + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0429 MSI-X fast-path request_irq failed " + "(%d)\n", rc); + goto irq_fail_out; + } + + /* + * Configure HBA MSI-X attention conditions to messages + */ + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + + if (!pmb) { + rc = -ENOMEM; + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0474 Unable to allocate memory for issuing " + "MBOX_CONFIG_MSI command\n"); + goto mem_fail_out; + } + rc = lpfc_config_msi(phba, pmb); + if (rc) + goto mbx_fail_out; + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, + "0351 Config MSI mailbox command failed, " + "mbxCmd x%x, mbxStatus x%x\n", + pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); + goto mbx_fail_out; + } + + /* Free memory allocated for mailbox command */ + mempool_free(pmb, phba->mbox_mem_pool); + return rc; + +mbx_fail_out: + /* Free memory allocated for mailbox command */ + mempool_free(pmb, phba->mbox_mem_pool); + +mem_fail_out: + /* free the irq already requested */ + free_irq(phba->msix_entries[1].vector, phba); + +irq_fail_out: + /* free the irq already requested */ + free_irq(phba->msix_entries[0].vector, phba); + +msi_fail_out: + /* Unconfigure MSI-X capability structure */ + pci_disable_msix(phba->pcidev); + return rc; +} + +/** + * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. + * @phba: pointer to lpfc hba data structure. 
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode to device with SLI-3 interface spec.
+ **/
+static void
+lpfc_sli_disable_msix(struct lpfc_hba *phba)
+{
+	int i;
+
+	/* Free up MSI-X multi-message vectors */
+	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+		free_irq(phba->msix_entries[i].vector, phba);
+	/* Disable MSI-X */
+	pci_disable_msix(phba->pcidev);
+
+	return;
+}
+
+/**
+ * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
+ * enable the MSI vector. The device driver is responsible for calling the
+ * request_irq() to register the MSI vector with an interrupt handler, which
+ * is done in this function.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ */
+static int
+lpfc_sli_enable_msi(struct lpfc_hba *phba)
+{
+	int rc;
+
+	rc = pci_enable_msi(phba->pcidev);
+	if (!rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0462 PCI enable MSI mode success.\n");
+	else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0471 PCI enable MSI mode failed (%d)\n", rc);
+		return rc;
+	}
+
+	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+	if (rc) {
+		pci_disable_msi(phba->pcidev);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0478 MSI request_irq failed (%d)\n", rc);
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
+ * done request_irq() on before calling pci_disable_msi(). Failure to do so
+ * results in a BUG_ON() and a device will be left with MSI enabled and leaks
+ * its vector.
+ */
+static void
+lpfc_sli_disable_msi(struct lpfc_hba *phba)
+{
+	free_irq(phba->pcidev->irq, phba);
+	pci_disable_msi(phba->pcidev);
+	return;
+}
+
+/**
+ * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable device interrupt and associate the
+ * driver's interrupt handler(s) to interrupt vector(s) to device with SLI-3
+ * interface spec. Depending on the interrupt mode configured for the driver,
+ * the driver will try to fall back from the configured interrupt mode to an
+ * interrupt mode which is supported by the platform, kernel, and device in
+ * the order of:
+ *     MSI-X -> MSI -> IRQ.
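+ *
+ * The return value encodes the mode that finally took effect (2 =
+ * MSI-X, 1 = MSI, 0 = INTx), or LPFC_INTR_ERROR if all three attempts
+ * failed; e.g. a cfg_mode of 2 whose MSI-X setup fails falls through
+ * to the MSI branch because intr_type is still NONE at that point.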
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static uint32_t
+lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+	uint32_t intr_mode = LPFC_INTR_ERROR;
+	int retval;
+
+	if (cfg_mode == 2) {
+		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
+		if (!retval) {
+			/* Now, try to enable MSI-X interrupt mode */
+			retval = lpfc_sli_enable_msix(phba);
+			if (!retval) {
+				/* Indicate initialization to MSI-X mode */
+				phba->intr_type = MSIX;
+				intr_mode = 2;
+			}
+		}
+	}
+
+	/* Fallback to MSI if MSI-X initialization failed */
+	if (cfg_mode >= 1 && phba->intr_type == NONE) {
+		retval = lpfc_sli_enable_msi(phba);
+		if (!retval) {
+			/* Indicate initialization to MSI mode */
+			phba->intr_type = MSI;
+			intr_mode = 1;
+		}
+	}
+
+	/* Fallback to INTx if both MSI-X/MSI initialization failed */
+	if (phba->intr_type == NONE) {
+		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+		if (!retval) {
+			/* Indicate initialization to INTx mode */
+			phba->intr_type = INTx;
+			intr_mode = 0;
+		}
+	}
+	return intr_mode;
+}
+
+/**
+ * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate the
+ * driver's interrupt handler(s) from interrupt vector(s) to device with
+ * SLI-3 interface spec. Depending on the interrupt mode, the driver will
+ * release the interrupt vector(s) for the message signaled interrupt.
+ **/
+static void
+lpfc_sli_disable_intr(struct lpfc_hba *phba)
+{
+	/* Disable the currently initialized interrupt mode */
+	if (phba->intr_type == MSIX)
+		lpfc_sli_disable_msix(phba);
+	else if (phba->intr_type == MSI)
+		lpfc_sli_disable_msi(phba);
+	else if (phba->intr_type == INTx)
+		free_irq(phba->pcidev->irq, phba);
+
+	/* Reset interrupt management states */
+	phba->intr_type = NONE;
+	phba->sli.slistat.sli_intr = 0;
+
+	return;
+}
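The MSI-X -> MSI -> IRQ ladder that lpfc_sli_enable_intr() walks can be reduced to the sketch below. The function name, the 2/1/0 return convention (mirroring intr_mode above), and the -ENODEV error are illustrative assumptions; example_enable_msix() refers to the earlier sketch.

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

static int example_enable_intr(struct pci_dev *pdev, int cfg_mode,
			       irq_handler_t handler, void *ctx)
{
	static struct msix_entry entries[2];

	/* cfg_mode: 2 = prefer MSI-X, 1 = prefer MSI, 0 = INTx only */
	if (cfg_mode == 2 &&
	    example_enable_msix(pdev, entries, handler, ctx) == 0)
		return 2;			/* running in MSI-X mode */

	if (cfg_mode >= 1 && pci_enable_msi(pdev) == 0) {
		if (request_irq(pdev->irq, handler, 0, "example", ctx) == 0)
			return 1;		/* running in MSI mode */
		pci_disable_msi(pdev);		/* undo before falling back */
	}

	if (request_irq(pdev->irq, handler, IRQF_SHARED, "example", ctx) == 0)
		return 0;			/* running in legacy INTx mode */

	return -ENODEV;				/* analogous to LPFC_INTR_ERROR */
}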
+
+/**
+ * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
+ * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
+ * enables either all or nothing, depending on the current availability of
+ * PCI vector resources. The device driver is responsible for calling the
+ * individual request_irq() to register each MSI-X vector with an interrupt
+ * handler, which is done in this function. Note that later, when the device
+ * is unloading, the driver should always call free_irq() on all MSI-X
+ * vectors it has done request_irq() on before calling pci_disable_msix().
+ * Failure to do so results in a BUG_ON() and the device will be left with
+ * MSI-X enabled and will leak its vectors.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli4_enable_msix(struct lpfc_hba *phba)
+{
+	int rc, index;
+
+	/* Set up MSI-X multi-message vectors */
+	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
+		phba->sli4_hba.msix_entries[index].entry = index;
+
+	/* Configure MSI-X capability structure */
+	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
+			     phba->sli4_hba.cfg_eqn);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0484 PCI enable MSI-X failed (%d)\n", rc);
+		goto msi_fail_out;
+	}
+	/* Log MSI-X vector assignment */
+	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0489 MSI-X entry[%d]: vector=x%x "
+				"message=%d\n", index,
+				phba->sli4_hba.msix_entries[index].vector,
+				phba->sli4_hba.msix_entries[index].entry);
+	/*
+	 * Assign MSI-X vectors to interrupt handlers
+	 */
+
+	/* The first vector must be associated with the slow-path handler for MQ */
+	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
+			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0485 MSI-X slow-path request_irq failed "
+				"(%d)\n", rc);
+		goto msi_fail_out;
+	}
+
+	/* The rest of the vector(s) are associated with fast-path handler(s) */
+	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
+		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
+		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
+		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
+				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
+				 LPFC_FP_DRIVER_HANDLER_NAME,
+				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"0486 MSI-X fast-path (%d) "
+					"request_irq failed (%d)\n", index, rc);
+			goto cfg_fail_out;
+		}
+	}
+
+	return rc;
+
+cfg_fail_out:
+	/* free the irq already requested */
+	for (--index; index >= 1; index--)
+		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
+			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+
+	/* free the irq already requested */
+	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+
+msi_fail_out:
+	/* Unconfigure MSI-X capability structure */
+	pci_disable_msix(phba->pcidev);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode to device with SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_disable_msix(struct lpfc_hba *phba)
+{
+	int index;
+
+	/* Free up MSI-X multi-message vectors */
+	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+
+	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
+		free_irq(phba->sli4_hba.msix_entries[index].vector,
+			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+	/* Disable MSI-X */
+	pci_disable_msix(phba->pcidev);
+
+	return;
+}
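One detail of the fast-path registration above is worth isolating: each vector is registered with its own small per-queue context as the dev_id argument, so a single shared handler can tell which event queue signaled. A minimal sketch of that idiom, with invented struct and field names (fcp_eq_hdl is the real driver analogue):

#include <linux/kernel.h>
#include <linux/interrupt.h>

struct example_eq_hdl {
	int idx;	/* which event queue this vector serves */
	void *owner;	/* back-pointer to the adapter */
};

static irqreturn_t example_fp_handler(int irq, void *dev_id)
{
	struct example_eq_hdl *hdl = dev_id;

	/* Service only the event queue this vector was registered for */
	printk(KERN_DEBUG "fast-path irq %d for eq %d\n", irq, hdl->idx);
	return IRQ_HANDLED;
}

/* Registration: one context per vector, same handler for all of them:
 *   request_irq(entries[n].vector, example_fp_handler, IRQF_SHARED,
 *               "example-fp", &eq_hdl[n - 1]);
 */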
+
+/**
+ * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The kernel function pci_enable_msi() is called
+ * to enable the MSI vector. The device driver is responsible for calling
+ * the request_irq() to register the MSI vector with an interrupt handler,
+ * which is done in this function.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli4_enable_msi(struct lpfc_hba *phba)
+{
+	int rc, index;
+
+	rc = pci_enable_msi(phba->pcidev);
+	if (!rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0487 PCI enable MSI mode success.\n");
+	else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0488 PCI enable MSI mode failed (%d)\n", rc);
+		return rc;
+	}
+
+	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+	if (rc) {
+		pci_disable_msi(phba->pcidev);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0490 MSI request_irq failed (%d)\n", rc);
+	}
+
+	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
+		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
+ * done request_irq() on before calling pci_disable_msi(). Failure to do so
+ * results in a BUG_ON() and the device will be left with MSI enabled and
+ * will leak its vector.
+ **/
+static void
+lpfc_sli4_disable_msi(struct lpfc_hba *phba)
+{
+	free_irq(phba->pcidev->irq, phba);
+	pci_disable_msi(phba->pcidev);
+	return;
+}
+
+/**
+ * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable device interrupt and associate driver's
+ * interrupt handler(s) to interrupt vector(s) to device with SLI-4
+ * interface spec. Depending on the interrupt mode configured in the driver,
+ * the driver will try to fall back from the configured interrupt mode to an
+ * interrupt mode which is supported by the platform, kernel, and device, in
+ * the order of:
+ * MSI-X -> MSI -> IRQ.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static uint32_t
+lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+	uint32_t intr_mode = LPFC_INTR_ERROR;
+	int retval, index;
+
+	if (cfg_mode == 2) {
+		/* Preparation before conf_msi mbox cmd */
+		retval = 0;
+		if (!retval) {
+			/* Now, try to enable MSI-X interrupt mode */
+			retval = lpfc_sli4_enable_msix(phba);
+			if (!retval) {
+				/* Indicate initialization to MSI-X mode */
+				phba->intr_type = MSIX;
+				intr_mode = 2;
+			}
+		}
+	}
+
+	/* Fallback to MSI if MSI-X initialization failed */
+	if (cfg_mode >= 1 && phba->intr_type == NONE) {
+		retval = lpfc_sli4_enable_msi(phba);
+		if (!retval) {
+			/* Indicate initialization to MSI mode */
+			phba->intr_type = MSI;
+			intr_mode = 1;
+		}
+	}
+
+	/* Fallback to INTx if both MSI-X/MSI initialization failed */
+	if (phba->intr_type == NONE) {
+		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+		if (!retval) {
+			/* Indicate initialization to INTx mode */
+			phba->intr_type = INTx;
+			intr_mode = 0;
+			for (index = 0; index < phba->cfg_fcp_eq_count;
+			     index++) {
+				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+			}
+		}
+	}
+	return intr_mode;
+}
+
+/**
+ * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate
+ * the driver's interrupt handler(s) from interrupt vector(s) to device
+ * with SLI-4 interface spec. Depending on the interrupt mode, the driver
+ * will release the interrupt vector(s) for the message signaled interrupt.
+ **/
+static void
+lpfc_sli4_disable_intr(struct lpfc_hba *phba)
+{
+	/* Disable the currently initialized interrupt mode */
+	if (phba->intr_type == MSIX)
+		lpfc_sli4_disable_msix(phba);
+	else if (phba->intr_type == MSI)
+		lpfc_sli4_disable_msi(phba);
+	else if (phba->intr_type == INTx)
+		free_irq(phba->pcidev->irq, phba);
+
+	/* Reset interrupt management states */
+	phba->intr_type = NONE;
+	phba->sli.slistat.sli_intr = 0;
+
+	return;
+}
+
+/**
+ * lpfc_unset_hba - Unset SLI3 hba device initialization
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the HBA device initialization steps
+ * performed on a device with SLI-3 interface spec.
+ **/
+static void
+lpfc_unset_hba(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_stop_hba_timers(phba);
+
+	phba->pport->work_port_events = 0;
+
+	lpfc_sli_hba_down(phba);
+
+	lpfc_sli_brdrestart(phba);
+
+	lpfc_sli_disable_intr(phba);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the HBA device initialization steps
+ * performed on a device with SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_unset_hba(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(shost->host_lock);
+
+	phba->pport->work_port_events = 0;
+
+	lpfc_sli4_hba_down(phba);
+
+	lpfc_sli4_disable_intr(phba);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_hba_unset - Unset the fcoe hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to reset the HBA's FCoE
+ * function. The caller is not required to hold any lock. This routine
+ * issues a PCI function reset mailbox command to reset the FCoE function.
+ * At the end of the function, it calls lpfc_hba_down_post function to
+ * free any pending commands.
+ **/
+static void
+lpfc_sli4_hba_unset(struct lpfc_hba *phba)
+{
+	int wait_cnt = 0;
+	LPFC_MBOXQ_t *mboxq;
+
+	lpfc_stop_hba_timers(phba);
+	phba->sli4_hba.intr_enable = 0;
+
+	/*
+	 * Gracefully wait out any currently outstanding asynchronous
+	 * mailbox command.
+	 */
+
+	/* First, block any pending async mailbox command from being posted */
+	spin_lock_irq(&phba->hbalock);
+	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, try to wait it out if we can */
+	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		msleep(10);
+		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
+			break;
+	}
+	/* Forcefully release the outstanding mailbox command if timed out */
+	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		spin_lock_irq(&phba->hbalock);
+		mboxq = phba->sli.mbox_active;
+		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+		__lpfc_mbox_cmpl_put(phba, mboxq);
+		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		phba->sli.mbox_active = NULL;
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	/* Tear down the queues in the HBA */
+	lpfc_sli4_queue_unset(phba);
+
+	/* Disable PCI subsystem interrupt */
+	lpfc_sli4_disable_intr(phba);
+
+	/* Stopping the kthread triggers work_done one more time */
+	kthread_stop(phba->worker_thread);
+
+	/* Stop the SLI4 device port */
+	phba->pport->work_port_events = 0;
+}
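The wait-then-force logic in lpfc_sli4_hba_unset() above is a generic bounded poll. A sketch of just that idiom, with invented names (EXAMPLE_WAIT_CNT plays the role of LPFC_ACTIVE_MBOX_WAIT_CNT):

#include <linux/types.h>
#include <linux/delay.h>

/* Poll a busy condition every 10 ms, but never wait forever. */
static bool example_wait_idle(volatile unsigned long *flags,
			      unsigned long busy_bit)
{
	enum { EXAMPLE_WAIT_CNT = 100 };	/* ~1 second total */
	int wait_cnt = 0;

	while (*flags & busy_bit) {
		msleep(10);
		if (++wait_cnt > EXAMPLE_WAIT_CNT)
			break;
	}
	/* true = went idle; false = timed out, caller must force completion */
	return !(*flags & busy_bit);
}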
+
+/**
+ * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be called to attach a device with SLI-3 interface spec
+ * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
+ * information of the device and driver to see if the driver can support
+ * this kind of device. If the match is successful, the driver core
+ * invokes this routine. If this routine determines it can claim the HBA, it
+ * does all the initialization that it needs to do to handle the HBA properly.
+ *
+ * Return code
+ * 0 - driver can claim the device
+ * negative value - driver can not claim the device
+ **/
+static int __devinit
+lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+	struct lpfc_hba   *phba;
+	struct lpfc_vport *vport = NULL;
+	int error;
+	uint32_t cfg_mode, intr_mode;
+
+	/* Allocate memory for HBA structure */
+	phba = lpfc_hba_alloc(pdev);
+	if (!phba)
+		return -ENOMEM;
+
+	/* Perform generic PCI device enabling operation */
+	error = lpfc_enable_pci_dev(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1401 Failed to enable pci device.\n");
+		goto out_free_phba;
+	}
+
+	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
+	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
+	if (error)
+		goto out_disable_pci_dev;
+
+	/* Set up SLI-3 specific device PCI memory space */
+	error = lpfc_sli_pci_mem_setup(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1402 Failed to set up pci memory space.\n");
+		goto out_disable_pci_dev;
+	}
+
+	/* Set up phase-1 common device driver resources */
+	error = lpfc_setup_driver_resource_phase1(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1403 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s3;
+	}
+
+	/* Set up SLI-3 specific device driver resources */
+	error = lpfc_sli_driver_resource_setup(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1404 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s3;
+	}
+
+	/* Initialize and populate the iocb list per host */
+	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1405 Failed to initialize iocb list.\n");
+		goto out_unset_driver_resource_s3;
+	}
+
+	/* Set up common device driver resources */
+	error = lpfc_setup_driver_resource_phase2(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1406 Failed to set up driver resource.\n");
+		goto out_free_iocb_list;
+	}
+
+	/* Create SCSI host to the physical port */
+	error = lpfc_create_shost(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1407 Failed to create scsi host.\n");
+		goto out_unset_driver_resource;
+	}
+
+	/* Configure sysfs attributes */
+	vport = phba->pport;
+	error = lpfc_alloc_sysfs_attr(vport);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1476 Failed to allocate sysfs attr\n");
+		goto out_destroy_shost;
+	}
+
+	/* Now, try to enable interrupt and bring up the device */
+	cfg_mode = phba->cfg_use_msi;
+	while (true) {
+		/* Put device to a known state before enabling interrupt */
+		lpfc_stop_port(phba);
+		/* Configure and enable interrupt */
+		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
+		if (intr_mode == LPFC_INTR_ERROR) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0431 Failed to enable interrupt.\n");
+			error = -ENODEV;
+			goto out_free_sysfs_attr;
+		}
+		/* SLI-3 HBA setup */
+		if (lpfc_sli_hba_setup(phba)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1477 Failed to set up hba\n");
+			error = -ENODEV;
+			goto out_remove_device;
+		}
+
+		/* Wait 50ms for the interrupts of previous mailbox commands */
+		msleep(50);
+		/* Check active interrupts on message signaled interrupts */
+		if (intr_mode == 0 ||
+		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
+			/* Log the current active interrupt mode */
+			phba->intr_mode = intr_mode;
+			lpfc_log_intr_mode(phba, intr_mode);
+			break;
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"0447
Configure interrupt mode (%d) "
+					"failed active interrupt test.\n",
+					intr_mode);
+			/* Disable the current interrupt mode */
+			lpfc_sli_disable_intr(phba);
+			/* Try next level of interrupt mode */
+			cfg_mode = --intr_mode;
+		}
+	}
+
+	/* Perform post initialization setup */
+	lpfc_post_init_setup(phba);
+
+	/* Check if there are static vports to be created. */
+	lpfc_create_static_vport(phba);
+
+	return 0;
+
+out_remove_device:
+	lpfc_unset_hba(phba);
+out_free_sysfs_attr:
+	lpfc_free_sysfs_attr(vport);
+out_destroy_shost:
+	lpfc_destroy_shost(phba);
+out_unset_driver_resource:
+	lpfc_unset_driver_resource_phase2(phba);
+out_free_iocb_list:
+	lpfc_free_iocb_list(phba);
+out_unset_driver_resource_s3:
+	lpfc_sli_driver_resource_unset(phba);
+out_unset_pci_mem_s3:
+	lpfc_sli_pci_mem_unset(phba);
+out_disable_pci_dev:
+	lpfc_disable_pci_dev(phba);
+out_free_phba:
+	lpfc_hba_free(phba);
+	return error;
+}
+
+/**
+ * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be called to detach a device with SLI-3 interface
+ * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * removed from PCI bus, it performs all the necessary cleanup for the HBA
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void __devexit
+lpfc_pci_remove_one_s3(struct pci_dev *pdev)
+{
+	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_vport **vports;
+	struct lpfc_hba   *phba = vport->phba;
+	int i;
+	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+	spin_lock_irq(&phba->hbalock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_free_sysfs_attr(vport);
+
+	/* Release all the vports against this physical port */
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
+			fc_vport_terminate(vports[i]->fc_vport);
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	/* Remove FC host and then SCSI host with the physical port */
+	fc_remove_host(shost);
+	scsi_remove_host(shost);
+	lpfc_cleanup(vport);
+
+	/*
+	 * Bring down the SLI Layer. This step disables all interrupts,
+	 * clears the rings, discards all mailbox commands, and resets
+	 * the HBA.
+	 */
+
+	/* HBA interrupt will be disabled after this call */
+	lpfc_sli_hba_down(phba);
+	/* Stopping the kthread triggers work_done one more time */
+	kthread_stop(phba->worker_thread);
+	/* Final cleanup of txcmplq and reset the HBA */
+	lpfc_sli_brdrestart(phba);
+
+	lpfc_stop_hba_timers(phba);
+	spin_lock_irq(&phba->hbalock);
+	list_del_init(&vport->listentry);
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_debugfs_terminate(vport);
+
+	/* Disable interrupt */
+	lpfc_sli_disable_intr(phba);
+
+	pci_set_drvdata(pdev, NULL);
+	scsi_host_put(shost);
+
+	/*
+	 * Call scsi_free before mem_free since scsi bufs are released to their
+	 * corresponding pools here.
+	 */
+	lpfc_scsi_free(phba);
+	lpfc_mem_free_all(phba);
+
+	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+			  phba->hbqslimp.virt, phba->hbqslimp.phys);
+
+	/* Free resources associated with SLI2 interface */
+	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+			  phba->slim2p.virt, phba->slim2p.phys);
+
+	/* unmap adapter SLIM and Control Registers */
+	iounmap(phba->ctrl_regs_memmap_p);
+	iounmap(phba->slim_memmap_p);
+
+	lpfc_hba_free(phba);
+
+	pci_release_selected_regions(pdev, bars);
+	pci_disable_device(pdev);
+}
+
+/**
+ * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) to device with SLI-3 interface spec. When
+ * PM invokes this method, it quiesces the device by stopping the driver's
+ * worker thread for the device, turning off device's interrupt and DMA,
+ * and bring the device offline. Note that as the driver implements the
+ * minimum PM requirements to a power-aware driver's PM support for the
+ * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
+ * to the suspend() method call will be treated as SUSPEND and the driver will
+ * fully reinitialize its device during resume() method call, the driver will
+ * set device to PCI_D3hot state in PCI config space instead of setting it
+ * according to the @msg provided by the PM.
+ *
+ * Return code
+ * 0 - driver suspended the device
+ * Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0473 PCI device Power Management suspend.\n");
+
+	/* Bring down the device */
+	lpfc_offline_prep(phba);
+	lpfc_offline(phba);
+	kthread_stop(phba->worker_thread);
+
+	/* Disable interrupt from device */
+	lpfc_sli_disable_intr(phba);
+
+	/* Save device state to PCI config space */
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
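The suspend path just shown reduces to three steps common to PCI drivers of this era; a minimal sketch using only standard PCI PM calls (the quiesce step is driver-specific and elided here):

#include <linux/pci.h>

static int example_suspend(struct pci_dev *pdev, pm_message_t msg)
{
	/* 1. quiesce: stop worker threads, disable the device interrupt */
	/* 2. save config space while the device is still in D0 */
	pci_save_state(pdev);
	/* 3. drop to D3hot unconditionally, treating every message as
	 *    SUSPEND rather than inspecting msg, as the comment above
	 *    explains */
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}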
+
+/**
+ * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) to device with SLI-3 interface spec. When PM
+ * invokes this method, it restores the device's PCI config space state and
+ * fully reinitializes the device and brings it online. Note that as the
+ * driver implements the minimum PM requirements to a power-aware driver's
+ * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
+ * FREEZE) to the suspend() method call will be treated as SUSPEND and the
+ * driver will fully reinitialize its device during resume() method call,
+ * the device will be set to PCI_D0 directly in PCI config space before
+ * restoring the state.
+ *
+ * Return code
+ * 0 - driver resumed the device
+ * Error otherwise
+ **/
+static int
+lpfc_pci_resume_one_s3(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	uint32_t intr_mode;
+	int error;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0452 PCI device Power Management resume.\n");
+
+	/* Restore device state from PCI config space */
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	if (pdev->is_busmaster)
+		pci_set_master(pdev);
+
+	/* Startup the kernel thread for this host adapter. */
+	phba->worker_thread = kthread_run(lpfc_do_work, phba,
+					  "lpfc_worker_%d", phba->brd_no);
+	if (IS_ERR(phba->worker_thread)) {
+		error = PTR_ERR(phba->worker_thread);
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0434 PM resume failed to start worker "
+				"thread: error=x%x.\n", error);
+		return error;
+	}
+
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0430 PM resume Failed to enable interrupt\n");
+		return -EIO;
+	} else
+		phba->intr_mode = intr_mode;
+
+	/* Restart HBA and bring it online */
+	lpfc_sli_brdrestart(phba);
+	lpfc_online(phba);
+
+	/* Log the current active interrupt mode */
+	lpfc_log_intr_mode(phba, phba->intr_mode);
+
+	return 0;
+}
+
+/**
+ * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is called from the PCI subsystem for I/O error handling to
+ * device with SLI-3 interface spec. This function is called by the PCI
+ * subsystem after a PCI bus error affecting this device has been detected.
+ * When this function is invoked, it will need to stop all the I/Os and
+ * interrupt(s) to the device. Once that is done, it will return
+ * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
+ * as desired.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring  *pring;
+
+	if (state == pci_channel_io_perm_failure) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0472 PCI channel I/O permanent failure\n");
+		/* Block all SCSI devices' I/Os on the host */
+		lpfc_scsi_dev_block(phba);
+		/* Clean up all driver's outstanding SCSI I/Os */
+		lpfc_sli_flush_fcp_rings(phba);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_disable_device(pdev);
+	/*
+	 * There may be I/Os dropped by the firmware.
+	 * Error iocb (I/O) on txcmplq and let the SCSI layer
+	 * retry it after re-establishing link.
+	 */
+	pring = &psli->ring[psli->fcp_ring];
+	lpfc_sli_abort_iocb_ring(phba, pring);
+
+	/* Disable interrupt */
+	lpfc_sli_disable_intr(phba);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is called from the PCI subsystem for error handling to
+ * device with SLI-3 interface spec.
This is called after PCI bus has been + * reset to restart the PCI card from scratch, as if from a cold-boot. + * During the PCI subsystem error recovery, after driver returns + * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error + * recovery and then call this routine before calling the .resume method + * to recover the device. This function will initialize the HBA device, + * enable the interrupt, but it will just put the HBA to offline state + * without passing any I/O traffic. + * + * Return codes + * PCI_ERS_RESULT_RECOVERED - the device has been recovered + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + */ +static pci_ers_result_t +lpfc_io_slot_reset_s3(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + struct lpfc_sli *psli = &phba->sli; + uint32_t intr_mode; + + dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); + if (pci_enable_device_mem(pdev)) { + printk(KERN_ERR "lpfc: Cannot re-enable " + "PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_restore_state(pdev); + if (pdev->is_busmaster) + pci_set_master(pdev); + + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); + + /* Configure and enable interrupt */ + intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0427 Cannot re-enable interrupt after " + "slot reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } else + phba->intr_mode = intr_mode; + + /* Take device offline; this will perform cleanup */ + lpfc_offline(phba); + lpfc_sli_brdrestart(phba); + + /* Log the current active interrupt mode */ + lpfc_log_intr_mode(phba, phba->intr_mode); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. + * @pdev: pointer to PCI device + * + * This routine is called from the PCI subsystem for error handling to device + * with SLI-3 interface spec. It is called when kernel error recovery tells + * the lpfc driver that it is ok to resume normal PCI operation after PCI bus + * error recovery. After this call, traffic can start to flow from this device + * again. + */ +static void +lpfc_io_resume_s3(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - lpfc_sli_disable_intr(phba); + lpfc_online(phba); +} - return; +/** + * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve + * @phba: pointer to lpfc hba data structure. + * + * returns the number of ELS/CT IOCBs to reserve + **/ +int +lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) +{ + int max_xri = phba->sli4_hba.max_cfg_param.max_xri; + + if (max_xri <= 100) + return 4; + else if (max_xri <= 256) + return 8; + else if (max_xri <= 512) + return 16; + else if (max_xri <= 1024) + return 32; + else + return 48; } /** - * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. + * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys * @pdev: pointer to PCI device * @pid: pointer to PCI device identifier * - * This routine is to be called to attach a device with SLI-3 interface spec - * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is + * This routine is called from the kernel's PCI subsystem to device with + * SLI-4 interface spec. 
When an Emulex HBA with SLI-4 interface spec is
  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
- * information of the device and driver to see if the driver can support
- * this kind of device. If the match is successful, the driver core
- * invokes this routine. If this routine determines it can claim the HBA, it
- * does all the initialization that it needs to do to handle the HBA properly.
+ * information of the device and driver to see if the driver can support
+ * this kind of device. If the match is successful, the driver core invokes
+ * this routine. If this routine determines it can claim the HBA, it does
+ * all the initialization that it needs to do to handle the HBA properly.
  *
  * Return code
  * 	0 - driver can claim the device
  * 	negative value - driver can not claim the device
  **/
 static int __devinit
-lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
+lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
 	struct lpfc_hba   *phba;
 	struct lpfc_vport *vport = NULL;
 	int error;
 	uint32_t cfg_mode, intr_mode;
+	int mcnt;
 
 	/* Allocate memory for HBA structure */
 	phba = lpfc_hba_alloc(pdev);
@@ -3660,20 +7213,20 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
 	error = lpfc_enable_pci_dev(phba);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1401 Failed to enable pci device.\n");
+				"1409 Failed to enable pci device.\n");
 		goto out_free_phba;
 	}
 
-	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
-	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
+	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
+	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
 	if (error)
 		goto out_disable_pci_dev;
 
-	/* Set up SLI-3 specific device PCI memory space */
-	error = lpfc_sli_pci_mem_setup(phba);
+	/* Set up SLI-4 specific device PCI memory space */
+	error = lpfc_sli4_pci_mem_setup(phba);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1402 Failed to set up pci memory space.\n");
+				"1410 Failed to set up pci memory space.\n");
 		goto out_disable_pci_dev;
 	}
 
@@ -3681,31 +7234,32 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
 	error = lpfc_setup_driver_resource_phase1(phba);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1403 Failed to set up driver resource.\n");
-		goto out_unset_pci_mem_s3;
+				"1411 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s4;
 	}
 
-	/* Set up SLI-3 specific device driver resources */
-	error = lpfc_sli_driver_resource_setup(phba);
+	/* Set up SLI-4 Specific device driver resources */
+	error = lpfc_sli4_driver_resource_setup(phba);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1404 Failed to set up driver resource.\n");
-		goto out_unset_pci_mem_s3;
+				"1412 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s4;
 	}
 
 	/* Initialize and populate the iocb list per host */
-	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
+	error = lpfc_init_iocb_list(phba,
+			phba->sli4_hba.max_cfg_param.max_xri);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1405 Failed to initialize iocb list.\n");
-		goto out_unset_driver_resource_s3;
+				"1413 Failed to initialize iocb list.\n");
+		goto out_unset_driver_resource_s4;
 	}
 
 	/* Set up common device driver resources */
 	error = lpfc_setup_driver_resource_phase2(phba);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1406 Failed to set up driver resource.\n");
+				"1414 Failed to set up driver resource.\n");
 		goto out_free_iocb_list;
 	}
 
@@ -3713,7 +7267,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
 	error = lpfc_create_shost(phba);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1407 Failed to create scsi host.\n");
+				"1415 Failed to create scsi host.\n");
 		goto out_unset_driver_resource;
 	}
 
@@ -3722,7 +7276,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
 	error = lpfc_alloc_sysfs_attr(vport);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1476 Failed to allocate sysfs attr\n");
+				"1416 Failed to allocate sysfs attr\n");
 		goto out_destroy_shost;
 	}
 
@@ -3732,52 +7286,51 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
 		/* Put device to a known state before enabling interrupt */
 		lpfc_stop_port(phba);
 		/* Configure and enable interrupt */
-		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
+		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
 		if (intr_mode == LPFC_INTR_ERROR) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"0431 Failed to enable interrupt.\n");
+					"0426 Failed to enable interrupt.\n");
 			error = -ENODEV;
 			goto out_free_sysfs_attr;
 		}
-		/* SLI-3 HBA setup */
-		if (lpfc_sli_hba_setup(phba)) {
+		/* Set up SLI-4 HBA */
+		if (lpfc_sli4_hba_setup(phba)) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"1477 Failed to set up hba\n");
+					"1421 Failed to set up hba\n");
 			error = -ENODEV;
-			goto out_remove_device;
+			goto out_disable_intr;
 		}
 
-		/* Wait 50ms for the interrupts of previous mailbox commands */
-		msleep(50);
-		/* Check active interrupts on message signaled interrupts */
+		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
+		if (intr_mode != 0)
+			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
+							    LPFC_ACT_INTR_CNT);
+
+		/* Check active interrupts received only for MSI/MSI-X */
 		if (intr_mode == 0 ||
-		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
+		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
 			/* Log the current active interrupt mode */
 			phba->intr_mode = intr_mode;
 			lpfc_log_intr_mode(phba, intr_mode);
 			break;
-		} else {
-			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-					"0447 Configure interrupt mode (%d) "
-					"failed active interrupt test.\n",
-					intr_mode);
-			/* Disable the current interrupt mode */
-			lpfc_sli_disable_intr(phba);
-			/* Try next level of interrupt mode */
-			cfg_mode = --intr_mode;
 		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0451 Configure interrupt mode (%d) "
+				"failed active interrupt test.\n",
+				intr_mode);
+		/* Unset the previous SLI-4 HBA setup */
+		lpfc_sli4_unset_hba(phba);
+		/* Try next level of interrupt mode */
+		cfg_mode = --intr_mode;
 	}
 
 	/* Perform post initialization setup */
 	lpfc_post_init_setup(phba);
 
-	/* Check if there are static vports to be created.
*/
-	lpfc_create_static_vport(phba);
-
 	return 0;
 
-out_remove_device:
-	lpfc_unset_hba(phba);
+out_disable_intr:
+	lpfc_sli4_disable_intr(phba);
 out_free_sysfs_attr:
 	lpfc_free_sysfs_attr(vport);
 out_destroy_shost:
@@ -3786,10 +7339,10 @@ out_unset_driver_resource:
 	lpfc_unset_driver_resource_phase2(phba);
 out_free_iocb_list:
 	lpfc_free_iocb_list(phba);
-out_unset_driver_resource_s3:
-	lpfc_sli_driver_resource_unset(phba);
-out_unset_pci_mem_s3:
-	lpfc_sli_pci_mem_unset(phba);
+out_unset_driver_resource_s4:
+	lpfc_sli4_driver_resource_unset(phba);
+out_unset_pci_mem_s4:
+	lpfc_sli4_pci_mem_unset(phba);
 out_disable_pci_dev:
 	lpfc_disable_pci_dev(phba);
 out_free_phba:
@@ -3798,28 +7351,29 @@ out_free_phba:
 }
 
 /**
- * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
+ * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
- * This routine is to be called to detach a device with SLI-3 interface
- * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * This routine is called from the kernel's PCI subsystem to device with
+ * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
 static void __devexit
-lpfc_pci_remove_one_s3(struct pci_dev *pdev)
+lpfc_pci_remove_one_s4(struct pci_dev *pdev)
 {
-	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_vport **vports;
-	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_hba *phba = vport->phba;
 	int i;
-	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 
+	/* Mark the device unloading flag */
 	spin_lock_irq(&phba->hbalock);
 	vport->load_flag |= FC_UNLOADING;
 	spin_unlock_irq(&phba->hbalock);
 
+	/* Free the HBA sysfs attributes */
 	lpfc_free_sysfs_attr(vport);
 
 	/* Release all the vports against this physical port */
@@ -3832,73 +7386,56 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
 	/* Remove FC host and then SCSI host with the physical port */
 	fc_remove_host(shost);
 	scsi_remove_host(shost);
+
+	/* Perform cleanup on the physical port */
 	lpfc_cleanup(vport);
 
 	/*
 	 * Bring down the SLI Layer. This step disables all interrupts,
 	 * clears the rings, discards all mailbox commands, and resets
-	 * the HBA.
+	 * the HBA FCoE function.
 	 */
+	lpfc_debugfs_terminate(vport);
+	lpfc_sli4_hba_unset(phba);
 
-	/* HBA interrupt will be disabled after this call */
-	lpfc_sli_hba_down(phba);
-	/* Stopping the kthread triggers work_done one more time */
-	kthread_stop(phba->worker_thread);
-	/* Final cleanup of txcmplq and reset the HBA */
-	lpfc_sli_brdrestart(phba);
-
-	lpfc_stop_hba_timers(phba);
 	spin_lock_irq(&phba->hbalock);
 	list_del_init(&vport->listentry);
 	spin_unlock_irq(&phba->hbalock);
 
-	lpfc_debugfs_terminate(vport);
-
-	/* Disable interrupt */
-	lpfc_sli_disable_intr(phba);
-
-	pci_set_drvdata(pdev, NULL);
-	scsi_host_put(shost);
-
-	/*
-	 * Call scsi_free before mem_free since scsi bufs are released to their
-	 * corresponding pools here.
+	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
+	 * buffers are released to their corresponding pools here.
*/ lpfc_scsi_free(phba); - lpfc_mem_free_all(phba); - - dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), - phba->hbqslimp.virt, phba->hbqslimp.phys); + lpfc_sli4_driver_resource_unset(phba); - /* Free resources associated with SLI2 interface */ - dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, - phba->slim2p.virt, phba->slim2p.phys); + /* Unmap adapter Control and Doorbell registers */ + lpfc_sli4_pci_mem_unset(phba); - /* unmap adapter SLIM and Control Registers */ - iounmap(phba->ctrl_regs_memmap_p); - iounmap(phba->slim_memmap_p); + /* Release PCI resources and disable device's PCI function */ + scsi_host_put(shost); + lpfc_disable_pci_dev(phba); + /* Finally, free the driver's device data structure */ lpfc_hba_free(phba); - pci_release_selected_regions(pdev, bars); - pci_disable_device(pdev); + return; } /** - * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt + * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt * @pdev: pointer to PCI device * @msg: power management message * - * This routine is to be called from the kernel's PCI subsystem to support - * system Power Management (PM) to device with SLI-3 interface spec. When - * PM invokes this method, it quiesces the device by stopping the driver's - * worker thread for the device, turning off device's interrupt and DMA, - * and bring the device offline. Note that as the driver implements the - * minimum PM requirements to a power-aware driver's PM support for the - * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) - * to the suspend() method call will be treated as SUSPEND and the driver will - * fully reinitialize its device during resume() method call, the driver will - * set device to PCI_D3hot state in PCI config space instead of setting it + * This routine is called from the kernel's PCI subsystem to support system + * Power Management (PM) to device with SLI-4 interface spec. When PM invokes + * this method, it quiesces the device by stopping the driver's worker + * thread for the device, turning off device's interrupt and DMA, and bring + * the device offline. Note that as the driver implements the minimum PM + * requirements to a power-aware driver's PM support for suspend/resume -- all + * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() + * method call will be treated as SUSPEND and the driver will fully + * reinitialize its device during resume() method call, the driver will set + * device to PCI_D3hot state in PCI config space instead of setting it * according to the @msg provided by the PM. 
 *
 * Return code
@@ -3906,13 +7443,13 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
 * 	Error otherwise
 **/
 static int
-lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
+lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"0473 PCI device Power Management suspend.\n");
+			"0298 PCI device Power Management suspend.\n");
 
 	/* Bring down the device */
 	lpfc_offline_prep(phba);
@@ -3920,7 +7457,7 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
 	kthread_stop(phba->worker_thread);
 
 	/* Disable interrupt from device */
-	lpfc_sli_disable_intr(phba);
+	lpfc_sli4_disable_intr(phba);
 
 	/* Save device state to PCI config space */
 	pci_save_state(pdev);
@@ -3930,26 +7467,26 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
 }
 
 /**
- * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
+ * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
- * This routine is to be called from the kernel's PCI subsystem to support
- * system Power Management (PM) to device with SLI-3 interface spec. When PM
- * invokes this method, it restores the device's PCI config space state and
- * fully reinitializes the device and brings it online. Note that as the
- * driver implements the minimum PM requirements to a power-aware driver's
- * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
- * FREEZE) to the suspend() method call will be treated as SUSPEND and the
- * driver will fully reinitialize its device during resume() method call,
- * the device will be set to PCI_D0 directly in PCI config space before
- * restoring the state.
+ * This routine is called from the kernel's PCI subsystem to support system
+ * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
+ * this method, it restores the device's PCI config space state and fully
+ * reinitializes the device and brings it online. Note that as the driver
+ * implements the minimum PM requirements to a power-aware driver's PM for
+ * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
+ * to the suspend() method call will be treated as SUSPEND and the driver
+ * will fully reinitialize its device during resume() method call, the device
+ * will be set to PCI_D0 directly in PCI config space before restoring the
+ * state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
 static int
-lpfc_pci_resume_one_s3(struct pci_dev *pdev)
+lpfc_pci_resume_one_s4(struct pci_dev *pdev)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3957,7 +7494,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
 	int error;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"0452 PCI device Power Management resume.\n");
+			"0292 PCI device Power Management resume.\n");
 
 	/* Restore device state from PCI config space */
 	pci_set_power_state(pdev, PCI_D0);
@@ -3965,22 +7502,22 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
 	pci_restore_state(pdev);
 	if (pdev->is_busmaster)
 		pci_set_master(pdev);
 
-	/* Startup the kernel thread for this host adapter. */
+	/* Startup the kernel thread for this host adapter.
*/ phba->worker_thread = kthread_run(lpfc_do_work, phba, "lpfc_worker_%d", phba->brd_no); if (IS_ERR(phba->worker_thread)) { error = PTR_ERR(phba->worker_thread); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0434 PM resume failed to start worker " + "0293 PM resume failed to start worker " "thread: error=x%x.\n", error); return error; } /* Configure and enable interrupt */ - intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); + intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0430 PM resume Failed to enable interrupt\n"); + "0294 PM resume Failed to enable interrupt\n"); return -EIO; } else phba->intr_mode = intr_mode; @@ -3996,134 +7533,65 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev) } /** - * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error + * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device * @pdev: pointer to PCI device. * @state: the current PCI connection state. * - * This routine is called from the PCI subsystem for I/O error handling to - * device with SLI-3 interface spec. This function is called by the PCI - * subsystem after a PCI bus error affecting this device has been detected. - * When this function is invoked, it will need to stop all the I/Os and - * interrupt(s) to the device. Once that is done, it will return - * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery - * as desired. + * This routine is called from the PCI subsystem for error handling to device + * with SLI-4 interface spec. This function is called by the PCI subsystem + * after a PCI bus error affecting this device has been detected. When this + * function is invoked, it will need to stop all the I/Os and interrupt(s) + * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET + * for the PCI subsystem to perform proper recovery as desired. * * Return codes * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery * PCI_ERS_RESULT_DISCONNECT - device could not be recovered **/ static pci_ers_result_t -lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) +lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) { - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; - - if (state == pci_channel_io_perm_failure) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0472 PCI channel I/O permanent failure\n"); - /* Block all SCSI devices' I/Os on the host */ - lpfc_scsi_dev_block(phba); - /* Clean up all driver's outstanding SCSI I/Os */ - lpfc_sli_flush_fcp_rings(phba); - return PCI_ERS_RESULT_DISCONNECT; - } - - pci_disable_device(pdev); - /* - * There may be I/Os dropped by the firmware. - * Error iocb (I/O) on txcmplq and let the SCSI layer - * retry it after re-establishing link. - */ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); - - /* Disable interrupt */ - lpfc_sli_disable_intr(phba); - - /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /** - * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. + * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch * @pdev: pointer to PCI device. * - * This routine is called from the PCI subsystem for error handling to - * device with SLI-3 interface spec. 
This is called after PCI bus has been - * reset to restart the PCI card from scratch, as if from a cold-boot. - * During the PCI subsystem error recovery, after driver returns + * This routine is called from the PCI subsystem for error handling to device + * with SLI-4 interface spec. It is called after PCI bus has been reset to + * restart the PCI card from scratch, as if from a cold-boot. During the + * PCI subsystem error recovery, after the driver returns * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error - * recovery and then call this routine before calling the .resume method - * to recover the device. This function will initialize the HBA device, - * enable the interrupt, but it will just put the HBA to offline state - * without passing any I/O traffic. + * recovery and then call this routine before calling the .resume method to + * recover the device. This function will initialize the HBA device, enable + * the interrupt, but it will just put the HBA to offline state without + * passing any I/O traffic. * * Return codes * PCI_ERS_RESULT_RECOVERED - the device has been recovered * PCI_ERS_RESULT_DISCONNECT - device could not be recovered */ static pci_ers_result_t -lpfc_io_slot_reset_s3(struct pci_dev *pdev) +lpfc_io_slot_reset_s4(struct pci_dev *pdev) { - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - struct lpfc_sli *psli = &phba->sli; - uint32_t intr_mode; - - dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); - if (pci_enable_device_mem(pdev)) { - printk(KERN_ERR "lpfc: Cannot re-enable " - "PCI device after reset.\n"); - return PCI_ERS_RESULT_DISCONNECT; - } - - pci_restore_state(pdev); - if (pdev->is_busmaster) - pci_set_master(pdev); - - spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI_ACTIVE; - spin_unlock_irq(&phba->hbalock); - - /* Configure and enable interrupt */ - intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); - if (intr_mode == LPFC_INTR_ERROR) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0427 Cannot re-enable interrupt after " - "slot reset.\n"); - return PCI_ERS_RESULT_DISCONNECT; - } else - phba->intr_mode = intr_mode; - - /* Take device offline; this will perform cleanup */ - lpfc_offline(phba); - lpfc_sli_brdrestart(phba); - - /* Log the current active interrupt mode */ - lpfc_log_intr_mode(phba, phba->intr_mode); - return PCI_ERS_RESULT_RECOVERED; } /** - * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. + * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device * @pdev: pointer to PCI device * * This routine is called from the PCI subsystem for error handling to device - * with SLI-3 interface spec. It is called when kernel error recovery tells + * with SLI-4 interface spec. It is called when kernel error recovery tells * the lpfc driver that it is ok to resume normal PCI operation after PCI bus * error recovery. After this call, traffic can start to flow from this device * again. 
- */ + **/ static void -lpfc_io_resume_s3(struct pci_dev *pdev) +lpfc_io_resume_s4(struct pci_dev *pdev) { - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - - lpfc_online(phba); + return; } /** @@ -4154,6 +7622,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) return -ENODEV; switch (dev_id) { + case PCI_DEVICE_ID_TIGERSHARK: + case PCI_DEVICE_ID_TIGERSHARK_S: + rc = lpfc_pci_probe_one_s4(pdev, pid); + break; default: rc = lpfc_pci_probe_one_s3(pdev, pid); break; @@ -4181,6 +7653,9 @@ lpfc_pci_remove_one(struct pci_dev *pdev) case LPFC_PCI_DEV_LP: lpfc_pci_remove_one_s3(pdev); break; + case LPFC_PCI_DEV_OC: + lpfc_pci_remove_one_s4(pdev); + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1424 Invalid PCI device group: 0x%x\n", @@ -4215,6 +7690,9 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) case LPFC_PCI_DEV_LP: rc = lpfc_pci_suspend_one_s3(pdev, msg); break; + case LPFC_PCI_DEV_OC: + rc = lpfc_pci_suspend_one_s4(pdev, msg); + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1425 Invalid PCI device group: 0x%x\n", @@ -4248,6 +7726,9 @@ lpfc_pci_resume_one(struct pci_dev *pdev) case LPFC_PCI_DEV_LP: rc = lpfc_pci_resume_one_s3(pdev); break; + case LPFC_PCI_DEV_OC: + rc = lpfc_pci_resume_one_s4(pdev); + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1426 Invalid PCI device group: 0x%x\n", @@ -4283,6 +7764,9 @@ lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) case LPFC_PCI_DEV_LP: rc = lpfc_io_error_detected_s3(pdev, state); break; + case LPFC_PCI_DEV_OC: + rc = lpfc_io_error_detected_s4(pdev, state); + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1427 Invalid PCI device group: 0x%x\n", @@ -4317,6 +7801,9 @@ lpfc_io_slot_reset(struct pci_dev *pdev) case LPFC_PCI_DEV_LP: rc = lpfc_io_slot_reset_s3(pdev); break; + case LPFC_PCI_DEV_OC: + rc = lpfc_io_slot_reset_s4(pdev); + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1428 Invalid PCI device group: 0x%x\n", @@ -4346,6 +7833,9 @@ lpfc_io_resume(struct pci_dev *pdev) case LPFC_PCI_DEV_LP: lpfc_io_resume_s3(pdev); break; + case LPFC_PCI_DEV_OC: + lpfc_io_resume_s4(pdev); + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1429 Invalid PCI device group: 0x%x\n", diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 134fc7fc2127..7f5899b70bd2 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -28,8 +28,10 @@ #include +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 35a976733398..516f4802f84e 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -28,8 +28,10 @@ #include +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -45,7 +47,7 @@ * @phba: HBA to allocate pools for * * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, - * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools + * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. * * Notes: Not interrupt-safe. Must be called with no locks held. 
If any
@@ -56,19 +58,30 @@
 * -ENOMEM on failure (if any memory allocations fail)
 **/
 int
-lpfc_mem_alloc(struct lpfc_hba * phba)
+lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 {
 	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
 	int longs;
 	int i;
 
-	phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
-				phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->lpfc_scsi_dma_buf_pool =
+			pci_pool_create("lpfc_scsi_dma_buf_pool",
+					phba->pcidev,
+					phba->cfg_sg_dma_buf_size,
+					phba->cfg_sg_dma_buf_size,
+					0);
+	else
+		phba->lpfc_scsi_dma_buf_pool =
+			pci_pool_create("lpfc_scsi_dma_buf_pool",
+					phba->pcidev, phba->cfg_sg_dma_buf_size,
+					align, 0);
 	if (!phba->lpfc_scsi_dma_buf_pool)
 		goto fail;
 
 	phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
-							LPFC_BPL_SIZE, 8,0);
+							LPFC_BPL_SIZE,
+							align, 0);
 	if (!phba->lpfc_mbuf_pool)
 		goto fail_free_dma_buf_pool;
 
@@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
 				sizeof(struct lpfc_nodelist));
 	if (!phba->nlp_mem_pool)
 		goto fail_free_mbox_pool;
-
-	phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev,
-					      LPFC_BPL_SIZE, 8, 0);
-	if (!phba->lpfc_hbq_pool)
+	phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
+					      phba->pcidev,
+					      LPFC_HDR_BUF_SIZE, align, 0);
+	if (!phba->lpfc_hrb_pool)
 		goto fail_free_nlp_mem_pool;
+	phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
+					      phba->pcidev,
+					      LPFC_DATA_BUF_SIZE, align, 0);
+	if (!phba->lpfc_drb_pool)
+		goto fail_free_hbq_pool;
 
 	/* vpi zero is reserved for the physical port so add 1 to max */
 	longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
 	phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
 	if (!phba->vpi_bmask)
-		goto fail_free_hbq_pool;
+		goto fail_free_dbq_pool;
 
 	return 0;
 
+ fail_free_dbq_pool:
+	pci_pool_destroy(phba->lpfc_drb_pool);
+	phba->lpfc_drb_pool = NULL;
 fail_free_hbq_pool:
-	lpfc_sli_hbqbuf_free_all(phba);
-	pci_pool_destroy(phba->lpfc_hbq_pool);
+	pci_pool_destroy(phba->lpfc_hrb_pool);
+	phba->lpfc_hrb_pool = NULL;
 fail_free_nlp_mem_pool:
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
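The two pci_pool_create() branches in the hunk above differ only in the alignment argument: the SLI-4 branch passes the buffer size itself, so each buffer is naturally aligned and never straddles another allocation. A sketch of that call with an illustrative pool name and size (pci_pool_create() takes name, device, size, alignment, and an optional allocation-boundary argument, where 0 means no extra constraint):

#include <linux/pci.h>

static struct pci_pool *example_create_pool(struct pci_dev *pdev,
					    size_t buf_size)
{
	/* size == align: each buffer lands in its own naturally aligned
	 * slot; the final 0 imposes no additional boundary constraint */
	return pci_pool_create("example_pool", pdev, buf_size, buf_size, 0);
}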
* * Returns: None **/ void -lpfc_mem_free(struct lpfc_hba * phba) +lpfc_mem_free(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; - LPFC_MBOXQ_t *mbox, *next_mbox; - struct lpfc_dmabuf *mp; int i; + struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; + /* Free VPI bitmask memory */ kfree(phba->vpi_bmask); + + /* Free HBQ pools */ lpfc_sli_hbqbuf_free_all(phba); + pci_pool_destroy(phba->lpfc_drb_pool); + phba->lpfc_drb_pool = NULL; + pci_pool_destroy(phba->lpfc_hrb_pool); + phba->lpfc_hrb_pool = NULL; + + /* Free NLP memory pool */ + mempool_destroy(phba->nlp_mem_pool); + phba->nlp_mem_pool = NULL; + + /* Free mbox memory pool */ + mempool_destroy(phba->mbox_mem_pool); + phba->mbox_mem_pool = NULL; + + /* Free MBUF memory pool */ + for (i = 0; i < pool->current_count; i++) + pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, + pool->elements[i].phys); + kfree(pool->elements); + + pci_pool_destroy(phba->lpfc_mbuf_pool); + phba->lpfc_mbuf_pool = NULL; + /* Free DMA buffer memory pool */ + pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); + phba->lpfc_scsi_dma_buf_pool = NULL; + + return; +} + +/** + * lpfc_mem_free_all - Frees all PCI and driver memory + * @phba: HBA to free memory for + * + * Description: Free memory from PCI and driver memory pools and also those + * used : lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees + * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees + * the VPI bitmask. + * + * Returns: None + **/ +void +lpfc_mem_free_all(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *mbox, *next_mbox; + struct lpfc_dmabuf *mp; + + /* Free memory used in mailbox queue back to mailbox memory pool */ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { mp = (struct lpfc_dmabuf *) (mbox->context1); if (mp) { @@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba) list_del(&mbox->list); mempool_free(mbox, phba->mbox_mem_pool); } + /* Free memory used in mailbox cmpl list back to mailbox memory pool */ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { mp = (struct lpfc_dmabuf *) (mbox->context1); if (mp) { @@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba) list_del(&mbox->list); mempool_free(mbox, phba->mbox_mem_pool); } - + /* Free the active mailbox command back to the mailbox memory pool */ + spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irq(&phba->hbalock); if (psli->mbox_active) { mbox = psli->mbox_active; mp = (struct lpfc_dmabuf *) (mbox->context1); @@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba) psli->mbox_active = NULL; } - for (i = 0; i < pool->current_count; i++) - pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, - pool->elements[i].phys); - kfree(pool->elements); - - pci_pool_destroy(phba->lpfc_hbq_pool); - mempool_destroy(phba->nlp_mem_pool); - mempool_destroy(phba->mbox_mem_pool); - - pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); - pci_pool_destroy(phba->lpfc_mbuf_pool); - - phba->lpfc_hbq_pool = NULL; - phba->nlp_mem_pool = NULL; - phba->mbox_mem_pool = NULL; - phba->lpfc_scsi_dma_buf_pool = NULL; - phba->lpfc_mbuf_pool = NULL; + /* Free and destroy all the allocated memory pools */ + lpfc_mem_free(phba); /* Free the iocb lookup array */ kfree(psli->iocbq_lookup); psli->iocbq_lookup = NULL; + + return; } /** @@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) * lpfc_els_hbq_alloc - 
Allocate an HBQ buffer * @phba: HBA to allocate HBQ buffer for * - * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI + * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI * pool along a non-DMA-mapped container for it. * * Notes: Not interrupt-safe. Must be called with no locks held. @@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) if (!hbqbp) return NULL; - hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, + hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, &hbqbp->dbuf.phys); if (!hbqbp->dbuf.virt) { kfree(hbqbp); @@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) } /** - * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc + * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc * @phba: HBA buffer was allocated for * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc * @@ -348,11 +405,72 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) void lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) { - pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); + pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); kfree(hbqbp); return; } +/** + * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer + * @phba: HBA to allocate a receive buffer for + * + * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI + * pool along a non-DMA-mapped container for it. + * + * Notes: Not interrupt-safe. Must be called with no locks held. + * + * Returns: + * pointer to HBQ on success + * NULL on failure + **/ +struct hbq_dmabuf * +lpfc_sli4_rb_alloc(struct lpfc_hba *phba) +{ + struct hbq_dmabuf *dma_buf; + + dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); + if (!dma_buf) + return NULL; + + dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, + &dma_buf->hbuf.phys); + if (!dma_buf->hbuf.virt) { + kfree(dma_buf); + return NULL; + } + dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, + &dma_buf->dbuf.phys); + if (!dma_buf->dbuf.virt) { + pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, + dma_buf->hbuf.phys); + kfree(dma_buf); + return NULL; + } + dma_buf->size = LPFC_BPL_SIZE; + return dma_buf; +} + +/** + * lpfc_sli4_rb_free - Frees a receive buffer + * @phba: HBA buffer was allocated for + * @dmab: DMA Buffer container returned by lpfc_sli4_hbq_alloc + * + * Description: Frees both the container and the DMA-mapped buffers returned by + * lpfc_sli4_rb_alloc. + * + * Notes: Can be called with or without locks held. 
+ * + * Returns: None **/ +void +lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab) +{ + pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); + pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); + kfree(dmab); + return; +} + /** * lpfc_in_buf_free - Free a DMA buffer * @phba: HBA buffer is associated with diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 08cdc77af41c..6ba5a72f6049 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -28,8 +28,10 @@ #include #include +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index a226c053c0f4..9af2db355bc6 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -31,8 +31,10 @@ #include #include "lpfc_version.h" +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -57,6 +59,8 @@ static char *dif_op_str[] = { "SCSI_PROT_READ_CONVERT", "SCSI_PROT_WRITE_CONVERT" }; +static void +lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); static void lpfc_debug_save_data(struct scsi_cmnd *cmnd) @@ -565,12 +569,279 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) } iocb->ulpClass = CLASS3; psb->status = IOSTAT_SUCCESS; + /* Put it back into the SCSI buffer list */ + lpfc_release_scsi_buf_s4(phba, psb); } return bcnt; } +/** + * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort + * @phba: pointer to lpfc hba data structure. + * @axri: pointer to the fcp xri abort wcqe structure. + * + * This routine is invoked by the worker thread to process a SLI4 fast-path + * FCP aborted xri. + **/ +void +lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri) +{ + uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); + struct lpfc_scsi_buf *psb, *next_psb; + unsigned long iflag = 0; + + spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); + list_for_each_entry_safe(psb, next_psb, + &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { + if (psb->cur_iocbq.sli4_xritag == xri) { + list_del(&psb->list); + psb->status = IOSTAT_SUCCESS; + spin_unlock_irqrestore( + &phba->sli4_hba.abts_scsi_buf_list_lock, + iflag); + lpfc_release_scsi_buf_s4(phba, psb); + return; + } + } + spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, + iflag); +} + +/** + * lpfc_sli4_repost_scsi_sgl_list - Repost the Scsi buffers sgl pages as block + * @phba: pointer to lpfc hba data structure. + * + * This routine walks the list of scsi buffers that have been allocated and + * reposts them to the HBA by using SGL block post. This is needed after a + * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine + * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list + * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers. + * + * Returns: 0 = success, non-zero failure.
+ **/ +int +lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) +{ + struct lpfc_scsi_buf *psb; + int index, status, bcnt = 0, rcnt = 0, rc = 0; + LIST_HEAD(sblist); + + for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) { + psb = phba->sli4_hba.lpfc_scsi_psb_array[index]; + if (psb) { + /* Remove from SCSI buffer list */ + list_del(&psb->list); + /* Add it to a local SCSI buffer list */ + list_add_tail(&psb->list, &sblist); + if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) { + bcnt = rcnt; + rcnt = 0; + } + } else + /* A hole present in the XRI array, need to skip */ + bcnt = rcnt; + + if (index == phba->sli4_hba.scsi_xri_cnt - 1) + /* End of XRI array for SCSI buffer, complete */ + bcnt = rcnt; + + /* Continue until collect up to a nembed page worth of sgls */ + if (bcnt == 0) + continue; + /* Now, post the SCSI buffer list sgls as a block */ + status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); + /* Reset SCSI buffer count for next round of posting */ + bcnt = 0; + while (!list_empty(&sblist)) { + list_remove_head(&sblist, psb, struct lpfc_scsi_buf, + list); + if (status) { + /* Put this back on the abort scsi list */ + psb->status = IOSTAT_LOCAL_REJECT; + psb->result = IOERR_ABORT_REQUESTED; + rc++; + } else + psb->status = IOSTAT_SUCCESS; + /* Put it back into the SCSI buffer list */ + lpfc_release_scsi_buf_s4(phba, psb); + } + } + return rc; +} + +/** + * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec + * @vport: The virtual port for which this call being executed. + * @num_to_allocate: The requested number of buffers to allocate. + * + * This routine allocates a scsi buffer for device with SLI-4 interface spec, + * the scsi buffer contains all the necessary information needed to initiate + * a SCSI I/O. + * + * Return codes: + * int - number of scsi buffers that were allocated. + * 0 = failure, less than num_to_alloc is a partial failure. + **/ +static int +lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_scsi_buf *psb; + struct sli4_sge *sgl; + IOCB_t *iocb; + dma_addr_t pdma_phys_fcp_cmd; + dma_addr_t pdma_phys_fcp_rsp; + dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; + uint16_t iotag, last_xritag = NO_XRI; + int status = 0, index; + int bcnt; + int non_sequential_xri = 0; + int rc = 0; + LIST_HEAD(sblist); + + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { + psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); + if (!psb) + break; + + /* + * Get memory from the pci pool to map the virt space to pci bus + * space for an I/O. The DMA buffer includes space for the + * struct fcp_cmnd, struct fcp_rsp and the number of bde's + * necessary to support the sg_tablesize. + */ + psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, + GFP_KERNEL, &psb->dma_handle); + if (!psb->data) { + kfree(psb); + break; + } + + /* Initialize virtual ptrs to dma_buf region. */ + memset(psb->data, 0, phba->cfg_sg_dma_buf_size); + + /* Allocate iotag for psb->cur_iocbq. 
*/ + iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); + if (iotag == 0) { + kfree(psb); + break; + } + + psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba); + if (psb->cur_iocbq.sli4_xritag == NO_XRI) { + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + psb->data, psb->dma_handle); + kfree(psb); + break; + } + if (last_xritag != NO_XRI + && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) { + non_sequential_xri = 1; + } else + list_add_tail(&psb->list, &sblist); + last_xritag = psb->cur_iocbq.sli4_xritag; + + index = phba->sli4_hba.scsi_xri_cnt++; + psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; + + psb->fcp_bpl = psb->data; + psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) + - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + + sizeof(struct fcp_cmnd)); + + /* Initialize local short-hand pointers. */ + sgl = (struct sli4_sge *)psb->fcp_bpl; + pdma_phys_bpl = psb->dma_handle; + pdma_phys_fcp_cmd = + (psb->dma_handle + phba->cfg_sg_dma_buf_size) + - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); + + /* + * The first two bdes are the FCP_CMD and FCP_RSP. The balance + * are sg list bdes. Initialize the first two and leave the + * rest for queuecommand. + */ + sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); + bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd)); + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->word3 = cpu_to_le32(sgl->word3); + sgl++; + + /* Setup the physical region for the FCP RSP */ + sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); + bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp)); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->word3 = cpu_to_le32(sgl->word3); + + /* + * Since the IOCB for the FCP I/O is built into this + * lpfc_scsi_buf, initialize it with all known data now. + */ + iocb = &psb->cur_iocbq.iocb; + iocb->un.fcpi64.bdl.ulpIoTag32 = 0; + iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64; + /* setting the BPL size to 2 * sizeof BDE may not be correct. + * We are setting the bpl to point to our sgl. An sgl's + * entries are 16 bytes, a bpl's entries are 12 bytes.
+ */ + iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); + iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd); + iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd); + iocb->ulpBdeCount = 1; + iocb->ulpLe = 1; + iocb->ulpClass = CLASS3; + if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) + pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; + else + pdma_phys_bpl1 = 0; + psb->dma_phys_bpl = pdma_phys_bpl; + phba->sli4_hba.lpfc_scsi_psb_array[index] = psb; + if (non_sequential_xri) { + status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl, + pdma_phys_bpl1, + psb->cur_iocbq.sli4_xritag); + if (status) { + /* Put this back on the abort scsi list */ + psb->status = IOSTAT_LOCAL_REJECT; + psb->result = IOERR_ABORT_REQUESTED; + rc++; + } else + psb->status = IOSTAT_SUCCESS; + /* Put it back into the SCSI buffer list */ + lpfc_release_scsi_buf_s4(phba, psb); + break; + } + } + if (bcnt) { + status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); + /* Reset SCSI buffer count for next round of posting */ + while (!list_empty(&sblist)) { + list_remove_head(&sblist, psb, struct lpfc_scsi_buf, + list); + if (status) { + /* Put this back on the abort scsi list */ + psb->status = IOSTAT_LOCAL_REJECT; + psb->result = IOERR_ABORT_REQUESTED; + rc++; + } else + psb->status = IOSTAT_SUCCESS; + /* Put it back into the SCSI buffer list */ + lpfc_release_scsi_buf_s4(phba, psb); + } + } + + return bcnt + non_sequential_xri - rc; +} + /** * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator * @vport: The virtual port for which this call is being executed. @@ -637,6 +908,39 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); } +/** + * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list. + * @phba: The Hba for which this call is being executed. + * @psb: The scsi buffer which is being released. + * + * This routine releases @psb scsi buffer by adding it to tail of @phba + * lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer + * and cannot be reused for at least RA_TOV amount of time if it was + * aborted. + **/ +static void +lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +{ + unsigned long iflag = 0; + + if (psb->status == IOSTAT_LOCAL_REJECT + && psb->result == IOERR_ABORT_REQUESTED) { + spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, + iflag); + psb->pCmd = NULL; + list_add_tail(&psb->list, + &phba->sli4_hba.lpfc_abts_scsi_buf_list); + spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, + iflag); + } else { + + spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); + psb->pCmd = NULL; + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); + spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + } +} + /** * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list. * @phba: The Hba for which this call is being executed. @@ -1454,6 +1758,115 @@ out: return ret; } +/** + * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be mapped. + * + * This routine does the pci dma mapping for scatter-gather list of scsi cmnd + * field of @lpfc_cmd for device with SLI-4 interface spec.
+ * + * Return codes: + * 1 - Error + * 0 - Success + **/ +static int +lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; + struct scatterlist *sgel = NULL; + struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; + struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; + IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; + dma_addr_t physaddr; + uint32_t num_bde = 0; + uint32_t dma_len; + uint32_t dma_offset = 0; + int nseg; + + /* + * There are three possibilities here - use scatter-gather segment, use + * the single mapping, or neither. Start the lpfc command prep by + * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first + * data bde entry. + */ + if (scsi_sg_count(scsi_cmnd)) { + /* + * The driver stores the segment count returned from pci_map_sg + * because this is a count of dma-mappings used to map the use_sg + * pages. They are not guaranteed to be the same for those + * architectures that implement an IOMMU. + */ + + nseg = scsi_dma_map(scsi_cmnd); + if (unlikely(!nseg)) + return 1; + sgl += 1; + /* clear the last flag in the fcp_rsp map entry */ + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl += 1; + + lpfc_cmd->seg_cnt = nseg; + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { + printk(KERN_ERR "%s: Too many sg segments from " + "dma_map_sg. Config %d, seg_cnt %d\n", + __func__, phba->cfg_sg_seg_cnt, + lpfc_cmd->seg_cnt); + scsi_dma_unmap(scsi_cmnd); + return 1; + } + + /* + * The driver established a maximum scatter-gather segment count + * during probe that limits the number of sg elements in any + * single scsi command. Just run through the seg_cnt and format + * the sge's. + * When using SLI-3 the driver will try to fit all the BDEs into + * the IOCB. If it can't then the BDEs get added to a BPL as it + * does for SLI-2 mode. + */ + scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { + physaddr = sg_dma_address(sgel); + dma_len = sg_dma_len(sgel); + bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); + if ((num_bde + 1) == nseg) + bf_set(lpfc_sli4_sge_last, sgl, 1); + else + bf_set(lpfc_sli4_sge_last, sgl, 0); + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->word3 = cpu_to_le32(sgl->word3); + dma_offset += dma_len; + sgl++; + } + } else { + sgl += 1; + /* clear the last flag in the fcp_rsp map entry */ + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + } + + /* + * Finish initializing those IOCB fields that are dependent on the + * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is + * explicitly reinitialized and all iocb memory resources are reused. + */ + fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); + + /* + * Due to difference in data length between DIF/non-DIF paths, + * we need to set word 4 of IOCB here + */ + iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); + return 0; +} + /** * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer * @phba: The Hba for which this call is being executed.
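The scatter-gather loop in lpfc_scsi_prep_dma_buf_s4 above follows a fixed pattern: map the scatterlist, walk the returned segments, mark only the final entry with the last-SGE flag, and byte-swap each descriptor word to little-endian before the hardware sees it. A minimal sketch of that pattern, assuming a simplified flat SGE layout in place of the real struct sli4_sge and its bf_set() bitfield accessors:

	#include <linux/kernel.h>
	#include <linux/scatterlist.h>

	/* Hypothetical flat SGE layout; the real struct sli4_sge packs these
	 * fields and is manipulated through bf_set()/bf_get() helpers instead.
	 */
	struct toy_sge {
		__le32 addr_lo;
		__le32 addr_hi;
		__le32 len_flags;
	};
	#define TOY_SGE_LAST	(1U << 31)	/* assumed position of the "last" bit */

	static void toy_fill_sgl(struct toy_sge *sge, struct scatterlist *sgl, int nseg)
	{
		struct scatterlist *sgel;
		int i;

		for_each_sg(sgl, sgel, nseg, i) {
			u32 flags = sg_dma_len(sgel);

			if (i == nseg - 1)
				flags |= TOY_SGE_LAST;	/* only the final SGE is marked last */
			sge[i].addr_lo = cpu_to_le32(lower_32_bits(sg_dma_address(sgel)));
			sge[i].addr_hi = cpu_to_le32(upper_32_bits(sg_dma_address(sgel)));
			sge[i].len_flags = cpu_to_le32(flags);
		}
	}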
@@ -1589,6 +2002,22 @@ lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) psb->pCmd->sc_data_direction); } +/** + * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev + * @phba: The Hba for which this call is being executed. + * @psb: The scsi buffer which is going to be un-mapped. + * + * This routine does DMA un-mapping of scatter gather list of scsi command + * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to + * remove the sgl for this scsi buffer then we will do it here. For now + * we should be able to just call the sli3 unprep routine. + **/ +static void +lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +{ + lpfc_scsi_unprep_dma_buf_s3(phba, psb); +} + /** * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list * @phba: The Hba for which this call is being executed. @@ -2128,6 +2557,29 @@ lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, piocbq->vport = vport; } +/** + * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP info unit for SLI4 dev + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: The scsi command which needs to be sent. + * @pnode: Pointer to lpfc_nodelist. + * + * This routine initializes fcp_cmnd and iocb data structure from scsi command + * to transfer for device with SLI4 interface spec. + **/ +static void +lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, + struct lpfc_nodelist *pnode) +{ + /* + * The prep cmnd routines do not touch the sgl or its + * entries. We may not have to do anything different. + * I will leave this function in place until we can + * run some IO through the driver and determine if changes + * are needed. + */ + return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode); +} + /** * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit * @vport: The virtual port for which this call is being executed. @@ -2208,6 +2660,37 @@ lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, return 1; } +/** + * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. + * @lun: Logical unit number. + * @task_mgmt_cmd: SCSI task management command. + * + * This routine creates FCP information unit corresponding to @task_mgmt_cmd + * for device with SLI-4 interface spec. + * + * Return codes: + * 0 - Error + * 1 - Success + **/ +static int +lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport, + struct lpfc_scsi_buf *lpfc_cmd, + unsigned int lun, + uint8_t task_mgmt_cmd) +{ + /* + * The prep cmnd routines do not touch the sgl or its + * entries. We may not have to do anything different. + * I will leave this function in place until we can + * run some IO through the driver and determine if changes + * are needed. + */ + return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun, + task_mgmt_cmd); +} + /** * lpfc_scsi_prep_task_mgmt_cmnd - Wrapper func convert scsi TM cmd to FCP info * @vport: The virtual port for which this call is being executed.
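Both _s4 prep routines above deliberately delegate to their _s3 counterparts; the separate entry points still earn their keep because the per-revision dispatch table (filled in by lpfc_scsi_api_table_setup in the next hunk) can later be repointed at true SLI4 implementations without touching any caller. A hedged miniature of that jump-table idiom, with invented names standing in for the real lpfc handlers:

	/* Invented types and handlers, for illustration only. */
	struct hba;
	struct io_buf;

	static int prep_dma_s3(struct hba *h, struct io_buf *b) { (void)h; (void)b; return 0; }
	static int prep_dma_s4(struct hba *h, struct io_buf *b) { (void)h; (void)b; return 0; }

	struct hba_ops {
		int (*prep_dma)(struct hba *h, struct io_buf *b);
	};

	static const struct hba_ops sli3_ops = { .prep_dma = prep_dma_s3 };
	static const struct hba_ops sli4_ops = { .prep_dma = prep_dma_s4 };

	/* Chosen once at probe time; every caller then goes through ops->prep_dma(). */
	static const struct hba_ops *hba_ops_setup(int dev_grp)
	{
		switch (dev_grp) {
		case 0:  return &sli3_ops;	/* analogous to LPFC_PCI_DEV_LP */
		case 1:  return &sli4_ops;	/* analogous to LPFC_PCI_DEV_OC */
		default: return NULL;		/* unknown PCI device group */
		}
	}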
@@ -2257,6 +2740,15 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) lpfc_scsi_prep_task_mgmt_cmd_s3; phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; break; + case LPFC_PCI_DEV_OC: + phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4; + phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; + phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4; + phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4; + phba->lpfc_scsi_prep_task_mgmt_cmd = + lpfc_scsi_prep_task_mgmt_cmd_s4; + phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1418 Invalid HBA PCI-device group: 0x%x\n", diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index c7c440d5fa29..65dfc8bd5b49 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -140,6 +140,8 @@ struct lpfc_scsi_buf { struct fcp_rsp *fcp_rsp; struct ulp_bde64 *fcp_bpl; + dma_addr_t dma_phys_bpl; + /* cur_iocbq has phys of the dma-able buffer. * Iotag is in here */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index e2d07d97fa8b..706bb22a6e8e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -29,9 +29,12 @@ #include #include #include +#include +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -120,6 +123,76 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba) return iocbq; } +/** + * __lpfc_clear_active_sglq - Remove the active sglq for this XRI. + * @phba: Pointer to HBA context object. + * @xritag: XRI value. + * + * This function clears the sglq pointer from the array of active + * sglq's. The xritag that is passed in is used to index into the + * array. Before the xritag can be used it needs to be adjusted + * by subtracting the xribase. + * + * Returns sglq pointer = success, NULL = Failure. + **/ +static struct lpfc_sglq * +__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) +{ + uint16_t adj_xri; + struct lpfc_sglq *sglq; + adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; + if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri) + return NULL; + sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri]; + phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL; + return sglq; +} + +/** + * __lpfc_get_active_sglq - Get the active sglq for this XRI. + * @phba: Pointer to HBA context object. + * @xritag: XRI value. + * + * This function returns the sglq pointer from the array of active + * sglq's. The xritag that is passed in is used to index into the + * array. Before the xritag can be used it needs to be adjusted + * by subtracting the xribase. + * + * Returns sglq pointer = success, NULL = Failure. + **/ +static struct lpfc_sglq * +__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) +{ + uint16_t adj_xri; + struct lpfc_sglq *sglq; + adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; + if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri) + return NULL; + sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri]; + return sglq; +} + +/** + * __lpfc_sli_get_sglq - Allocates an sglq object from the sgl pool + * @phba: Pointer to HBA context object. + * + * This function is called with hbalock held. It gets a new driver + * sglq object from the sglq list. If the list is not empty, it returns + * a pointer to the newly allocated sglq object, else it returns NULL.
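+ * + * For illustration only (assumed expansion, not new driver code): the + * list_remove_head() idiom used in the body below behaves roughly like + * if (!list_empty(lpfc_sgl_list)) { sglq = list_first_entry(lpfc_sgl_list, struct lpfc_sglq, list); list_del_init(&sglq->list); } + * so an empty free list leaves sglq NULL.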
+ **/ +static struct lpfc_sglq * +__lpfc_sli_get_sglq(struct lpfc_hba *phba) +{ + struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; + struct lpfc_sglq *sglq = NULL; + uint16_t adj_xri; + list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); + adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; + phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; + return sglq; +} + /** * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool * @phba: Pointer to HBA context object. @@ -298,6 +371,14 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) case CMD_GEN_REQUEST64_CR: case CMD_GEN_REQUEST64_CX: case CMD_XMIT_ELS_RSP64_CX: + case DSSCMD_IWRITE64_CR: + case DSSCMD_IWRITE64_CX: + case DSSCMD_IREAD64_CR: + case DSSCMD_IREAD64_CX: + case DSSCMD_INVALIDATE_DEK: + case DSSCMD_SET_KEK: + case DSSCMD_GET_KEK_ID: + case DSSCMD_GEN_XFER: type = LPFC_SOL_IOCB; break; case CMD_ABORT_XRI_CN: @@ -2629,6 +2710,56 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) return retval; } +/** + * lpfc_sli_brdready_s4 - Check for sli4 host ready status + * @phba: Pointer to HBA context object. + * @mask: Bit mask to be checked. + * + * This function checks the host status register to check if HBA is + * ready. This function will wait in a loop for the HBA to be ready. + * If the HBA is not ready, the function will reset the HBA PCI + * function again. The function returns 1 when the HBA fails to become + * ready, otherwise it returns zero. + **/ +static int +lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) +{ + uint32_t status; + int retval = 0; + + /* Read the HBA Host Status Register */ + status = lpfc_sli4_post_status_check(phba); + + if (status) { + phba->pport->port_state = LPFC_VPORT_UNKNOWN; + lpfc_sli_brdrestart(phba); + status = lpfc_sli4_post_status_check(phba); + } + + /* Check to see if any errors occurred during init */ + if (status) { + phba->link_state = LPFC_HBA_ERROR; + retval = 1; + } else + phba->sli4_hba.intr_enable = 0; + + return retval; +} + +/** + * lpfc_sli_brdready - Wrapper func for checking the hba readiness + * @phba: Pointer to HBA context object. + * @mask: Bit mask to be checked. + * + * This routine wraps the actual SLI3 or SLI4 hba readiness check routine + * from the API jump table function pointer from the lpfc_hba struct. + **/ +int +lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) +{ + return phba->lpfc_sli_brdready(phba, mask); +} + #define BARRIER_TEST_PATTERN (0xdeadbeef) /** @@ -2863,7 +2994,66 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) } /** - * lpfc_sli_brdrestart - Restart the HBA + * lpfc_sli4_brdreset - Reset a sli-4 HBA + * @phba: Pointer to HBA context object. + * + * This function resets a SLI4 HBA. It disables PCI layer parity + * checking while resetting the device. The caller is not required to hold + * any locks. + * + * This function returns 0 always.
+ **/ +int +lpfc_sli4_brdreset(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + uint16_t cfg_value; + uint8_t qindx; + + /* Reset HBA */ + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0295 Reset HBA Data: x%x x%x\n", + phba->pport->port_state, psli->sli_flag); + + /* perform board reset */ + phba->fc_eventTag = 0; + phba->pport->fc_myDID = 0; + phba->pport->fc_prevDID = 0; + + /* Turn off parity checking and serr during the physical reset */ + pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); + pci_write_config_word(phba->pcidev, PCI_COMMAND, + (cfg_value & + ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); + + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~(LPFC_PROCESS_LA); + phba->fcf.fcf_flag = 0; + /* Clean up the child queue list for the CQs */ + list_del_init(&phba->sli4_hba.mbx_wq->list); + list_del_init(&phba->sli4_hba.els_wq->list); + list_del_init(&phba->sli4_hba.hdr_rq->list); + list_del_init(&phba->sli4_hba.dat_rq->list); + list_del_init(&phba->sli4_hba.mbx_cq->list); + list_del_init(&phba->sli4_hba.els_cq->list); + list_del_init(&phba->sli4_hba.rxq_cq->list); + for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) + list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); + for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) + list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list); + spin_unlock_irq(&phba->hbalock); + + /* Now physically reset the device */ + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0389 Performing PCI function reset!\n"); + /* Perform FCoE PCI function reset */ + lpfc_pci_function_reset(phba); + + return 0; +} + +/** + * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba * @phba: Pointer to HBA context object. * * This function is called in the SLI initialization code path to @@ -2875,8 +3065,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) * The function does not guarantee completion of MBX_RESTART mailbox * command before the return of this function. **/ -int -lpfc_sli_brdrestart(struct lpfc_hba *phba) +static int +lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) { MAILBOX_t *mb; struct lpfc_sli *psli; @@ -2915,7 +3105,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) lpfc_sli_brdreset(phba); phba->pport->stopped = 0; phba->link_state = LPFC_INIT_START; - + phba->hba_flag = 0; spin_unlock_irq(&phba->hbalock); memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); @@ -2929,6 +3119,55 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) return 0; } +/** + * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba + * @phba: Pointer to HBA context object. + * + * This function is called in the SLI initialization code path to restart + * a SLI4 HBA. The caller is not required to hold any lock. + * At the end of the function, it calls lpfc_hba_down_post function to + * free any pending commands. + **/ +static int +lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + + + /* Restart HBA */ + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0296 Restart HBA Data: x%x x%x\n", + phba->pport->port_state, psli->sli_flag); + + lpfc_sli4_brdreset(phba); + + spin_lock_irq(&phba->hbalock); + phba->pport->stopped = 0; + phba->link_state = LPFC_INIT_START; + phba->hba_flag = 0; + spin_unlock_irq(&phba->hbalock); + + memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); + psli->stats_start = get_seconds(); + + lpfc_hba_down_post(phba); + + return 0; +} + +/** + * lpfc_sli_brdrestart - Wrapper func for restarting hba + * @phba: Pointer to HBA context object. 
+ * + * This routine wraps the actual SLI3 or SLI4 hba restart routine from the + * API jump table function pointer from the lpfc_hba struct. +**/ +int +lpfc_sli_brdrestart(struct lpfc_hba *phba) +{ + return phba->lpfc_sli_brdrestart(phba); +} + /** * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart * @phba: Pointer to HBA context object. @@ -3353,125 +3592,607 @@ lpfc_sli_hba_setup_error: return rc; } - /** - * lpfc_mbox_timeout - Timeout call back function for mbox timer - * @ptr: context object - pointer to hba structure. - * - * This is the callback function for mailbox timer. The mailbox - * timer is armed when a new mailbox command is issued and the timer - * is deleted when the mailbox complete. The function is called by - * the kernel timer code when a mailbox does not complete within - * expected time. This function wakes up the worker thread to - * process the mailbox timeout and returns. All the processing is - * done by the worker thread function lpfc_mbox_timeout_handler. + * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region + * @phba: Pointer to HBA context object. + * @mboxq: mailbox pointer. + * This function issues a dump mailbox command to read config region + * 23, parses the records in the region and populates the driver + * data structure. **/ -void -lpfc_mbox_timeout(unsigned long ptr) +static int +lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, + LPFC_MBOXQ_t *mboxq) { - struct lpfc_hba *phba = (struct lpfc_hba *) ptr; - unsigned long iflag; - uint32_t tmo_posted; + struct lpfc_dmabuf *mp; + struct lpfc_mqe *mqe; + uint32_t data_length; + int rc; - spin_lock_irqsave(&phba->pport->work_port_lock, iflag); - tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; - if (!tmo_posted) - phba->pport->work_port_events |= WORKER_MBOX_TMO; - spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); + /* Program the default value of vlan_id and fc_map */ + phba->valid_vlan = 0; + phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; + phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; + phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; - if (!tmo_posted) - lpfc_worker_wake_up(phba); - return; -} + mqe = &mboxq->u.mqe; + if (lpfc_dump_fcoe_param(phba, mboxq)) + return -ENOMEM; + + mp = (struct lpfc_dmabuf *) mboxq->context1; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):2571 Mailbox cmd x%x Status x%x " + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "CQ: x%x x%x x%x x%x\n", + mboxq->vport ?
mboxq->vport->vpi : 0, + bf_get(lpfc_mqe_command, mqe), + bf_get(lpfc_mqe_status, mqe), + mqe->un.mb_words[0], mqe->un.mb_words[1], + mqe->un.mb_words[2], mqe->un.mb_words[3], + mqe->un.mb_words[4], mqe->un.mb_words[5], + mqe->un.mb_words[6], mqe->un.mb_words[7], + mqe->un.mb_words[8], mqe->un.mb_words[9], + mqe->un.mb_words[10], mqe->un.mb_words[11], + mqe->un.mb_words[12], mqe->un.mb_words[13], + mqe->un.mb_words[14], mqe->un.mb_words[15], + mqe->un.mb_words[16], mqe->un.mb_words[50], + mboxq->mcqe.word0, + mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, + mboxq->mcqe.trailer); + + if (rc) { + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + return -EIO; + } + data_length = mqe->un.mb_words[5]; + if (data_length > DMP_FCOEPARAM_RGN_SIZE) + return -EIO; + lpfc_parse_fcoe_conf(phba, mp->virt, data_length); + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + return 0; +} /** - * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout - * @phba: Pointer to HBA context object. + * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to the LPFC_MBOXQ_t structure. + * @vpd: pointer to the memory to hold resulting port vpd data. + * @vpd_size: On input, the number of bytes allocated to @vpd. + * On output, the number of data bytes in @vpd. * - * This function is called from worker thread when a mailbox command times out. - * The caller is not required to hold any locks. This function will reset the - * HBA and recover all the pending commands. + * This routine executes a READ_REV SLI4 mailbox command. In + * addition, this routine gets the port vpd data. + * + * Return codes + * 0 - successful + * ENOMEM - could not allocate memory. **/ -void -lpfc_mbox_timeout_handler(struct lpfc_hba *phba) +static int +lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, + uint8_t *vpd, uint32_t *vpd_size) { - LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; - MAILBOX_t *mb = &pmbox->mb; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; + int rc = 0; + uint32_t dma_size; + struct lpfc_dmabuf *dmabuf; + struct lpfc_mqe *mqe; - /* Check the pmbox pointer first. There is a race condition - * between the mbox timeout handler getting executed in the - * worklist and the mailbox actually completing. When this - * race condition occurs, the mbox_active will be NULL. + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + /* + * Get a DMA buffer for the vpd data resulting from the READ_REV + * mailbox command. */ - spin_lock_irq(&phba->hbalock); - if (pmbox == NULL) { - lpfc_printf_log(phba, KERN_WARNING, - LOG_MBOX | LOG_SLI, - "0353 Active Mailbox cleared - mailbox timeout " - "exiting\n"); - spin_unlock_irq(&phba->hbalock); - return; + dma_size = *vpd_size; + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + dma_size, + &dmabuf->phys, + GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + return -ENOMEM; } + memset(dmabuf->virt, 0, dma_size); - /* Mbox cmd timeout */ - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", - mb->mbxCommand, - phba->pport->port_state, - phba->sli.sli_flag, - phba->sli.mbox_active); - spin_unlock_irq(&phba->hbalock); - - /* Setting state unknown so lpfc_sli_abort_iocb_ring - * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing - * it to fail all oustanding SCSI IO.
+ /* + * The SLI4 implementation of READ_REV conflicts at word1, + * bits 31:16 and SLI4 adds vpd functionality not present + * in SLI3. This code corrects the conflicts. */ - spin_lock_irq(&phba->pport->work_port_lock); - phba->pport->work_port_events &= ~WORKER_MBOX_TMO; - spin_unlock_irq(&phba->pport->work_port_lock); - spin_lock_irq(&phba->hbalock); - phba->link_state = LPFC_LINK_UNKNOWN; - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; - spin_unlock_irq(&phba->hbalock); + lpfc_read_rev(phba, mboxq); + mqe = &mboxq->u.mqe; + mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); + mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); + mqe->un.read_rev.word1 &= 0x0000FFFF; + bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); + bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc) { + dma_free_coherent(&phba->pcidev->dev, dma_size, + dmabuf->virt, dmabuf->phys); + return -EIO; + } - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0380 Mailbox cmd x%x Status x%x " + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "CQ: x%x x%x x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + bf_get(lpfc_mqe_command, mqe), + bf_get(lpfc_mqe_status, mqe), + mqe->un.mb_words[0], mqe->un.mb_words[1], + mqe->un.mb_words[2], mqe->un.mb_words[3], + mqe->un.mb_words[4], mqe->un.mb_words[5], + mqe->un.mb_words[6], mqe->un.mb_words[7], + mqe->un.mb_words[8], mqe->un.mb_words[9], + mqe->un.mb_words[10], mqe->un.mb_words[11], + mqe->un.mb_words[12], mqe->un.mb_words[13], + mqe->un.mb_words[14], mqe->un.mb_words[15], + mqe->un.mb_words[16], mqe->un.mb_words[50], + mboxq->mcqe.word0, + mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, + mboxq->mcqe.trailer); - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0345 Resetting board due to mailbox timeout\n"); + /* + * The available vpd length cannot be bigger than the + * DMA buffer passed to the port. Catch the less than + * case and update the caller's size. + */ + if (mqe->un.read_rev.avail_vpd_len < *vpd_size) + *vpd_size = mqe->un.read_rev.avail_vpd_len; - /* Reset the HBA device */ - lpfc_reset_hba(phba); + lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size); + dma_free_coherent(&phba->pcidev->dev, dma_size, + dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + return 0; } /** - * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware - * @phba: Pointer to HBA context object. - * @pmbox: Pointer to mailbox object. - * @flag: Flag indicating how the mailbox need to be processed. + * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues + * @phba: pointer to lpfc hba data structure. * - * This function is called by discovery code and HBA management code - * to submit a mailbox command to firmware with SLI-3 interface spec. This - * function gets the hbalock to protect the data structures. - * The mailbox command can be submitted in polling mode, in which case - * this function will wait in a polling loop for the completion of the - * mailbox. - * If the mailbox is submitted in no_wait mode (not polling) the - * function will submit the command and returns immediately without waiting - * for the mailbox completion. The no_wait is supported only when HBA - * is in SLI2/SLI3 mode - interrupts are enabled. - * The SLI interface allows only one mailbox pending at a time. 
If the - mailbox is issued in polling mode and there is already a mailbox - pending, then the function will return an error. If the mailbox is issued - in NO_WAIT mode and there is a mailbox pending already, the function - will return MBX_BUSY after queuing the mailbox into mailbox queue. - The sli layer owns the mailbox object until the completion of mailbox - command if this function return MBX_BUSY or MBX_SUCCESS. For all other - return codes the caller owns the mailbox command after the return of - the function. + * This routine is called to explicitly arm the SLI4 device's completion and + * event queues **/ +static void +lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) +{ + uint8_t fcp_eqidx; + + lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); + lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); + lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM); + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) + lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], + LPFC_QUEUE_REARM); + lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) + lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], + LPFC_QUEUE_REARM); +} + +/** + * lpfc_sli4_hba_setup - SLI4 device initialization PCI function + * @phba: Pointer to HBA context object. + * + * This function is the main SLI4 device initialization PCI function. This + * function is called by the HBA initialization code, HBA reset code and + * HBA error attention handler code. Caller is not required to hold any + * locks. + **/ +int +lpfc_sli4_hba_setup(struct lpfc_hba *phba) +{ + int rc; + LPFC_MBOXQ_t *mboxq; + struct lpfc_mqe *mqe; + uint8_t *vpd; + uint32_t vpd_size; + uint32_t ftr_rsp = 0; + struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); + struct lpfc_vport *vport = phba->pport; + struct lpfc_dmabuf *mp; + + /* Perform a PCI function reset to start from clean */ + rc = lpfc_pci_function_reset(phba); + if (unlikely(rc)) + return -ENODEV; + + /* Check the HBA Host Status Register for readiness */ + rc = lpfc_sli4_post_status_check(phba); + if (unlikely(rc)) + return -ENODEV; + else { + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag |= LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); + } + + /* + * Allocate a single mailbox container for initializing the + * port. + */ + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + /* + * Continue initialization with default values even if driver failed + * to read FCoE param config regions + */ + if (lpfc_sli4_read_fcoe_params(phba, mboxq)) + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, + "2570 Failed to read FCoE parameters\n"); + + /* Issue READ_REV to collect vpd and FW information. */ + vpd_size = PAGE_SIZE; + vpd = kzalloc(vpd_size, GFP_KERNEL); + if (!vpd) { + rc = -ENOMEM; + goto out_free_mbox; + } + + rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); + if (unlikely(rc)) + goto out_free_vpd; + + mqe = &mboxq->u.mqe; + if ((bf_get(lpfc_mbx_rd_rev_sli_lvl, + &mqe->un.read_rev) != LPFC_SLI_REV4) || + (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0376 READ_REV Error.
SLI Level %d " + "FCoE enabled %d\n", + bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev), + bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)); + rc = -EIO; + goto out_free_vpd; + } + /* Single threaded at this point, no need for lock */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag |= HBA_FCOE_SUPPORT; + spin_unlock_irq(&phba->hbalock); + /* + * Evaluate the read rev and vpd data. Populate the driver + * state with the results. If this routine fails, the failure + * is not fatal as the driver will use generic values. + */ + rc = lpfc_parse_vpd(phba, vpd, vpd_size); + if (unlikely(!rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0377 Error %d parsing vpd. " + "Using defaults.\n", rc); + rc = 0; + } + + /* By now, we should determine the SLI revision, hard code for now */ + phba->sli_rev = LPFC_SLI_REV4; + + /* + * Discover the port's supported feature set and match it against the + * hosts requests. + */ + lpfc_request_features(phba, mboxq); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (unlikely(rc)) { + rc = -EIO; + goto out_free_vpd; + } + + /* + * The port must support FCP initiator mode as this is the + * only mode running in the host. + */ + if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "0378 No support for fcpi mode.\n"); + ftr_rsp++; + } + + /* + * If the port cannot support the host's requested features + * then turn off the global config parameters to disable the + * feature in the driver. This is not a fatal error. + */ + if ((phba->cfg_enable_bg) && + !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) + ftr_rsp++; + + if (phba->max_vpi && phba->cfg_enable_npiv && + !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) + ftr_rsp++; + + if (ftr_rsp) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "0379 Feature Mismatch Data: x%08x %08x " + "x%x x%x x%x\n", mqe->un.req_ftrs.word2, + mqe->un.req_ftrs.word3, phba->cfg_enable_bg, + phba->cfg_enable_npiv, phba->max_vpi); + if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) + phba->cfg_enable_bg = 0; + if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) + phba->cfg_enable_npiv = 0; + } + + /* These SLI3 features are assumed in SLI4 */ + spin_lock_irq(&phba->hbalock); + phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); + spin_unlock_irq(&phba->hbalock); + + /* Read the port's service parameters. */ + lpfc_read_sparam(phba, mboxq, vport->vpi); + mboxq->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + mp = (struct lpfc_dmabuf *) mboxq->context1; + if (rc == MBX_SUCCESS) { + memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); + rc = 0; + } + + /* + * This memory was allocated by the lpfc_read_sparam routine. Release + * it to the mbuf pool. 
+ */ + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + mboxq->context1 = NULL; + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0382 READ_SPARAM command failed " + "status %d, mbxStatus x%x\n", + rc, bf_get(lpfc_mqe_status, mqe)); + phba->link_state = LPFC_HBA_ERROR; + rc = -EIO; + goto out_free_vpd; + } + + if (phba->cfg_soft_wwnn) + u64_to_wwn(phba->cfg_soft_wwnn, + vport->fc_sparam.nodeName.u.wwn); + if (phba->cfg_soft_wwpn) + u64_to_wwn(phba->cfg_soft_wwpn, + vport->fc_sparam.portName.u.wwn); + memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, + sizeof(struct lpfc_name)); + memcpy(&vport->fc_portname, &vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + + /* Update the fc_host data structures with new wwn. */ + fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); + fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); + + /* Register SGL pool to the device using non-embedded mailbox command */ + rc = lpfc_sli4_post_sgl_list(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0582 Error %d during sgl post operation", rc); + rc = -ENODEV; + goto out_free_vpd; + } + + /* Register SCSI SGL pool to the device */ + rc = lpfc_sli4_repost_scsi_sgl_list(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "0383 Error %d during scsi sgl post operation", + rc); + /* Some Scsi buffers were moved to the abort scsi list */ + /* A pci function reset will repost them */ + rc = -ENODEV; + goto out_free_vpd; + } + + /* Post the rpi header region to the device. */ + rc = lpfc_sli4_post_all_rpi_hdrs(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0393 Error %d during rpi post operation\n", + rc); + rc = -ENODEV; + goto out_free_vpd; + } + /* Temporary initialization of lpfc_fip_flag to non-fip */ + bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); + + /* Set up all the queues to the device */ + rc = lpfc_sli4_queue_setup(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0381 Error %d during queue setup.\n ", rc); + goto out_stop_timers; + } + + /* Arm the CQs and then EQs on device */ + lpfc_sli4_arm_cqeq_intr(phba); + + /* Indicate device interrupt mode */ + phba->sli4_hba.intr_enable = 1; + + /* Allow asynchronous mailbox command to go through */ + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; + spin_unlock_irq(&phba->hbalock); + + /* Post receive buffers to the device */ + lpfc_sli4_rb_setup(phba); + + /* Start the ELS watchdog timer */ + /* + * The driver for SLI4 is not yet ready to process timeouts + * or interrupts. Once it is, the comment bars can be removed. + */ + /* mod_timer(&vport->els_tmofunc, + * jiffies + HZ * (phba->fc_ratov*2)); */ + + /* Start heart beat timer */ + mod_timer(&phba->hb_tmofunc, + jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + phba->hb_outstanding = 0; + phba->last_completion_time = jiffies; + + /* Start error attention (ERATT) polling timer */ + mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); + + /* + * The port is ready, set the host's link state to LINK_DOWN + * in preparation for link interrupts.
+ */ + lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed); + mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + lpfc_set_loopback_flag(phba); + /* Change driver state to LPFC_LINK_DOWN right before init link */ + spin_lock_irq(&phba->hbalock); + phba->link_state = LPFC_LINK_DOWN; + spin_unlock_irq(&phba->hbalock); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (unlikely(rc != MBX_NOT_FINISHED)) { + kfree(vpd); + return 0; + } else + rc = -EIO; + + /* Unset all the queues set up in this routine when error out */ + if (rc) + lpfc_sli4_queue_unset(phba); + +out_stop_timers: + if (rc) + lpfc_stop_hba_timers(phba); +out_free_vpd: + kfree(vpd); +out_free_mbox: + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; +} + +/** + * lpfc_mbox_timeout - Timeout call back function for mbox timer + * @ptr: context object - pointer to hba structure. + * + * This is the callback function for mailbox timer. The mailbox + * timer is armed when a new mailbox command is issued and the timer + * is deleted when the mailbox completes. The function is called by + * the kernel timer code when a mailbox does not complete within + * expected time. This function wakes up the worker thread to + * process the mailbox timeout and returns. All the processing is + * done by the worker thread function lpfc_mbox_timeout_handler. + **/ +void +lpfc_mbox_timeout(unsigned long ptr) +{ + struct lpfc_hba *phba = (struct lpfc_hba *) ptr; + unsigned long iflag; + uint32_t tmo_posted; + + spin_lock_irqsave(&phba->pport->work_port_lock, iflag); + tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; + if (!tmo_posted) + phba->pport->work_port_events |= WORKER_MBOX_TMO; + spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); + + if (!tmo_posted) + lpfc_worker_wake_up(phba); + return; +} + + +/** + * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout + * @phba: Pointer to HBA context object. + * + * This function is called from worker thread when a mailbox command times out. + * The caller is not required to hold any locks. This function will reset the + * HBA and recover all the pending commands. + **/ +void +lpfc_mbox_timeout_handler(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; + MAILBOX_t *mb = &pmbox->mb; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + + /* Check the pmbox pointer first. There is a race condition + * between the mbox timeout handler getting executed in the + * worklist and the mailbox actually completing. When this + * race condition occurs, the mbox_active will be NULL. + */ + spin_lock_irq(&phba->hbalock); + if (pmbox == NULL) { + lpfc_printf_log(phba, KERN_WARNING, + LOG_MBOX | LOG_SLI, + "0353 Active Mailbox cleared - mailbox timeout " + "exiting\n"); + spin_unlock_irq(&phba->hbalock); + return; + } + + /* Mbox cmd timeout */ + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", + mb->mbxCommand, + phba->pport->port_state, + phba->sli.sli_flag, + phba->sli.mbox_active); + spin_unlock_irq(&phba->hbalock); + + /* Setting state unknown so lpfc_sli_abort_iocb_ring + * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing + * it to fail all outstanding SCSI IO.
+ */ + spin_lock_irq(&phba->pport->work_port_lock); + phba->pport->work_port_events &= ~WORKER_MBOX_TMO; + spin_unlock_irq(&phba->pport->work_port_lock); + spin_lock_irq(&phba->hbalock); + phba->link_state = LPFC_LINK_UNKNOWN; + psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + spin_unlock_irq(&phba->hbalock); + + pring = &psli->ring[psli->fcp_ring]; + lpfc_sli_abort_iocb_ring(phba, pring); + + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0345 Resetting board due to mailbox timeout\n"); + + /* Reset the HBA device */ + lpfc_reset_hba(phba); +} + /** * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware * @phba: Pointer to HBA context object. * @pmbox: Pointer to mailbox object. * @flag: Flag indicating how the mailbox need to be processed. * * This function is called by discovery code and HBA management code * to submit a mailbox command to firmware with SLI-3 interface spec. This * function gets the hbalock to protect the data structures. * The mailbox command can be submitted in polling mode, in which case * this function will wait in a polling loop for the completion of the * mailbox. * If the mailbox is submitted in no_wait mode (not polling) the * function will submit the command and returns immediately without waiting * for the mailbox completion. The no_wait is supported only when HBA * is in SLI2/SLI3 mode - interrupts are enabled. * The SLI interface allows only one mailbox pending at a time. If the * mailbox is issued in polling mode and there is already a mailbox * pending, then the function will return an error. If the mailbox is issued * in NO_WAIT mode and there is a mailbox pending already, the function * will return MBX_BUSY after queuing the mailbox into mailbox queue. * The sli layer owns the mailbox object until the completion of mailbox * command if this function return MBX_BUSY or MBX_SUCCESS. For all other * return codes the caller owns the mailbox command after the return of * the function. **/ static int lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, @@ -3812,12 +4533,419 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, out_not_finished: if (processing_queue) { - pmbox->mb.mbxStatus = MBX_NOT_FINISHED; + pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; lpfc_mbox_cmpl_put(phba, pmbox); } return MBX_NOT_FINISHED; } +/** + * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox + * @phba: Pointer to HBA context object. + * @mboxq: Pointer to mailbox object. + * + * The function posts a mailbox to the port. The mailbox is expected + * to be completely filled in and ready for the port to operate on it. + * This routine executes a synchronous completion operation on the + * mailbox by polling for its completion. + * + * The caller must not be holding any locks when calling this routine. + * + * Returns: + * MBX_SUCCESS - mailbox posted successfully + * Any of the MBX error values. + **/ +static int +lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + int rc = MBX_SUCCESS; + unsigned long iflag; + uint32_t db_ready; + uint32_t mcqe_status; + uint32_t mbx_cmnd; + unsigned long timeout; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_mqe *mb = &mboxq->u.mqe; + struct lpfc_bmbx_create *mbox_rgn; + struct dma_address *dma_address; + struct lpfc_register bmbx_reg; + + /* + * Only one mailbox can be active to the bootstrap mailbox region + * at a time and there is no queueing provided.
+ */ + spin_lock_irqsave(&phba->hbalock, iflag); + if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2532 Mailbox command x%x (x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, MBX_POLL); + return MBXERR_ERROR; + } + /* The server grabs the token and owns it until release */ + psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = mboxq; + spin_unlock_irqrestore(&phba->hbalock, iflag); + + /* + * Initialize the bootstrap memory region to avoid stale data areas + * in the mailbox post. Then copy the caller's mailbox contents to + * the bmbx mailbox region. + */ + mbx_cmnd = bf_get(lpfc_mqe_command, mb); + memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); + lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, + sizeof(struct lpfc_mqe)); + + /* Post the high mailbox dma address to the port and wait for ready. */ + dma_address = &phba->sli4_hba.bmbx.dma_address; + writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); + + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) + * 1000) + jiffies; + do { + bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); + db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); + if (!db_ready) + msleep(2); + + if (time_after(jiffies, timeout)) { + rc = MBXERR_ERROR; + goto exit; + } + } while (!db_ready); + + /* Post the low mailbox dma address to the port. */ + writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) + * 1000) + jiffies; + do { + bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); + db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); + if (!db_ready) + msleep(2); + + if (time_after(jiffies, timeout)) { + rc = MBXERR_ERROR; + goto exit; + } + } while (!db_ready); + + /* + * Read the CQ to ensure the mailbox has completed. + * If so, update the mailbox status so that the upper layers + * can complete the request normally. + */ + lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, + sizeof(struct lpfc_mqe)); + mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; + lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, + sizeof(struct lpfc_mcqe)); + mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); + + /* Prefix the mailbox status with range x4000 to note SLI4 status. */ + if (mcqe_status != MB_CQE_STATUS_SUCCESS) { + bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status); + rc = MBXERR_ERROR; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0356 Mailbox cmd x%x (x%x) Status x%x " + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" + " x%x x%x CQ: x%x x%x x%x x%x\n", + mboxq->vport ? 
mboxq->vport->vpi : 0,
+ mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ bf_get(lpfc_mqe_status, mb),
+ mb->un.mb_words[0], mb->un.mb_words[1],
+ mb->un.mb_words[2], mb->un.mb_words[3],
+ mb->un.mb_words[4], mb->un.mb_words[5],
+ mb->un.mb_words[6], mb->un.mb_words[7],
+ mb->un.mb_words[8], mb->un.mb_words[9],
+ mb->un.mb_words[10], mb->un.mb_words[11],
+ mb->un.mb_words[12], mboxq->mcqe.word0,
+ mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
+ mboxq->mcqe.trailer);
+exit:
+ /* We are holding the token, no need for the lock when releasing */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = NULL;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return rc;
+}
+
+/**
+ * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox needs to be processed.
+ *
+ * This function is called by discovery code and HBA management code to submit
+ * a mailbox command to firmware with SLI-4 interface spec.
+ *
+ * Return codes: the caller owns the mailbox command after the return of the
+ * function.
+ **/
+static int
+lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+ uint32_t flag)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ unsigned long iflags;
+ int rc;
+
+ /* Detect polling mode and jump to a handler */
+ if (!phba->sli4_hba.intr_enable) {
+ if (flag == MBX_POLL)
+ rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
+ else
+ rc = -EIO;
+ if (rc != MBX_SUCCESS)
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2541 Mailbox command x%x "
+ "(x%x) cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, flag);
+ return rc;
+ } else if (flag == MBX_POLL) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2542 Mailbox command x%x (x%x) "
+ "cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, flag);
+ return -EIO;
+ }
+
+ /* Now, interrupt mode asynchronous mailbox command */
+ rc = lpfc_mbox_cmd_check(phba, mboxq);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2543 Mailbox command x%x (x%x) "
+ "cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, flag);
+ goto out_not_finished;
+ }
+ rc = lpfc_mbox_dev_check(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2544 Mailbox command x%x (x%x) "
+ "cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, flag);
+ goto out_not_finished;
+ }
+
+ /* Put the mailbox command to the driver internal FIFO */
+ psli->slistat.mbox_busy++;
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_mbox_put(phba, mboxq);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):0354 Mbox cmd issue - Enqueue Data: "
+ "x%x (x%x) x%x x%x x%x\n",
+ mboxq->vport ?
mboxq->vport->vpi : 0xffffff,
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ phba->pport->port_state,
+ psli->sli_flag, MBX_NOWAIT);
+ /* Wake up worker thread to transport mailbox command from head */
+ lpfc_worker_wake_up(phba);
+
+ return MBX_BUSY;
+
+out_not_finished:
+ return MBX_NOT_FINISHED;
+}
+
+/**
+ * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called by the worker thread to send a mailbox command to
+ * SLI4 HBA firmware.
+ *
+ **/
+int
+lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *mboxq;
+ int rc = MBX_SUCCESS;
+ unsigned long iflags;
+ struct lpfc_mqe *mqe;
+ uint32_t mbx_cmnd;
+
+ /* Check interrupt mode before posting async mailbox command */
+ if (unlikely(!phba->sli4_hba.intr_enable))
+ return MBX_NOT_FINISHED;
+
+ /* Check for mailbox command service token */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return MBX_NOT_FINISHED;
+ }
+ if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return MBX_NOT_FINISHED;
+ }
+ if (unlikely(phba->sli.mbox_active)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0384 There is pending active mailbox cmd\n");
+ return MBX_NOT_FINISHED;
+ }
+ /* Take the mailbox command service token */
+ psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+
+ /* Get the next mailbox command from head of queue */
+ mboxq = lpfc_mbox_get(phba);
+
+ /* If no more mailbox commands are waiting for post, we're done */
+ if (!mboxq) {
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return MBX_SUCCESS;
+ }
+ phba->sli.mbox_active = mboxq;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ /* Check device readiness for posting mailbox command */
+ rc = lpfc_mbox_dev_check(phba);
+ if (unlikely(rc))
+ /* Driver clean routine will clean up pending mailbox */
+ goto out_not_finished;
+
+ /* Prepare the mbox command to be posted */
+ mqe = &mboxq->u.mqe;
+ mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
+
+ /* Start timer for the mbox_tmo and log some mailbox post messages */
+ mod_timer(&psli->mbox_tmo, (jiffies +
+ (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
+ "x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ phba->pport->port_state, psli->sli_flag);
+
+ if (mbx_cmnd != MBX_HEARTBEAT) {
+ if (mboxq->vport) {
+ lpfc_debugfs_disc_trc(mboxq->vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX Send vport: cmd:x%x mb:x%x x%x",
+ mbx_cmnd, mqe->un.mb_words[0],
+ mqe->un.mb_words[1]);
+ } else {
+ lpfc_debugfs_disc_trc(phba->pport,
+ LPFC_DISC_TRC_MBOX,
+ "MBOX Send: cmd:x%x mb:x%x x%x",
+ mbx_cmnd, mqe->un.mb_words[0],
+ mqe->un.mb_words[1]);
+ }
+ }
+ psli->slistat.mbox_cmd++;
+
+ /* Post the mailbox command to the port */
+ rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2533 Mailbox command x%x (x%x) "
+ "cannot issue Data: x%x x%x\n",
+ mboxq->vport ?
mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, MBX_NOWAIT);
+ goto out_not_finished;
+ }
+
+ return rc;
+
+out_not_finished:
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+ __lpfc_mbox_cmpl_put(phba, mboxq);
+ /* Release the token */
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = NULL;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ return MBX_NOT_FINISHED;
+}
+
+/**
+ * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox needs to be processed.
+ *
+ * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine, using
+ * the API jump table function pointer from the lpfc_hba struct.
+ *
+ * Return codes: the caller owns the mailbox command after the return of the
+ * function.
+ **/
+int
+lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
+{
+ return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
+}
+
+/**
+ * lpfc_mbox_api_table_setup - Set up mbox api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the mbox interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+ switch (dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
+ phba->lpfc_sli_handle_slow_ring_event =
+ lpfc_sli_handle_slow_ring_event_s3;
+ phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
+ phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
+ phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
+ break;
+ case LPFC_PCI_DEV_OC:
+ phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
+ phba->lpfc_sli_handle_slow_ring_event =
+ lpfc_sli_handle_slow_ring_event_s4;
+ phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
+ phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
+ phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1420 Invalid HBA PCI-device group: 0x%x\n",
+ dev_grp);
+ return -ENODEV;
+ break;
+ }
+ return 0;
+}
+
 /**
  * __lpfc_sli_ringtx_put - Add an iocb to the txq
  * @phba: Pointer to HBA context object.
@@ -4501,28 +5629,42 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
 
 /* Return any active mbox cmds */
 del_timer_sync(&psli->mbox_tmo);
- spin_lock_irqsave(&phba->hbalock, flags);
- spin_lock(&phba->pport->work_port_lock);
+ spin_lock_irqsave(&phba->pport->work_port_lock, flags);
 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
- spin_unlock(&phba->pport->work_port_lock);
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
 
- /* Return any pending or completed mbox cmds */
- list_splice_init(&phba->sli.mboxq, &completions);
- if (psli->mbox_active) {
- list_add_tail(&psli->mbox_active->list, &completions);
- psli->mbox_active = NULL;
- psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- }
- list_splice_init(&phba->sli.mboxq_cmpl, &completions);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ return 1;
+}
+
+/**
+ * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function cleans up all queues, iocbs, buffers, and mailbox commands
+ * while shutting down the SLI4 HBA FCoE function.
This function is called with no + * lock held and always returns 1. + * + * This function does the following to cleanup driver FCoE function resources: + * - Free discovery resources for each virtual port + * - Cleanup any pending fabric iocbs + * - Iterate through the iocb txq and free each entry in the list. + * - Free up any buffer posted to the HBA. + * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc. + * - Free mailbox commands in the mailbox queue. + **/ +int +lpfc_sli4_hba_down(struct lpfc_hba *phba) +{ + /* Stop the SLI4 device port */ + lpfc_stop_port(phba); + + /* Tear down the queues in the HBA */ + lpfc_sli4_queue_unset(phba); + + /* unregister default FCFI from the HBA */ + lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); - while (!list_empty(&completions)) { - list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); - pmb->mb.mbxStatus = MBX_NOT_FINISHED; - if (pmb->mbox_cmpl) - pmb->mbox_cmpl(phba,pmb); - } return 1; } @@ -4853,7 +5995,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, iabt = &abtsiocbp->iocb; iabt->un.acxri.abortType = ABORT_TYPE_ABTS; iabt->un.acxri.abortContextTag = icmd->ulpContext; - iabt->un.acxri.abortIoTag = icmd->ulpIoTag; + if (phba->sli_rev == LPFC_SLI_REV4) + iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; + else + iabt->un.acxri.abortIoTag = icmd->ulpIoTag; iabt->ulpLe = 1; iabt->ulpClass = icmd->ulpClass; @@ -4869,7 +6014,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, "abort cmd iotag x%x\n", iabt->un.acxri.abortContextTag, iabt->un.acxri.abortIoTag, abtsiocbp->iotag); - retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); + retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); if (retval) __lpfc_sli_release_iocbq(phba, abtsiocbp); @@ -5052,7 +6197,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, cmd = &iocbq->iocb; abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; - abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; + if (phba->sli_rev == LPFC_SLI_REV4) + abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; + else + abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; abtsiocb->iocb.ulpLe = 1; abtsiocb->iocb.ulpClass = cmd->ulpClass; abtsiocb->vport = phba->pport; @@ -5064,7 +6212,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, /* Setup callback routine and issue the command. */ abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; - ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); + ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, + abtsiocb, 0); if (ret_val == IOCB_ERROR) { lpfc_sli_release_iocbq(phba, abtsiocb); errcnt++; @@ -5145,7 +6294,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, **/ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, - struct lpfc_sli_ring *pring, + uint32_t ring_number, struct lpfc_iocbq *piocb, struct lpfc_iocbq *prspiocbq, uint32_t timeout) @@ -5176,7 +6325,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, readl(phba->HCregaddr); /* flush */ } - retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); + retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0); if (retval == IOCB_SUCCESS) { timeout_req = timeout * HZ; timeleft = wait_event_timeout(done_q, @@ -5384,6 +6533,58 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) return 0; } +/** + * lpfc_sli4_eratt_read - read sli-4 error attention events + * @phba: Pointer to HBA context. 
+ *
+ * This function is called to read the SLI4 device error attention registers
+ * for possible error attention events. The caller must hold the hostlock
+ * with spin_lock_irq().
+ *
+ * This function returns 1 when there is Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+static int
+lpfc_sli4_eratt_read(struct lpfc_hba *phba)
+{
+ uint32_t uerr_sta_hi, uerr_sta_lo;
+ uint32_t onlnreg0, onlnreg1;
+
+ /* For now, use the SLI4 device internal unrecoverable error
+ * registers for error attention. This can be changed later.
+ */
+ onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
+ onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
+ if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
+ uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
+ uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
+ if (uerr_sta_lo || uerr_sta_hi) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1423 HBA Unrecoverable error: "
+ "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+ "online0_reg=0x%x, online1_reg=0x%x\n",
+ uerr_sta_lo, uerr_sta_hi,
+ onlnreg0, onlnreg1);
+ /* TEMP: as the driver error recover logic is not
+ * fully developed, we just log the error message
+ * and the device error attention action is now
+ * temporarily disabled.
+ */
+ return 0;
+ phba->work_status[0] = uerr_sta_lo;
+ phba->work_status[1] = uerr_sta_hi;
+ spin_lock_irq(&phba->hbalock);
+ /* Set the driver HA work bitmap */
+ phba->work_ha |= HA_ERATT;
+ /* Indicate polling handles this ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ spin_unlock_irq(&phba->hbalock);
+ return 1;
+ }
+ }
+ return 0;
+}
+
 /**
  * lpfc_sli_check_eratt - check error attention events
  * @phba: Pointer to HBA context.
@@ -5434,6 +6635,10 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
 /* Read chip Host Attention (HA) register */
 ha_copy = lpfc_sli_eratt_read(phba);
 break;
+ case LPFC_SLI_REV4:
+ /* Read device Unrecoverable Error (UERR) registers */
+ ha_copy = lpfc_sli4_eratt_read(phba);
+ break;
 default:
 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 "0299 Invalid SLI revision (%d)\n",
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 883938652a6a..e6c88ee8ee96 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd {
 LPFC_CTX_HOST
 } lpfc_ctx_cmd;
 
+/* This structure is used to carry the needed response IOCB states */
+struct lpfc_sli4_rspiocb_info {
+ uint8_t hw_status;
+ uint8_t bfield;
+#define LPFC_XB 0x1
+#define LPFC_PV 0x2
+ uint8_t priority;
+ uint8_t reserved;
+};
+
 /* This structure is used to handle IOCB requests / responses */
 struct lpfc_iocbq {
 /* lpfc_iocbqs are used in double linked lists */
 struct list_head list;
 struct list_head clist;
 uint16_t iotag; /* pre-assigned IO tag */
- uint16_t rsvd1;
+ uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag.
*/ IOCB_t iocb; /* IOCB cmd */ uint8_t retry; /* retry counter for IOCB cmd - if needed */ @@ -65,7 +75,7 @@ struct lpfc_iocbq { struct lpfc_iocbq *); void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); - + struct lpfc_sli4_rspiocb_info sli4_info; }; #define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ @@ -81,14 +91,18 @@ struct lpfc_iocbq { typedef struct lpfcMboxq { /* MBOXQs are used in single linked lists */ struct list_head list; /* ptr to next mailbox command */ - MAILBOX_t mb; /* Mailbox cmd */ - struct lpfc_vport *vport;/* virutal port pointer */ + union { + MAILBOX_t mb; /* Mailbox cmd */ + struct lpfc_mqe mqe; + } u; + struct lpfc_vport *vport;/* virtual port pointer */ void *context1; /* caller context information */ void *context2; /* caller context information */ void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); uint8_t mbox_flag; - + struct lpfc_mcqe mcqe; + struct lpfc_mbx_nembed_sge_virt *sge_array; } LPFC_MBOXQ_t; #define MBX_POLL 1 /* poll mailbox till command done, then @@ -234,6 +248,7 @@ struct lpfc_sli { #define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ #define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ #define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ +#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */ struct lpfc_sli_ring ring[LPFC_MAX_RING]; int fcp_ring; /* ring used for FCP initiator commands */ @@ -261,6 +276,8 @@ struct lpfc_sli { #define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox command */ +#define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox + command */ #define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write * or erase cmds. This is especially * long because of the potential of diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h new file mode 100644 index 000000000000..5196b46608d7 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -0,0 +1,467 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2009 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
*
+ *******************************************************************/
+
+#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
+#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
+#define LPFC_GET_QE_REL_INT 32
+#define LPFC_RPI_LOW_WATER_MARK 10
+/* Number of SGL entries that can be posted in a 4KB nonembedded mbox command */
+#define LPFC_NEMBED_MBOX_SGL_CNT 254
+
+/* Multi-queue arrangement for fast-path FCP work queues */
+#define LPFC_FN_EQN_MAX 8
+#define LPFC_SP_EQN_DEF 1
+#define LPFC_FP_EQN_DEF 1
+#define LPFC_FP_EQN_MIN 1
+#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
+
+#define LPFC_FN_WQN_MAX 32
+#define LPFC_SP_WQN_DEF 1
+#define LPFC_FP_WQN_DEF 4
+#define LPFC_FP_WQN_MIN 1
+#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
+
+/*
+ * Provide the default FCF Record attributes used by the driver
+ * when nonFIP mode is configured and there are no other default
+ * FCF Record attributes.
+ */
+#define LPFC_FCOE_FCF_DEF_INDEX 0
+#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
+#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
+
+/* First 3 bytes of default FCF MAC is specified by FC_MAP */
+#define LPFC_FCOE_FCF_MAC3 0xFF
+#define LPFC_FCOE_FCF_MAC4 0xFF
+#define LPFC_FCOE_FCF_MAC5 0xFE
+#define LPFC_FCOE_FCF_MAP0 0x0E
+#define LPFC_FCOE_FCF_MAP1 0xFC
+#define LPFC_FCOE_FCF_MAP2 0x00
+#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC
+#define LPFC_FCOE_FKA_ADV_PER 0
+#define LPFC_FCOE_FIP_PRIORITY 0x80
+
+enum lpfc_sli4_queue_type {
+ LPFC_EQ,
+ LPFC_GCQ,
+ LPFC_MCQ,
+ LPFC_WCQ,
+ LPFC_RCQ,
+ LPFC_MQ,
+ LPFC_WQ,
+ LPFC_HRQ,
+ LPFC_DRQ
+};
+
+/* The queue sub-type defines the functional purpose of the queue */
+enum lpfc_sli4_queue_subtype {
+ LPFC_NONE,
+ LPFC_MBOX,
+ LPFC_FCP,
+ LPFC_ELS,
+ LPFC_USOL
+};
+
+union sli4_qe {
+ void *address;
+ struct lpfc_eqe *eqe;
+ struct lpfc_cqe *cqe;
+ struct lpfc_mcqe *mcqe;
+ struct lpfc_wcqe_complete *wcqe_complete;
+ struct lpfc_wcqe_release *wcqe_release;
+ struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
+ struct lpfc_rcqe_complete *rcqe_complete;
+ struct lpfc_mqe *mqe;
+ union lpfc_wqe *wqe;
+ struct lpfc_rqe *rqe;
+};
+
+struct lpfc_queue {
+ struct list_head list;
+ enum lpfc_sli4_queue_type type;
+ enum lpfc_sli4_queue_subtype subtype;
+ struct lpfc_hba *phba;
+ struct list_head child_list;
+ uint32_t entry_count; /* Number of entries to support on the queue */
+ uint32_t entry_size; /* Size of each queue entry.
*/
+ uint32_t queue_id; /* Queue ID assigned by the hardware */
+ struct list_head page_list;
+ uint32_t page_count; /* Number of pages allocated for this queue */
+
+ uint32_t host_index; /* The host's index for putting or getting */
+ uint32_t hba_index; /* The last known hba index for get or put */
+ union sli4_qe qe[1]; /* array to index entries (must be last) */
+};
+
+struct lpfc_cq_event {
+ struct list_head list;
+ union {
+ struct lpfc_mcqe mcqe_cmpl;
+ struct lpfc_acqe_link acqe_link;
+ struct lpfc_acqe_fcoe acqe_fcoe;
+ struct lpfc_acqe_dcbx acqe_dcbx;
+ struct lpfc_rcqe rcqe_cmpl;
+ struct sli4_wcqe_xri_aborted wcqe_axri;
+ } cqe;
+};
+
+struct lpfc_sli4_link {
+ uint8_t speed;
+ uint8_t duplex;
+ uint8_t status;
+ uint8_t physical;
+ uint8_t fault;
+};
+
+struct lpfc_fcf {
+ uint8_t fabric_name[8];
+ uint8_t mac_addr[6];
+ uint16_t fcf_indx;
+ uint16_t fcfi;
+ uint32_t fcf_flag;
+#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
+#define FCF_REGISTERED 0x02 /* FCF registered with FW */
+#define FCF_DISCOVERED 0x04 /* FCF discovery started */
+#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
+#define FCF_IN_USE 0x10 /* At least one discovery completed */
+#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */
+ uint32_t priority;
+ uint32_t addr_mode;
+ uint16_t vlan_id;
+};
+
+#define LPFC_REGION23_SIGNATURE "RG23"
+#define LPFC_REGION23_VERSION 1
+#define LPFC_REGION23_LAST_REC 0xff
+struct lpfc_fip_param_hdr {
+ uint8_t type;
+#define FCOE_PARAM_TYPE 0xA0
+ uint8_t length;
+#define FCOE_PARAM_LENGTH 2
+ uint8_t parm_version;
+#define FIPP_VERSION 0x01
+ uint8_t parm_flags;
+#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
+#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
+#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
+#define FIPP_MODE_ON 0x2
+#define FIPP_MODE_OFF 0x0
+#define FIPP_VLAN_VALID 0x1
+};
+
+struct lpfc_fcoe_params {
+ uint8_t fc_map[3];
+ uint8_t reserved1;
+ uint16_t vlan_tag;
+ uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_hdr {
+ uint8_t type;
+#define FCOE_CONN_TBL_TYPE 0xA1
+ uint8_t length; /* words */
+ uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_rec {
+ uint16_t flags;
+#define FCFCNCT_VALID 0x0001
+#define FCFCNCT_BOOT 0x0002
+#define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */
+#define FCFCNCT_FBNM_VALID 0x0008
+#define FCFCNCT_SWNM_VALID 0x0010
+#define FCFCNCT_VLAN_VALID 0x0020
+#define FCFCNCT_AM_VALID 0x0040
+#define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */
+#define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */
+
+ uint16_t vlan_tag;
+ uint8_t fabric_name[8];
+ uint8_t switch_name[8];
+};
+
+struct lpfc_fcf_conn_entry {
+ struct list_head list;
+ struct lpfc_fcf_conn_rec conn_rec;
+};
+
+/*
+ * Define the host's bootstrap mailbox. This structure contains
+ * the member attributes needed to create, use, and destroy the
+ * bootstrap mailbox region.
+ *
+ * The macro definitions for the bmbx data structure are defined
+ * in lpfc_hw4.h with the register definition.
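+ *
+ * Rough usage sketch (drawn from lpfc_sli4_post_sync_mbox() in this
+ * series; creation and teardown of the region live elsewhere in the
+ * driver):
+ *
+ *	memset(bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
+ *	lpfc_sli_pcimem_bcopy(mqe, bmbx.avirt, sizeof(struct lpfc_mqe));
+ *	writel(bmbx.dma_address.addr_hi, phba->sli4_hba.BMBXregaddr);
+ *	(poll the lpfc_bmbx_rdy bit in the BMBX register)
+ *	writel(bmbx.dma_address.addr_lo, phba->sli4_hba.BMBXregaddr);
+ *	(poll again, then read the MQE and MCQE status back from avirt)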
+ */ +struct lpfc_bmbx { + struct lpfc_dmabuf *dmabuf; + struct dma_address dma_address; + void *avirt; + dma_addr_t aphys; + uint32_t bmbx_size; +}; + +#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4 + +#define LPFC_EQE_SIZE_4B 4 +#define LPFC_EQE_SIZE_16B 16 +#define LPFC_CQE_SIZE 16 +#define LPFC_WQE_SIZE 64 +#define LPFC_MQE_SIZE 256 +#define LPFC_RQE_SIZE 8 + +#define LPFC_EQE_DEF_COUNT 1024 +#define LPFC_CQE_DEF_COUNT 256 +#define LPFC_WQE_DEF_COUNT 64 +#define LPFC_MQE_DEF_COUNT 16 +#define LPFC_RQE_DEF_COUNT 512 + +#define LPFC_QUEUE_NOARM false +#define LPFC_QUEUE_REARM true + + +/* + * SLI4 CT field defines + */ +#define SLI4_CT_RPI 0 +#define SLI4_CT_VPI 1 +#define SLI4_CT_VFI 2 +#define SLI4_CT_FCFI 3 + +#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000 + +/* + * SLI4 specific data structures + */ +struct lpfc_max_cfg_param { + uint16_t max_xri; + uint16_t xri_base; + uint16_t xri_used; + uint16_t max_rpi; + uint16_t rpi_base; + uint16_t rpi_used; + uint16_t max_vpi; + uint16_t vpi_base; + uint16_t vpi_used; + uint16_t max_vfi; + uint16_t vfi_base; + uint16_t vfi_used; + uint16_t max_fcfi; + uint16_t fcfi_base; + uint16_t fcfi_used; + uint16_t max_eq; + uint16_t max_rq; + uint16_t max_cq; + uint16_t max_wq; +}; + +struct lpfc_hba; +/* SLI4 HBA multi-fcp queue handler struct */ +struct lpfc_fcp_eq_hdl { + uint32_t idx; + struct lpfc_hba *phba; +}; + +/* SLI4 HBA data structure entries */ +struct lpfc_sli4_hba { + void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for + PCI BAR0, config space registers */ + void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for + PCI BAR1, control registers */ + void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for + PCI BAR2, doorbell registers */ + /* BAR0 PCI config space register memory map */ + void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */ + void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ + void __iomem *ONLINE0regaddr; /* Address to components of internal UE */ + void __iomem *ONLINE1regaddr; /* Address to components of internal UE */ +#define LPFC_ONLINE_NERR 0xFFFFFFFF + void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */ + /* BAR1 FCoE function CSR register memory map */ + void __iomem *STAregaddr; /* Address to HST_STATE register */ + void __iomem *ISRregaddr; /* Address to HST_ISR register */ + void __iomem *IMRregaddr; /* Address to HST_IMR register */ + void __iomem *ISCRregaddr; /* Address to HST_ISCR register */ + /* BAR2 VF-0 doorbell register memory map */ + void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */ + void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */ + void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */ + void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */ + void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */ + + struct msix_entry *msix_entries; + uint32_t cfg_eqn; + struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ + /* Pointers to the constructed SLI4 queues */ + struct lpfc_queue **fp_eq; /* Fast-path event queue */ + struct lpfc_queue *sp_eq; /* Slow-path event queue */ + struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */ + struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */ + struct lpfc_queue *els_wq; /* Slow-path ELS work queue */ + struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ + struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ + struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ + struct 
lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ + struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ + struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */ + + /* Setup information for various queue parameters */ + int eq_esize; + int eq_ecount; + int cq_esize; + int cq_ecount; + int wq_esize; + int wq_ecount; + int mq_esize; + int mq_ecount; + int rq_esize; + int rq_ecount; +#define LPFC_SP_EQ_MAX_INTR_SEC 10000 +#define LPFC_FP_EQ_MAX_INTR_SEC 10000 + + uint32_t intr_enable; + struct lpfc_bmbx bmbx; + struct lpfc_max_cfg_param max_cfg_param; + uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */ + uint16_t next_rpi; + uint16_t scsi_xri_max; + uint16_t scsi_xri_cnt; + struct list_head lpfc_free_sgl_list; + struct list_head lpfc_sgl_list; + struct lpfc_sglq **lpfc_els_sgl_array; + struct list_head lpfc_abts_els_sgl_list; + struct lpfc_scsi_buf **lpfc_scsi_psb_array; + struct list_head lpfc_abts_scsi_buf_list; + uint32_t total_sglq_bufs; + struct lpfc_sglq **lpfc_sglq_active_list; + struct list_head lpfc_rpi_hdr_list; + unsigned long *rpi_bmask; + uint16_t rpi_count; + struct lpfc_sli4_flags sli4_flags; + struct list_head sp_rspiocb_work_queue; + struct list_head sp_cqe_event_pool; + struct list_head sp_asynce_work_queue; + struct list_head sp_fcp_xri_aborted_work_queue; + struct list_head sp_els_xri_aborted_work_queue; + struct list_head sp_unsol_work_queue; + struct lpfc_sli4_link link_state; + spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ + spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ +}; + +enum lpfc_sge_type { + GEN_BUFF_TYPE, + SCSI_BUFF_TYPE +}; + +struct lpfc_sglq { + /* lpfc_sglqs are used in double linked lists */ + struct list_head list; + struct list_head clist; + enum lpfc_sge_type buff_type; /* is this a scsi sgl */ + uint16_t iotag; /* pre-assigned IO tag */ + uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ + struct sli4_sge *sgl; /* pre-assigned SGL */ + void *virt; /* virtual address. 
*/ + dma_addr_t phys; /* physical address */ +}; + +struct lpfc_rpi_hdr { + struct list_head list; + uint32_t len; + struct lpfc_dmabuf *dmabuf; + uint32_t page_count; + uint32_t start_rpi; +}; + +/* + * SLI4 specific function prototypes + */ +int lpfc_pci_function_reset(struct lpfc_hba *); +int lpfc_sli4_hba_setup(struct lpfc_hba *); +int lpfc_sli4_hba_down(struct lpfc_hba *); +int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t, + uint8_t, uint32_t, bool); +void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *); +void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); +void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, + struct lpfc_mbx_sge *); + +void lpfc_sli4_hba_reset(struct lpfc_hba *); +struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, + uint32_t); +void lpfc_sli4_queue_free(struct lpfc_queue *); +uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t); +uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, uint32_t, uint32_t); +uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, uint32_t); +uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, uint32_t); +uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, struct lpfc_queue *, uint32_t); +uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); +uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); +uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *); +uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *); +uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *); +int lpfc_sli4_queue_setup(struct lpfc_hba *); +void lpfc_sli4_queue_unset(struct lpfc_hba *); +int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); +int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); +int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *); +uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); +int lpfc_sli4_post_async_mbox(struct lpfc_hba *); +int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba); +int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); +struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); +struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *); +void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); +void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); +int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *); +int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *); +int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *); +struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *); +void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *); +int lpfc_sli4_alloc_rpi(struct lpfc_hba *); +void lpfc_sli4_free_rpi(struct lpfc_hba *, int); +void lpfc_sli4_remove_rpis(struct lpfc_hba *); +void lpfc_sli4_async_event_proc(struct lpfc_hba *); +int lpfc_sli4_resume_rpi(struct lpfc_nodelist *); +void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); +void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); +void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *, + struct sli4_wcqe_xri_aborted *); +void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, + struct sli4_wcqe_xri_aborted *); +int lpfc_sli4_brdreset(struct lpfc_hba *); +int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *); +void 
lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
+int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
+int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
+uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
+uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
+void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
+void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_post_status_check(struct lpfc_hba *);
+uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
+
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 917ad56b0aff..59e67f7ee531 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -32,8 +32,10 @@
 #include
 #include
 #include
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
 vpi = 0;
 else
 set_bit(vpi, phba->vpi_bmask);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ phba->sli4_hba.max_cfg_param.vpi_used++;
 spin_unlock_irq(&phba->hbalock);
 return vpi;
 }
@@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
 static void
 lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
 {
+ if (vpi == 0)
+ return;
 spin_lock_irq(&phba->hbalock);
 clear_bit(vpi, phba->vpi_bmask);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ phba->sli4_hba.max_cfg_param.vpi_used--;
 spin_unlock_irq(&phba->hbalock);
 }
@@ -308,6 +316,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 goto error_out;
 }
+ /*
+ * In SLI4, the vpi must be activated before it can be used
+ * by the port.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rc = lpfc_sli4_init_vpi(phba, vpi);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "1838 Failed to INIT_VPI on vpi %d "
+ "status %d\n", vpi, rc);
+ rc = VPORT_NORESOURCES;
+ lpfc_free_vpi(phba, vpi);
+ goto error_out;
+ }
+ }
 
 /* Assign an unused board number */
 if ((instance = lpfc_get_instance()) < 0) {
--
cgit v1.2.3

From 4f774513f7b3fe96648b8936f60f835e6ceaa88e Mon Sep 17 00:00:00 2001
From: James Smart
Date: Fri, 22 May 2009 14:52:35 -0400
Subject: [SCSI] lpfc 8.3.2 : Addition of SLI4 Interface - Queues

Adds support for the new queues in the SLI-4 interface. There are:
- Work Queues - host-to-adapter for fast-path traffic
- Mailbox Queues - host-to-adapter for control (slow-path)
- Buffer Queues - host-to-adapter for posting buffers for async receive
- Completion Queues - adapter-to-host for posting async events,
   completions for fast or slow path work, receipt of async receive traffic
- Event Queues - tied to MSI-X vectors, binds completion queues with interrupts

These patches add all the support code to tie into command submission
and response paths, and update the interrupt handling.

Signed-off-by: James Smart
Signed-off-by: James Bottomley
---
 drivers/scsi/lpfc/lpfc_sli.c | 3532 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 3457 insertions(+), 75 deletions(-)

(limited to 'drivers')

diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 706bb22a6e8e..cf42ada3ffcd 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -70,6 +70,350 @@ typedef enum _lpfc_iocb_type {
 LPFC_ABORT_IOCB
 } lpfc_iocb_type;
 
+
+/* Provide function prototypes local to this module.
*/
+static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
+ uint32_t);
+static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
+ uint8_t *, uint32_t *);
+
+static IOCB_t *
+lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
+{
+ return &iocbq->iocb;
+}
+
+/**
+ * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
+ * @q: The Work Queue to operate on.
+ * @wqe: The Work Queue Entry to put on the Work Queue.
+ *
+ * This routine will copy the contents of @wqe to the next available entry on
+ * the @q. This function will then ring the Work Queue Doorbell to signal the
+ * HBA to start processing the Work Queue Entry. This function returns 0 if
+ * successful. If no entries are available on @q then this function will return
+ * -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static uint32_t
+lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
+{
+ union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
+ struct lpfc_register doorbell;
+ uint32_t host_index;
+
+ /* If the host has not yet processed the next entry then we are done */
+ if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+ return -ENOMEM;
+ /* set consumption flag every once in a while */
+ if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
+ bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
+
+ lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
+
+ /* Update the host index before invoking device */
+ host_index = q->host_index;
+ q->host_index = ((q->host_index + 1) % q->entry_count);
+
+ /* Ring Doorbell */
+ doorbell.word0 = 0;
+ bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
+ bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
+ bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
+ writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
+ readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
+
+ return 0;
+}
+
+/**
+ * lpfc_sli4_wq_release - Updates internal hba index for WQ
+ * @q: The Work Queue to operate on.
+ * @index: The index to advance the hba index to.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
+ * an entry, the host calls this function to update the queue's internal
+ * pointers. This routine returns the number of entries that were consumed by
+ * the HBA.
+ **/
+static uint32_t
+lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
+{
+ uint32_t released = 0;
+
+ if (q->hba_index == index)
+ return 0;
+ do {
+ q->hba_index = ((q->hba_index + 1) % q->entry_count);
+ released++;
+ } while (q->hba_index != index);
+ return released;
+}
+
+/**
+ * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
+ * @q: The Mailbox Queue to operate on.
+ * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
+ *
+ * This routine will copy the contents of @mqe to the next available entry on
+ * the @q. This function will then ring the Mailbox Queue Doorbell to signal
+ * the HBA to start processing the Mailbox Queue Entry. This function returns
+ * 0 if successful. If no entries are available on @q then this function will
+ * return -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
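+ *
+ * Minimal caller sketch (illustrative only; the real caller in this
+ * patch is lpfc_sli4_post_async_mbox() further down):
+ *
+ *	spin_lock_irqsave(&phba->hbalock, iflags);
+ *	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, &mboxq->u.mqe);
+ *	spin_unlock_irqrestore(&phba->hbalock, iflags);
+ *	if (rc)
+ *		handle the full-queue (-ENOMEM) case;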
+ **/
+static uint32_t
+lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
+{
+ struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
+ struct lpfc_register doorbell;
+ uint32_t host_index;
+
+ /* If the host has not yet processed the next entry then we are done */
+ if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+ return -ENOMEM;
+ lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
+ /* Save off the mailbox pointer for completion */
+ q->phba->mbox = (MAILBOX_t *)temp_mqe;
+
+ /* Update the host index before invoking device */
+ host_index = q->host_index;
+ q->host_index = ((q->host_index + 1) % q->entry_count);
+
+ /* Ring Doorbell */
+ doorbell.word0 = 0;
+ bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
+ bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
+ writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
+ readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
+ return 0;
+}
+
+/**
+ * lpfc_sli4_mq_release - Updates internal hba index for MQ
+ * @q: The Mailbox Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
+ * consumed an entry, the host calls this function to update the queue's
+ * internal pointers. This routine returns the number of entries that were
+ * consumed by the HBA.
+ **/
+static uint32_t
+lpfc_sli4_mq_release(struct lpfc_queue *q)
+{
+ /* Clear the mailbox pointer for completion */
+ q->phba->mbox = NULL;
+ q->hba_index = ((q->hba_index + 1) % q->entry_count);
+ return 1;
+}
+
+/**
+ * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
+ * @q: The Event Queue to get the first valid EQE from
+ *
+ * This routine will get the first valid Event Queue Entry from @q, update
+ * the queue's internal hba index, and return the EQE. If no valid EQEs are in
+ * the Queue (no more work to do), or the Queue is full of EQEs that have been
+ * processed, but not popped back to the HBA then this routine will return NULL.
+ **/
+static struct lpfc_eqe *
+lpfc_sli4_eq_get(struct lpfc_queue *q)
+{
+ struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
+
+ /* If the next EQE is not valid then we are done */
+ if (!bf_get(lpfc_eqe_valid, eqe))
+ return NULL;
+ /* If the host has not yet processed the next entry then we are done */
+ if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+ return NULL;
+
+ q->hba_index = ((q->hba_index + 1) % q->entry_count);
+ return eqe;
+}
+
+/**
+ * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
+ * @q: The Event Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arm this EQ.
+ *
+ * This routine will mark all Event Queue Entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each event queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of EQEs that were popped.
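+ *
+ * Together with lpfc_sli4_eq_get() above, this yields the usual
+ * consumer loop (illustrative sketch; the interrupt handlers elsewhere
+ * in this patch are the real callers):
+ *
+ *	while ((eqe = lpfc_sli4_eq_get(eq)) != NULL)
+ *		dispatch the event to its completion queue;
+ *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);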
+ **/
+uint32_t
+lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
+{
+ uint32_t released = 0;
+ struct lpfc_eqe *temp_eqe;
+ struct lpfc_register doorbell;
+
+ /* while there are valid entries */
+ while (q->hba_index != q->host_index) {
+ temp_eqe = q->qe[q->host_index].eqe;
+ bf_set(lpfc_eqe_valid, temp_eqe, 0);
+ released++;
+ q->host_index = ((q->host_index + 1) % q->entry_count);
+ }
+ if (unlikely(released == 0 && !arm))
+ return 0;
+
+ /* ring doorbell for number popped */
+ doorbell.word0 = 0;
+ if (arm) {
+ bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+ bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
+ }
+ bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+ bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
+ bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
+ writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+ return released;
+}
+
+/**
+ * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
+ * @q: The Completion Queue to get the first valid CQE from
+ *
+ * This routine will get the first valid Completion Queue Entry from @q, update
+ * the queue's internal hba index, and return the CQE. If no valid CQEs are in
+ * the Queue (no more work to do), or the Queue is full of CQEs that have been
+ * processed, but not popped back to the HBA then this routine will return NULL.
+ **/
+static struct lpfc_cqe *
+lpfc_sli4_cq_get(struct lpfc_queue *q)
+{
+ struct lpfc_cqe *cqe;
+
+ /* If the next CQE is not valid then we are done */
+ if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+ return NULL;
+ /* If the host has not yet processed the next entry then we are done */
+ if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+ return NULL;
+
+ cqe = q->qe[q->hba_index].cqe;
+ q->hba_index = ((q->hba_index + 1) % q->entry_count);
+ return cqe;
+}
+
+/**
+ * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
+ * @q: The Completion Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arm this CQ.
+ *
+ * This routine will mark all Completion queue entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each completion queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of CQEs that were released.
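+ *
+ * Typical consumer pattern (illustrative sketch; pairs with
+ * lpfc_sli4_cq_get() above):
+ *
+ *	while ((cqe = lpfc_sli4_cq_get(cq)) != NULL)
+ *		process the completion;
+ *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);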
+ **/
+uint32_t
+lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
+{
+ uint32_t released = 0;
+ struct lpfc_cqe *temp_qe;
+ struct lpfc_register doorbell;
+
+ /* while there are valid entries */
+ while (q->hba_index != q->host_index) {
+ temp_qe = q->qe[q->host_index].cqe;
+ bf_set(lpfc_cqe_valid, temp_qe, 0);
+ released++;
+ q->host_index = ((q->host_index + 1) % q->entry_count);
+ }
+ if (unlikely(released == 0 && !arm))
+ return 0;
+
+ /* ring doorbell for number popped */
+ doorbell.word0 = 0;
+ if (arm)
+ bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+ bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+ bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
+ bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
+ writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+ return released;
+}
+
+/**
+ * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
+ * @hq: The Header Receive Queue to operate on.
+ * @dq: The Data Receive Queue to operate on.
+ * @hrqe: The header Receive Queue Entry to put on the Receive queue.
+ * @drqe: The data Receive Queue Entry to put on the Receive queue.
+ *
+ * This routine will copy the contents of @hrqe and @drqe to the next
+ * available entries on @hq and @dq. This function will then ring the Receive
+ * Queue Doorbell to signal the HBA to start processing the Receive Queue
+ * Entry. This function returns the index that the rqe was copied to if
+ * successful. If no entries are available on @hq then this function will
+ * return -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static int
+lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
+ struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
+{
+ struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
+ struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
+ struct lpfc_register doorbell;
+ int put_index = hq->host_index;
+
+ if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
+ return -EINVAL;
+ if (hq->host_index != dq->host_index)
+ return -EINVAL;
+ /* If the host has not yet processed the next entry then we are done */
+ if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
+ return -EBUSY;
+ lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
+ lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
+
+ /* Update the host index to point to the next slot */
+ hq->host_index = ((hq->host_index + 1) % hq->entry_count);
+ dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+
+ /* Ring The Header Receive Queue Doorbell */
+ if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
+ doorbell.word0 = 0;
+ bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
+ LPFC_RQ_POST_BATCH);
+ bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
+ writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
+ }
+ return put_index;
+}
+
+/**
+ * lpfc_sli4_rq_release - Updates internal hba index for RQ
+ * @hq: The Header Receive Queue to operate on.
+ * @dq: The Data Receive Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * one Receive Queue Entry by the HBA. When the HBA indicates that it has
+ * consumed an entry the host calls this function to update the queue's
+ * internal pointers. This routine returns the number of entries that were
+ * consumed by the HBA.
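+ *
+ * Release is typically driven from the unsolicited-receive completion
+ * path once a posted header/data buffer pair has been consumed
+ * (illustrative sketch; the put side is shown in
+ * lpfc_sli_hbq_to_firmware_s4() below):
+ *
+ *	lpfc_sli4_rq_release(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);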
+ **/
+static uint32_t
+lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
+{
+ if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
+ return 0;
+ hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
+ dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
+ return 1;
+}
+
 /**
  * lpfc_cmd_iocb - Get next command iocb entry in the ring
  * @phba: Pointer to HBA context object.
@@ -214,6 +558,59 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
 return iocbq;
 }
 
+/**
+ * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ * The sglq structure that holds the xritag and phys and virtual
+ * mappings for the scatter gather list is retrieved from the
+ * active array of sglq. The get of the sglq pointer also clears
+ * the entry in the array. If the status of the IO indicates that
+ * this IO was aborted then the sglq entry is put on the
+ * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
+ * IO has good status or fails for any other reason then the sglq
+ * entry is added to the free list (lpfc_sgl_list).
+ **/
+static void
+__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ struct lpfc_sglq *sglq;
+ size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+ unsigned long iflag;
+
+ if (iocbq->sli4_xritag == NO_XRI)
+ sglq = NULL;
+ else
+ sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
+ if (sglq) {
+ if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
+ || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
+ && (iocbq->iocb.un.ulpWord[4]
+ == IOERR_SLI_ABORTED))) {
+ spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
+ iflag);
+ list_add(&sglq->list,
+ &phba->sli4_hba.lpfc_abts_els_sgl_list);
+ spin_unlock_irqrestore(
+ &phba->sli4_hba.abts_sgl_list_lock, iflag);
+ } else
+ list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+ }
+
+
+ /*
+ * Clean all volatile data fields, preserve iotag and node struct.
+ */
+ memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+ iocbq->sli4_xritag = NO_XRI;
+ list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
+}
+
 /**
  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
  * @phba: Pointer to HBA context object.
@@ -959,6 +1356,37 @@ lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
 return -ENOMEM;
 }
 
+/**
+ * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post an RQE to the SLI4
+ * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
+ * the hbq_buffer_list and return zero, otherwise it will return an error.
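+ *
+ * This routine is reached through the per-SLI-rev jump table, e.g. (sketch
+ * of the dispatch set up in lpfc_mbox_api_table_setup() above):
+ *
+ *	phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
+ *	...
+ *	rc = phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);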
+ **/ +static int +lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, + struct hbq_dmabuf *hbq_buf) +{ + int rc; + struct lpfc_rqe hrqe; + struct lpfc_rqe drqe; + + hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); + hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); + drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); + drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); + rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, + &hrqe, &drqe); + if (rc < 0) + return rc; + hbq_buf->tag = rc; + list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); + return 0; +} + /* HBQ for ELS and CT traffic. */ static struct lpfc_hbq_init lpfc_els_hbq = { .rn = 1, @@ -2574,6 +3002,36 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, return; } +/** + * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @mask: Host attention register mask for this ring. + * + * This function is called from the worker thread when there is a pending + * ELS response iocb on the driver internal slow-path response iocb worker + * queue. The caller does not hold any lock. The function will remove each + * response iocb from the response worker queue and calls the handle + * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. + **/ +static void +lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, uint32_t mask) +{ + struct lpfc_iocbq *irspiocbq; + unsigned long iflag; + + while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) { + /* Get the response iocb from the head of work queue */ + spin_lock_irqsave(&phba->hbalock, iflag); + list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue, + irspiocbq, struct lpfc_iocbq, list); + spin_unlock_irqrestore(&phba->hbalock, iflag); + /* Process the response iocb */ + lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq); + } +} + /** * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring * @phba: Pointer to HBA context object. @@ -3375,6 +3833,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba) return 0; } +/** + * lpfc_sli4_rb_setup - Initialize and post RBs to HBA + * @phba: Pointer to HBA context object. + * + * This function is called during the SLI initialization to configure + * all the HBQs and post buffers to the HBQ. The caller is not + * required to hold any locks. This function will return zero if successful + * else it will return negative error code. + **/ +static int +lpfc_sli4_rb_setup(struct lpfc_hba *phba) +{ + phba->hbq_in_use = 1; + phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count; + phba->hbq_count = 1; + /* Initially populate or replenish the HBQs */ + lpfc_sli_hbqbuf_init_hbqs(phba, 0); + return 0; +} + /** * lpfc_sli_config_port - Issue config port mailbox command * @phba: Pointer to HBA context object. @@ -5130,104 +5608,550 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, } /** - * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb - * - * This routine wraps the actual lockless version for issusing IOCB function - * pointer from the lpfc_hba struct. 
- * - * Return codes: - * IOCB_ERROR - Error - * IOCB_SUCCESS - Success - * IOCB_BUSY - Busy - **/ -static inline int -__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, - struct lpfc_iocbq *piocb, uint32_t flag) -{ - return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); -} - -/** - * lpfc_sli_api_table_setup - Set up sli api fucntion jump table - * @phba: The hba struct for which this call is being executed. - * @dev_grp: The HBA PCI-Device group number. - * - * This routine sets up the SLI interface API function jump table in @phba - * struct. - * Returns: 0 - success, -ENODEV - failure. - **/ -int -lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) + * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. + * @phba: Pointer to HBA context object. + * @piocb: Pointer to command iocb. + * @sglq: Pointer to the scatter gather queue object. + * + * This routine converts the bpl or bde that is in the IOCB + * to a sgl list for the sli4 hardware. The physical address + * of the bpl/bde is converted back to a virtual address. + * If the IOCB contains a BPL then the list of BDE's is + * converted to sli4_sge's. If the IOCB contains a single + * BDE then it is converted to a single sli_sge. + * The IOCB is still in cpu endianess so the contents of + * the bpl can be used without byte swapping. + * + * Returns valid XRI = Success, NO_XRI = Failure. +**/ +static uint16_t +lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, + struct lpfc_sglq *sglq) { + uint16_t xritag = NO_XRI; + struct ulp_bde64 *bpl = NULL; + struct ulp_bde64 bde; + struct sli4_sge *sgl = NULL; + IOCB_t *icmd; + int numBdes = 0; + int i = 0; - switch (dev_grp) { - case LPFC_PCI_DEV_LP: - phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; - phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1419 Invalid HBA PCI-device group: 0x%x\n", - dev_grp); - return -ENODEV; - break; - } - phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; - return 0; + if (!piocbq || !sglq) + return xritag; + + sgl = (struct sli4_sge *)sglq->sgl; + icmd = &piocbq->iocb; + if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { + numBdes = icmd->un.genreq64.bdl.bdeSize / + sizeof(struct ulp_bde64); + /* The addrHigh and addrLow fields within the IOCB + * have not been byteswapped yet so there is no + * need to swap them back. + */ + bpl = (struct ulp_bde64 *) + ((struct lpfc_dmabuf *)piocbq->context3)->virt; + + if (!bpl) + return xritag; + + for (i = 0; i < numBdes; i++) { + /* Should already be byte swapped. */ + sgl->addr_hi = bpl->addrHigh; + sgl->addr_lo = bpl->addrLow; + /* swap the size field back to the cpu so we + * can assign it to the sgl. + */ + bde.tus.w = le32_to_cpu(bpl->tus.w); + bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize); + if ((i+1) == numBdes) + bf_set(lpfc_sli4_sge_last, sgl, 1); + else + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->word3 = cpu_to_le32(sgl->word3); + bpl++; + sgl++; + } + } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { + /* The addrHigh and addrLow fields of the BDE have not + * been byteswapped yet so they need to be swapped + * before putting them in the sgl. 
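+		 * (A single BDE therefore maps to exactly one SGE, whose
+		 * last-SGE bit is set below.)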
+ */ + sgl->addr_hi = + cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); + sgl->addr_lo = + cpu_to_le32(icmd->un.genreq64.bdl.addrLow); + bf_set(lpfc_sli4_sge_len, sgl, + icmd->un.genreq64.bdl.bdeSize); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->word3 = cpu_to_le32(sgl->word3); + } + return sglq->sli4_xritag; } /** - * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb + * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution * @phba: Pointer to HBA context object. - * @pring: Pointer to driver SLI ring object. * @piocb: Pointer to command iocb. - * @flag: Flag indicating if this command can be put into txq. * - * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb - * function. This function gets the hbalock and calls - * __lpfc_sli_issue_iocb function and will return the error returned - * by __lpfc_sli_issue_iocb function. This wrapper is used by - * functions which do not hold hbalock. + * This routine performs a round robin SCSI command to SLI4 FCP WQ index + * distribution. + * + * Return: index into SLI4 fast-path FCP queue index. **/ -int -lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, - struct lpfc_iocbq *piocb, uint32_t flag) +static uint32_t +lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) { - unsigned long iflags; - int rc; - - spin_lock_irqsave(&phba->hbalock, iflags); - rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); - spin_unlock_irqrestore(&phba->hbalock, iflags); + static uint32_t fcp_qidx; - return rc; + return fcp_qidx++ % phba->cfg_fcp_wq_count; } /** - * lpfc_extra_ring_setup - Extra ring setup function + * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. * @phba: Pointer to HBA context object. + * @piocb: Pointer to command iocb. + * @wqe: Pointer to the work queue entry. * - * This function is called while driver attaches with the - * HBA to setup the extra ring. The extra ring is used - * only when driver needs to support target mode functionality - * or IP over FC functionalities. + * This routine converts the iocb command to its Work Queue Entry + * equivalent. The wqe pointer should not have any fields set when + * this routine is called because it will memcpy over them. + * This routine does not set the CQ_ID or the WQEC bits in the + * wqe. * - * This function is called with no lock held. + * Returns: 0 = Success, IOCB_ERROR = Failure. 
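+ *
+ * Illustrative calling pattern (a sketch only; see the real caller,
+ * __lpfc_sli_issue_iocb_s4, further down):
+ *
+ *	union lpfc_wqe wqe;
+ *
+ *	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
+ *		return IOCB_ERROR;
+ *	lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe);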
**/ static int -lpfc_extra_ring_setup( struct lpfc_hba *phba) +lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, + union lpfc_wqe *wqe) { - struct lpfc_sli *psli; - struct lpfc_sli_ring *pring; + uint32_t payload_len = 0; + uint8_t ct = 0; + uint32_t fip; + uint32_t abort_tag; + uint8_t command_type = ELS_COMMAND_NON_FIP; + uint8_t cmnd; + uint16_t xritag; + struct ulp_bde64 *bpl = NULL; + + fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); + /* The fcp commands will set command type */ + if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip)) + command_type = ELS_COMMAND_NON_FIP; + else if (!(iocbq->iocb_flag & LPFC_IO_FCP)) + command_type = ELS_COMMAND_FIP; + else if (iocbq->iocb_flag & LPFC_IO_FCP) + command_type = FCP_COMMAND; + else { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2019 Invalid cmd 0x%x\n", + iocbq->iocb.ulpCommand); + return IOCB_ERROR; + } + /* Some of the fields are in the right position already */ + memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); + abort_tag = (uint32_t) iocbq->iotag; + xritag = iocbq->sli4_xritag; + wqe->words[7] = 0; /* The ct field has moved so reset */ + /* words0-2 bpl convert bde */ + if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { + bpl = (struct ulp_bde64 *) + ((struct lpfc_dmabuf *)iocbq->context3)->virt; + if (!bpl) + return IOCB_ERROR; - psli = &phba->sli; + /* Should already be byte swapped. */ + wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); + wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); + /* swap the size field back to the cpu so we + * can assign it to the sgl. + */ + wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); + payload_len = wqe->generic.bde.tus.f.bdeSize; + } else + payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; - /* Adjust cmd/rsp ring iocb entries more evenly */ + iocbq->iocb.ulpIoTag = iocbq->iotag; + cmnd = iocbq->iocb.ulpCommand; - /* Take some away from the FCP ring */ - pring = &psli->ring[psli->fcp_ring]; - pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; - pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; + switch (iocbq->iocb.ulpCommand) { + case CMD_ELS_REQUEST64_CR: + if (!iocbq->iocb.ulpLe) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2007 Only Limited Edition cmd Format" + " supported 0x%x\n", + iocbq->iocb.ulpCommand); + return IOCB_ERROR; + } + wqe->els_req.payload_len = payload_len; + /* Els_reguest64 has a TMO */ + bf_set(wqe_tmo, &wqe->els_req.wqe_com, + iocbq->iocb.ulpTimeout); + /* Need a VF for word 4 set the vf bit*/ + bf_set(els_req64_vf, &wqe->els_req, 0); + /* And a VFID for word 12 */ + bf_set(els_req64_vfid, &wqe->els_req, 0); + /* + * Set ct field to 3, indicates that the context_tag field + * contains the FCFI and remote N_Port_ID is + * in word 5. 
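+		 * (That is, ct = (ulpCt_h << 1) | ulpCt_l, as computed
+		 * just below.)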
+ */ + + ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); + bf_set(lpfc_wqe_gen_context, &wqe->generic, + iocbq->iocb.ulpContext); + + if (iocbq->vport->fc_myDID != 0) { + bf_set(els_req64_sid, &wqe->els_req, + iocbq->vport->fc_myDID); + bf_set(els_req64_sp, &wqe->els_req, 1); + } + bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); + bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); + /* CCP CCPE PV PRI in word10 were set in the memcpy */ + break; + case CMD_XMIT_SEQUENCE64_CR: + /* word3 iocb=io_tag32 wqe=payload_offset */ + /* payload offset used for multilpe outstanding + * sequences on the same exchange + */ + wqe->words[3] = 0; + /* word4 relative_offset memcpy */ + /* word5 r_ctl/df_ctl memcpy */ + bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); + wqe->xmit_sequence.xmit_len = payload_len; + break; + case CMD_XMIT_BCAST64_CN: + /* word3 iocb=iotag32 wqe=payload_len */ + wqe->words[3] = 0; /* no definition for this in wqe */ + /* word4 iocb=rsvd wqe=rsvd */ + /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ + /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ + bf_set(lpfc_wqe_gen_ct, &wqe->generic, + ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); + break; + case CMD_FCP_IWRITE64_CR: + command_type = FCP_COMMAND_DATA_OUT; + /* The struct for wqe fcp_iwrite has 3 fields that are somewhat + * confusing. + * word3 is payload_len: byte offset to the sgl entry for the + * fcp_command. + * word4 is total xfer len, same as the IOCB->ulpParameter. + * word5 is initial xfer len 0 = wait for xfer-ready + */ + + /* Always wait for xfer-ready before sending data */ + wqe->fcp_iwrite.initial_xfer_len = 0; + /* word 4 (xfer length) should have been set on the memcpy */ + + /* allow write to fall through to read */ + case CMD_FCP_IREAD64_CR: + /* FCP_CMD is always the 1st sgl entry */ + wqe->fcp_iread.payload_len = + payload_len + sizeof(struct fcp_rsp); + + /* word 4 (xfer length) should have been set on the memcpy */ + + bf_set(lpfc_wqe_gen_erp, &wqe->generic, + iocbq->iocb.ulpFCP2Rcvy); + bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS); + /* The XC bit and the XS bit are similar. The driver never + * tracked whether or not the exchange was previouslly open. + * XC = Exchange create, 0 is create. 1 is already open. + * XS = link cmd: 1 do not close the exchange after command. + * XS = 0 close exchange when command completes. + * The only time we would not set the XC bit is when the XS bit + * is set and we are sending our 2nd or greater command on + * this exchange. + */ + + /* ALLOW read & write to fall through to ICMD64 */ + case CMD_FCP_ICMND64_CR: + /* Always open the exchange */ + bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); + + wqe->words[10] &= 0xffff0000; /* zero out ebde count */ + bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); + break; + case CMD_GEN_REQUEST64_CR: + /* word3 command length is described as byte offset to the + * rsp_data. Would always be 16, sizeof(struct sli4_sge) + * sgl[0] = cmnd + * sgl[1] = rsp. 
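+		 * (payload_len here is the value derived from the BDE
+		 * size at the top of this routine.)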
+		 *
+		 */
+		wqe->gen_req.command_len = payload_len;
+		/* Word4 parameter copied in the memcpy */
+		/* Word5 [rctl, type, df_ctl, la] copied in memcpy */
+		/* word6 context tag copied in memcpy */
+		if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
+			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2015 Invalid CT %x command 0x%x\n",
+				ct, iocbq->iocb.ulpCommand);
+			return IOCB_ERROR;
+		}
+		bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
+		bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
+			iocbq->iocb.ulpTimeout);
+
+		bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+		command_type = OTHER_COMMAND;
+		break;
+	case CMD_XMIT_ELS_RSP64_CX:
+		/* words0-2 BDE memcpy */
+		/* word3 iocb=iotag32 wqe=rsvd */
+		wqe->words[3] = 0;
+		/* word4 iocb=did wqe=rsvd. */
+		wqe->words[4] = 0;
+		/* word5 iocb=rsvd wqe=did */
+		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
+			 iocbq->iocb.un.elsreq64.remoteID);
+
+		bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+
+		bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+		bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
+			bf_set(lpfc_wqe_gen_context, &wqe->generic,
+			       iocbq->vport->vpi + phba->vpi_base);
+		command_type = OTHER_COMMAND;
+		break;
+	case CMD_CLOSE_XRI_CN:
+	case CMD_ABORT_XRI_CN:
+	case CMD_ABORT_XRI_CX:
+		/* words 0-2 memcpy should be 0 reserved */
+		/* port will send abts */
+		if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+			/*
+			 * The link is down so the fw does not need to send abts
+			 * on the wire.
+			 */
+			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+		else
+			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
+		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
+		wqe->words[5] = 0;
+		bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
+		wqe->generic.abort_tag = abort_tag;
+		/*
+		 * The abort handler will send us CMD_ABORT_XRI_CN or
+		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
+		 */
+		bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
+		cmnd = CMD_ABORT_XRI_CX;
+		command_type = OTHER_COMMAND;
+		xritag = 0;
+		break;
+	case CMD_XRI_ABORTED_CX:
+	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
+		/* words0-2 are all 0's no bde */
+		/* word3 and word4 are rsvd */
+		wqe->words[3] = 0;
+		wqe->words[4] = 0;
+		/* word5 iocb=rsvd wqe=did */
+		/* There is no remote port id in the IOCB? */
+		/* Let this fall through and fail */
+	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
+	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
+	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
+	case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2014 Invalid command 0x%x\n",
+				iocbq->iocb.ulpCommand);
+		return IOCB_ERROR;
+		break;
+	}
+	bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
+	bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
+	wqe->generic.abort_tag = abort_tag;
+	bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
+	bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
+	bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
+	bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
+
+	return 0;
+}
+
+/**
+ * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
+ * an iocb command to an HBA with SLI-4 interface spec.
+ *
+ * This function is called with hbalock held. The function will return success
+ * after it successfully submits the iocb to firmware or after adding it to
+ * the txq.
+ **/
+static int
+__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
+			 struct lpfc_iocbq *piocb, uint32_t flag)
+{
+	struct lpfc_sglq *sglq;
+	uint16_t xritag;
+	union lpfc_wqe wqe;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
+	uint32_t fcp_wqidx;
+
+	if (piocb->sli4_xritag == NO_XRI) {
+		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+			sglq = NULL;
+		else {
+			sglq = __lpfc_sli_get_sglq(phba);
+			if (!sglq)
+				return IOCB_ERROR;
+			piocb->sli4_xritag = sglq->sli4_xritag;
+		}
+	} else if (piocb->iocb_flag & LPFC_IO_FCP) {
+		sglq = NULL; /* These IOs already have an XRI and
+			      * a mapped sgl.
+			      */
+	} else {
+		/* This is a continuation of a command (CX), so this
+		 * sglq is on the active list
+		 */
+		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
+		if (!sglq)
+			return IOCB_ERROR;
+	}
+
+	if (sglq) {
+		xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
+		if (xritag != sglq->sli4_xritag)
+			return IOCB_ERROR;
+	}
+
+	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
+		return IOCB_ERROR;
+
+	if (piocb->iocb_flag & LPFC_IO_FCP) {
+		fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
+		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
+			return IOCB_ERROR;
+	} else {
+		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+			return IOCB_ERROR;
+	}
+	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
+
+	return 0;
+}
+
+/**
+ * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
+ *
+ * This routine wraps the actual lockless IOCB issue routine through the
+ * function pointer in the lpfc_hba struct.
+ *
+ * Return codes:
+ * 	IOCB_ERROR - Error
+ * 	IOCB_SUCCESS - Success
+ * 	IOCB_BUSY - Busy
+ **/
+static inline int
+__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+		struct lpfc_iocbq *piocb, uint32_t flag)
+{
+	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+}
+
+/**
+ * lpfc_sli_api_table_setup - Set up sli api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the SLI interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+	switch (dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
+		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
+		break;
+	case LPFC_PCI_DEV_OC:
+		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
+		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1419 Invalid HBA PCI-device group: 0x%x\n",
+				dev_grp);
+		return -ENODEV;
+		break;
+	}
+	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
+	return 0;
+}
+
+/**
+ * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq. + * + * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb + * function. This function gets the hbalock and calls + * __lpfc_sli_issue_iocb function and will return the error returned + * by __lpfc_sli_issue_iocb function. This wrapper is used by + * functions which do not hold hbalock. + **/ +int +lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *piocb, uint32_t flag) +{ + unsigned long iflags; + int rc; + + spin_lock_irqsave(&phba->hbalock, iflags); + rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + return rc; +} + +/** + * lpfc_extra_ring_setup - Extra ring setup function + * @phba: Pointer to HBA context object. + * + * This function is called while driver attaches with the + * HBA to setup the extra ring. The extra ring is used + * only when driver needs to support target mode functionality + * or IP over FC functionalities. + * + * This function is called with no lock held. + **/ +static int +lpfc_extra_ring_setup( struct lpfc_hba *phba) +{ + struct lpfc_sli *psli; + struct lpfc_sli_ring *pring; + + psli = &phba->sli; + + /* Adjust cmd/rsp ring iocb entries more evenly */ + + /* Take some away from the FCP ring */ + pring = &psli->ring[psli->fcp_ring]; + pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; + pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; @@ -7152,3 +8076,2461 @@ lpfc_sli_intr_handler(int irq, void *dev_id) /* Return device-level interrupt handling status */ return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; } /* lpfc_sli_intr_handler */ + +/** + * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked by the worker thread to process all the pending + * SLI4 FCP abort XRI events. + **/ +void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event; + + /* First, declare the fcp xri abort event has been handled */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; + spin_unlock_irq(&phba->hbalock); + /* Now, handle all the fcp xri abort events */ + while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { + /* Get the first event from the head of the event queue */ + spin_lock_irq(&phba->hbalock); + list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, + cq_event, struct lpfc_cq_event, list); + spin_unlock_irq(&phba->hbalock); + /* Notify aborted XRI for FCP work queue */ + lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); + /* Free the event processed back to the free pool */ + lpfc_sli4_cq_event_release(phba, cq_event); + } +} + +/** + * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked by the worker thread to process all the pending + * SLI4 els abort xri events. 
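+ * The logic mirrors lpfc_sli4_fcp_xri_abort_event_proc() above, operating
+ * on the ELS xri-abort work queue and the ELS_XRI_ABORT_EVENT flag instead.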
+ **/ +void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event; + + /* First, declare the els xri abort event has been handled */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; + spin_unlock_irq(&phba->hbalock); + /* Now, handle all the els xri abort events */ + while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { + /* Get the first event from the head of the event queue */ + spin_lock_irq(&phba->hbalock); + list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, + cq_event, struct lpfc_cq_event, list); + spin_unlock_irq(&phba->hbalock); + /* Notify aborted XRI for ELS work queue */ + lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); + /* Free the event processed back to the free pool */ + lpfc_sli4_cq_event_release(phba, cq_event); + } +} + +static void +lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, + struct lpfc_iocbq *pIocbOut, + struct lpfc_wcqe_complete *wcqe) +{ + size_t offset = offsetof(struct lpfc_iocbq, iocb); + + memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, + sizeof(struct lpfc_iocbq) - offset); + memset(&pIocbIn->sli4_info, 0, + sizeof(struct lpfc_sli4_rspiocb_info)); + /* Map WCQE parameters into irspiocb parameters */ + pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); + if (pIocbOut->iocb_flag & LPFC_IO_FCP) + if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) + pIocbIn->iocb.un.fcpi.fcpi_parm = + pIocbOut->iocb.un.fcpi.fcpi_parm - + wcqe->total_data_placed; + else + pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; + else + pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; + /* Load in additional WCQE parameters */ + pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe); + pIocbIn->sli4_info.bfield = 0; + if (bf_get(lpfc_wcqe_c_xb, wcqe)) + pIocbIn->sli4_info.bfield |= LPFC_XB; + if (bf_get(lpfc_wcqe_c_pv, wcqe)) { + pIocbIn->sli4_info.bfield |= LPFC_PV; + pIocbIn->sli4_info.priority = + bf_get(lpfc_wcqe_c_priority, wcqe); + } +} + +/** + * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event + * @phba: Pointer to HBA context object. + * @wcqe: Pointer to work-queue completion queue entry. + * + * This routine handles an ELS work-queue completion event. + * + * Return: true if work posted to worker thread, otherwise false. 
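+ * (When work is posted, the pseudo response iocb is queued on the
+ * sp_rspiocb_work_queue and HA_R0ATT is set for the ELS ring so the
+ * worker thread will pick it up.)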
+ **/
+static bool
+lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
+			     struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq *irspiocbq;
+	unsigned long iflags;
+	bool workposted = false;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	pring->stats.iocb_event++;
+	/* Look up the ELS command IOCB and create pseudo response IOCB */
+	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	if (unlikely(!cmdiocbq)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0386 ELS complete with no corresponding "
+				"cmdiocb: iotag (%d)\n",
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+		return workposted;
+	}
+
+	/* Fake the irspiocbq and copy necessary response information */
+	irspiocbq = lpfc_sli_get_iocbq(phba);
+	if (!irspiocbq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0387 Failed to allocate an iocbq\n");
+		return workposted;
+	}
+	lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
+
+	/* Add the irspiocb to the response IOCB work list */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
+	/* Indicate ELS ring attention */
+	phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	workposted = true;
+
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles a slow-path WQ entry consumed event by invoking the
+ * proper WQ release routine to the slow-path WQ.
+ **/
+static void
+lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
+			     struct lpfc_wcqe_release *wcqe)
+{
+	/* Check for the slow-path ELS work queue */
+	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
+		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
+				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
+	else
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"2579 Slow-path wqe consume event carries "
+				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
+				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
+				phba->sli4_hba.els_wq->queue_id);
+}
+
+/**
+ * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to a WQ completion queue.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an XRI abort event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
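+ * (The CQE is copied into a driver cq_event and queued on either the FCP
+ * or the ELS xri-abort work queue, depending on cq->subtype.)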
+ **/ +static bool +lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, + struct lpfc_queue *cq, + struct sli4_wcqe_xri_aborted *wcqe) +{ + bool workposted = false; + struct lpfc_cq_event *cq_event; + unsigned long iflags; + + /* Allocate a new internal CQ_EVENT entry */ + cq_event = lpfc_sli4_cq_event_alloc(phba); + if (!cq_event) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0602 Failed to allocate CQ_EVENT entry\n"); + return false; + } + + /* Move the CQE into the proper xri abort event list */ + memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); + switch (cq->subtype) { + case LPFC_FCP: + spin_lock_irqsave(&phba->hbalock, iflags); + list_add_tail(&cq_event->list, + &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); + /* Set the fcp xri abort event flag */ + phba->hba_flag |= FCP_XRI_ABORT_EVENT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + workposted = true; + break; + case LPFC_ELS: + spin_lock_irqsave(&phba->hbalock, iflags); + list_add_tail(&cq_event->list, + &phba->sli4_hba.sp_els_xri_aborted_work_queue); + /* Set the els xri abort event flag */ + phba->hba_flag |= ELS_XRI_ABORT_EVENT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + workposted = true; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0603 Invalid work queue CQE subtype (x%x)\n", + cq->subtype); + workposted = false; + break; + } + return workposted; +} + +/** + * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry + * @phba: Pointer to HBA context object. + * @cq: Pointer to the completion queue. + * @wcqe: Pointer to a completion queue entry. + * + * This routine process a slow-path work-queue completion queue entry. + * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_cqe *cqe) +{ + struct lpfc_wcqe_complete wcqe; + bool workposted = false; + + /* Copy the work queue CQE and convert endian order if needed */ + lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); + + /* Check and process for different type of WCQE and dispatch */ + switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { + case CQE_CODE_COMPL_WQE: + /* Process the WQ complete event */ + workposted = lpfc_sli4_sp_handle_els_wcqe(phba, + (struct lpfc_wcqe_complete *)&wcqe); + break; + case CQE_CODE_RELEASE_WQE: + /* Process the WQ release event */ + lpfc_sli4_sp_handle_rel_wcqe(phba, + (struct lpfc_wcqe_release *)&wcqe); + break; + case CQE_CODE_XRI_ABORTED: + /* Process the WQ XRI abort event */ + workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, + (struct sli4_wcqe_xri_aborted *)&wcqe); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0388 Not a valid WCQE code: x%x\n", + bf_get(lpfc_wcqe_c_code, &wcqe)); + break; + } + return workposted; +} + +/** + * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry + * @phba: Pointer to HBA context object. + * @rcqe: Pointer to receive-queue completion queue entry. + * + * This routine process a receive-queue completion queue entry. + * + * Return: true if work posted to worker thread, otherwise false. 
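+ * (A truncated frame is logged but still queued for the worker thread;
+ * an insufficient-buffer status sets HBA_POST_RECEIVE_BUFFER so that more
+ * buffers get posted.)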
+ **/
+static bool
+lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+{
+	struct lpfc_rcqe rcqe;
+	bool workposted = false;
+	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
+	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
+	struct hbq_dmabuf *dma_buf;
+	uint32_t status;
+	unsigned long iflags;
+
+	/* Copy the receive queue CQE and convert endian order if needed */
+	lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
+	lpfc_sli4_rq_release(hrq, drq);
+	if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
+		goto out;
+	if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
+		goto out;
+
+	status = bf_get(lpfc_rcqe_status, &rcqe);
+	switch (status) {
+	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2537 Receive Frame Truncated!!\n");
+		/* fall through - queue the truncated frame anyway */
+	case FC_STATUS_RQ_SUCCESS:
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
+		if (!dma_buf) {
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			goto out;
+		}
+		memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
+		/* save off the frame for the worker thread to process */
+		list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
+		/* Frame received */
+		phba->hba_flag |= HBA_RECEIVE_BUFFER;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	case FC_STATUS_INSUFF_BUF_NEED_BUF:
+	case FC_STATUS_INSUFF_BUF_FRM_DISC:
+		/* Post more buffers if possible */
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	}
+out:
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to slow-path event queue entry.
+ *
+ * This routine processes an event queue entry from the slow-path event queue.
+ * It will check the MajorCode and MinorCode to determine whether this is for
+ * a completion event on a completion queue; if not, an error is logged and
+ * the routine simply returns. Otherwise, it will get to the corresponding
+ * completion queue and process all the entries on that completion queue,
+ * rearm the completion queue, and then return.
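+ * (MCQ, WCQ and RCQ child queues are handled; processed entries are
+ * released back in batches of LPFC_GET_QE_REL_INT without re-arming, and
+ * the queue is re-armed once at the end.)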
+ *
+ **/
+static void
+lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+{
+	struct lpfc_queue *cq = NULL, *childq, *speq;
+	struct lpfc_cqe *cqe;
+	bool workposted = false;
+	int ecount = 0;
+	uint16_t cqid;
+
+	if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
+	    bf_get(lpfc_eqe_minor_code, eqe) != 0) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0359 Not a valid slow-path completion "
+				"event: majorcode=x%x, minorcode=x%x\n",
+				bf_get(lpfc_eqe_major_code, eqe),
+				bf_get(lpfc_eqe_minor_code, eqe));
+		return;
+	}
+
+	/* Get the reference to the corresponding CQ */
+	cqid = bf_get(lpfc_eqe_resource_id, eqe);
+
+	/* Search for completion queue pointer matching this cqid */
+	speq = phba->sli4_hba.sp_eq;
+	list_for_each_entry(childq, &speq->child_list, list) {
+		if (childq->queue_id == cqid) {
+			cq = childq;
+			break;
+		}
+	}
+	if (unlikely(!cq)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0365 Slow-path CQ identifier (%d) does "
+				"not exist\n", cqid);
+		return;
+	}
+
+	/* Process all the entries to the CQ */
+	switch (cq->type) {
+	case LPFC_MCQ:
+		while ((cqe = lpfc_sli4_cq_get(cq))) {
+			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
+			if (!(++ecount % LPFC_GET_QE_REL_INT))
+				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+		}
+		break;
+	case LPFC_WCQ:
+		while ((cqe = lpfc_sli4_cq_get(cq))) {
+			workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
+			if (!(++ecount % LPFC_GET_QE_REL_INT))
+				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+		}
+		break;
+	case LPFC_RCQ:
+		while ((cqe = lpfc_sli4_cq_get(cq))) {
+			workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
+			if (!(++ecount % LPFC_GET_QE_REL_INT))
+				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+		}
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0370 Invalid completion queue type (%d)\n",
+				cq->type);
+		return;
+	}
+
+	/* Catch the no cq entry condition, log an error */
+	if (unlikely(ecount == 0))
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0371 No entry from the CQ: identifier "
+				"(x%x), type (%d)\n", cq->queue_id, cq->type);
+
+	/* In any case, flush and re-arm the CQ */
+	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+	/* wake up worker thread if there is work to be done */
+	if (workposted)
+		lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine processes a fast-path work queue completion entry from a
+ * fast-path event queue for FCP command response completion.
+ **/
+static void
+lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
+			     struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq irspiocbq;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	pring->stats.iocb_event++;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	/* Check for response status */
+	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
+		/* If resource errors reported from HBA, reduce queue
+		 * depth of the SCSI device.
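+		 * (Only IOSTAT_LOCAL_REJECT with IOERR_NO_RESOURCES
+		 * triggers the ramp-down below.)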
+		 */
+		if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
+		     IOSTAT_LOCAL_REJECT) &&
+		    (wcqe->parameter == IOERR_NO_RESOURCES)) {
+			phba->lpfc_rampdown_queue_depth(phba);
+		}
+		/* Log the error status */
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0373 FCP complete error: status=x%x, "
+				"hw_status=x%x, total_data_specified=%d, "
+				"parameter=x%x, word3=x%x\n",
+				bf_get(lpfc_wcqe_c_status, wcqe),
+				bf_get(lpfc_wcqe_c_hw_status, wcqe),
+				wcqe->total_data_placed, wcqe->parameter,
+				wcqe->word3);
+	}
+
+	/* Look up the FCP command IOCB and create pseudo response IOCB */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	if (unlikely(!cmdiocbq)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0374 FCP complete with no corresponding "
+				"cmdiocb: iotag (%d)\n",
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+		return;
+	}
+	if (unlikely(!cmdiocbq->iocb_cmpl)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0375 FCP cmdiocb not callback function "
+				"iotag: (%d)\n",
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+		return;
+	}
+
+	/* Fake the irspiocb and copy necessary response information */
+	lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
+
+	/* Pass the cmd_iocb and the rsp state to the upper layer */
+	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
+}
+
+/**
+ * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to completion queue.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles a fast-path WQ entry consumed event by invoking the
+ * proper WQ release routine on the fast-path WQ.
+ **/
+static void
+lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			     struct lpfc_wcqe_release *wcqe)
+{
+	struct lpfc_queue *childwq;
+	bool wqid_matched = false;
+	uint16_t fcp_wqid;
+
+	/* Check for fast-path FCP work queue release */
+	fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
+	list_for_each_entry(childwq, &cq->child_list, list) {
+		if (childwq->queue_id == fcp_wqid) {
+			lpfc_sli4_wq_release(childwq,
+					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
+			wqid_matched = true;
+			break;
+		}
+	}
+	/* Report warning log message if no match found */
+	if (wqid_matched != true)
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"2580 Fast-path wqe consume event carries "
+				"miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
+}
+
+/**
+ * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @cqe: Pointer to fast-path completion queue entry.
+ *
+ * This routine processes a fast-path work queue completion entry from a
+ * fast-path event queue for FCP command response completion.
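+ *
+ * Return: true if work was posted to the worker thread, otherwise false.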
+ **/
+static int
+lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			 struct lpfc_cqe *cqe)
+{
+	struct lpfc_wcqe_release wcqe;
+	bool workposted = false;
+
+	/* Copy the work queue CQE and convert endian order if needed */
+	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+
+	/* Check and process for different type of WCQE and dispatch */
+	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
+	case CQE_CODE_COMPL_WQE:
+		/* Process the WQ complete event */
+		lpfc_sli4_fp_handle_fcp_wcqe(phba,
+				(struct lpfc_wcqe_complete *)&wcqe);
+		break;
+	case CQE_CODE_RELEASE_WQE:
+		/* Process the WQ release event */
+		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
+				(struct lpfc_wcqe_release *)&wcqe);
+		break;
+	case CQE_CODE_XRI_ABORTED:
+		/* Process the WQ XRI abort event */
+		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+				(struct sli4_wcqe_xri_aborted *)&wcqe);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0144 Not a valid WCQE code: x%x\n",
+				bf_get(lpfc_wcqe_c_code, &wcqe));
+		break;
+	}
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine processes an event queue entry from the fast-path event queue.
+ * It will check the MajorCode and MinorCode to determine whether this is for
+ * a completion event on a completion queue; if not, an error is logged and
+ * the routine simply returns. Otherwise, it will get to the corresponding
+ * completion queue and process all the entries on the completion queue,
+ * rearm the completion queue, and then return.
+ **/
+static void
+lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+			uint32_t fcp_cqidx)
+{
+	struct lpfc_queue *cq;
+	struct lpfc_cqe *cqe;
+	bool workposted = false;
+	uint16_t cqid;
+	int ecount = 0;
+
+	if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
+	    unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0366 Not a valid fast-path completion "
+				"event: majorcode=x%x, minorcode=x%x\n",
+				bf_get(lpfc_eqe_major_code, eqe),
+				bf_get(lpfc_eqe_minor_code, eqe));
+		return;
+	}
+
+	cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
+	if (unlikely(!cq)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0367 Fast-path completion queue does not "
+				"exist\n");
+		return;
+	}
+
+	/* Get the reference to the corresponding CQ */
+	cqid = bf_get(lpfc_eqe_resource_id, eqe);
+	if (unlikely(cqid != cq->queue_id)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0368 Miss-matched fast-path completion "
+				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
+				cqid, cq->queue_id);
+		return;
+	}
+
+	/* Process all the entries to the CQ */
+	while ((cqe = lpfc_sli4_cq_get(cq))) {
+		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+		if (!(++ecount % LPFC_GET_QE_REL_INT))
+			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+	}
+
+	/* Catch the no cq entry condition */
+	if (unlikely(ecount == 0))
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0369 No entry from fast-path completion "
+				"queue fcpcqid=%d\n", cq->queue_id);
+
+	/* In any case, flush and re-arm the CQ */
+	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+	/* wake up worker thread if there is work to be done */
+	if (workposted)
+		lpfc_worker_wake_up(phba);
+}
+
+static void
+lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+	struct lpfc_eqe *eqe;
+
+	/* walk all the EQ entries and drop on the floor */
+	while ((eqe = lpfc_sli4_eq_get(eq)))
+		;
+
+	/* Clear and re-arm the EQ */
+	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+}
+
+/**
+ * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there are slow-path events in
+ * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
+ * interrupt mode, this function is called as part of the device-level
+ * interrupt handler. When the PCI slot is in error recovery or the HBA is
+ * undergoing initialization, the interrupt handler will not process the
+ * interrupt. The link attention and ELS ring attention events are handled
+ * by the worker thread. The interrupt handler signals the worker thread
+ * and returns for these events. This function is called without any lock
+ * held. It gets the hbalock to access and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba *phba;
+	struct lpfc_queue *speq;
+	struct lpfc_eqe *eqe;
+	unsigned long iflag;
+	int ecount = 0;
+
+	/*
+	 * Get the driver's phba structure from the dev_id
+	 */
+	phba = (struct lpfc_hba *)dev_id;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/* Get to the EQ struct associated with this vector */
+	speq = phba->sli4_hba.sp_eq;
+
+	/* Check device state for handling interrupt */
+	if (unlikely(lpfc_intr_state_check(phba))) {
+		/* Check again for link_state with lock held */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		if (phba->link_state < LPFC_LINK_DOWN)
+			/* Flush, clear interrupt, and rearm the EQ */
+			lpfc_sli4_eq_flush(phba, speq);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return IRQ_NONE;
+	}
+
+	/*
+	 * Process all the events on the slow-path EQ
+	 */
+	while ((eqe = lpfc_sli4_eq_get(speq))) {
+		lpfc_sli4_sp_handle_eqe(phba, eqe);
+		if (!(++ecount % LPFC_GET_QE_REL_INT))
+			lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
+	}
+
+	/* Always clear and re-arm the slow-path EQ */
+	lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
+
+	/* Catch the no cq entry condition */
+	if (unlikely(ecount == 0)) {
+		if (phba->intr_type == MSIX)
+			/* MSI-X treated interrupt served as no EQ share INT */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"0357 MSI-X interrupt with no EQE\n");
+		else
+			/* Non MSI-X treated on interrupt as EQ share INT */
+			return IRQ_NONE;
+	}
+
+	return IRQ_HANDLED;
+} /* lpfc_sli4_sp_intr_handler */
+
+/**
+ * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
+ * ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The SCSI FCP fast-path ring events are handled in
+ * interrupt context. This function is called without any lock held.
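+ * The @dev_id passed in here is a struct lpfc_fcp_eq_hdl, which carries
+ * both the phba pointer and the index of the fast-path EQ serviced by
+ * this vector.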
+ * It gets the hbalock to access and update SLI data structures. Note that
+ * the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ index is
+ * equal to the FCP CQ index.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba *phba;
+	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+	struct lpfc_queue *fpeq;
+	struct lpfc_eqe *eqe;
+	unsigned long iflag;
+	int ecount = 0;
+	uint32_t fcp_eqidx;
+
+	/* Get the driver's phba structure from the dev_id */
+	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
+	phba = fcp_eq_hdl->phba;
+	fcp_eqidx = fcp_eq_hdl->idx;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/* Get to the EQ struct associated with this vector */
+	fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
+
+	/* Check device state for handling interrupt */
+	if (unlikely(lpfc_intr_state_check(phba))) {
+		/* Check again for link_state with lock held */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		if (phba->link_state < LPFC_LINK_DOWN)
+			/* Flush, clear interrupt, and rearm the EQ */
+			lpfc_sli4_eq_flush(phba, fpeq);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return IRQ_NONE;
+	}
+
+	/*
+	 * Process all the events on FCP fast-path EQ
+	 */
+	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+		lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
+		if (!(++ecount % LPFC_GET_QE_REL_INT))
+			lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
+	}
+
+	/* Always clear and re-arm the fast-path EQ */
+	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+
+	if (unlikely(ecount == 0)) {
+		if (phba->intr_type == MSIX)
+			/* MSI-X treated interrupt served as no EQ share INT */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"0358 MSI-X interrupt with no EQE\n");
+		else
+			/* Non MSI-X treated on interrupt as EQ share INT */
+			return IRQ_NONE;
+	}
+
+	return IRQ_HANDLED;
+} /* lpfc_sli4_fp_intr_handler */
+
+/**
+ * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is the device-level interrupt handler to device with SLI-4
+ * interface spec, called from the PCI layer when either MSI or Pin-IRQ
+ * interrupt mode is enabled and there is an event in the HBA which requires
+ * driver attention. This function invokes the slow-path interrupt attention
+ * handling function and fast-path interrupt attention handling function in
+ * turn to process the relevant HBA attention events. This function is called
+ * without any lock held. It gets the hbalock to access and update SLI data
+ * structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba *phba;
+	irqreturn_t sp_irq_rc, fp_irq_rc;
+	bool fp_handled = false;
+	uint32_t fcp_eqidx;
+
+	/* Get the driver's phba structure from the dev_id */
+	phba = (struct lpfc_hba *)dev_id;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/*
+	 * Invokes slow-path host attention interrupt handling as appropriate.
+	 */
+	sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
+
+	/*
+	 * Invoke fast-path host attention interrupt handling as appropriate.
+	 */
+	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+		fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
+					&phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
+		if (fp_irq_rc == IRQ_HANDLED)
+			fp_handled |= true;
+	}
+
+	return (fp_handled == true) ?
IRQ_HANDLED : sp_irq_rc; +} /* lpfc_sli4_intr_handler */ + +/** + * lpfc_sli4_queue_free - free a queue structure and associated memory + * @queue: The queue structure to free. + * + * This function frees a queue structure and the DMAable memeory used for + * the host resident queue. This function must be called after destroying the + * queue on the HBA. + **/ +void +lpfc_sli4_queue_free(struct lpfc_queue *queue) +{ + struct lpfc_dmabuf *dmabuf; + + if (!queue) + return; + + while (!list_empty(&queue->page_list)) { + list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, + list); + dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE, + dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + } + kfree(queue); + return; +} + +/** + * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure + * @phba: The HBA that this queue is being created on. + * @entry_size: The size of each queue entry for this queue. + * @entry count: The number of entries that this queue will handle. + * + * This function allocates a queue structure and the DMAable memory used for + * the host resident queue. This function must be called before creating the + * queue on the HBA. + **/ +struct lpfc_queue * +lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, + uint32_t entry_count) +{ + struct lpfc_queue *queue; + struct lpfc_dmabuf *dmabuf; + int x, total_qe_count; + void *dma_pointer; + + + queue = kzalloc(sizeof(struct lpfc_queue) + + (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); + if (!queue) + return NULL; + queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE; + INIT_LIST_HEAD(&queue->list); + INIT_LIST_HEAD(&queue->page_list); + INIT_LIST_HEAD(&queue->child_list); + for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) + goto out_fail; + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + PAGE_SIZE, &dmabuf->phys, + GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + goto out_fail; + } + dmabuf->buffer_tag = x; + list_add_tail(&dmabuf->list, &queue->page_list); + /* initialize queue's entry array */ + dma_pointer = dmabuf->virt; + for (; total_qe_count < entry_count && + dma_pointer < (PAGE_SIZE + dmabuf->virt); + total_qe_count++, dma_pointer += entry_size) { + queue->qe[total_qe_count].address = dma_pointer; + } + } + queue->entry_size = entry_size; + queue->entry_count = entry_count; + queue->phba = phba; + + return queue; +out_fail: + lpfc_sli4_queue_free(queue); + return NULL; +} + +/** + * lpfc_eq_create - Create an Event Queue on the HBA + * @phba: HBA structure that indicates port to create a queue on. + * @eq: The queue structure to use to create the event queue. + * @imax: The maximum interrupt per second limit. + * + * This function creates an event queue, as detailed in @eq, on a port, + * described by @phba by sending an EQ_CREATE mailbox command to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @eq struct + * is used to get the entry count and entry size that are necessary to + * determine the number of pages to allocate and use for this queue. This + * function will send the EQ_CREATE mailbox command to the HBA to setup the + * event queue. This function is asynchronous and will wait for the mailbox + * command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return ENOMEM. 
If the queue create mailbox command + * fails this function will return ENXIO. + **/ +uint32_t +lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) +{ + struct lpfc_mbx_eq_create *eq_create; + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + struct lpfc_dmabuf *dmabuf; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + uint16_t dmult; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_eq_create) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_EQ_CREATE, + length, LPFC_SLI4_MBX_EMBED); + eq_create = &mbox->u.mqe.un.eq_create; + bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, + eq->page_count); + bf_set(lpfc_eq_context_size, &eq_create->u.request.context, + LPFC_EQE_SIZE); + bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); + /* Calculate delay multiper from maximum interrupt per second */ + dmult = LPFC_DMULT_CONST/imax - 1; + bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, + dmult); + switch (eq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0360 Unsupported EQ count. (%d)\n", + eq->entry_count); + if (eq->entry_count < 256) + return -EINVAL; + /* otherwise default to smallest count (drop through) */ + case 256: + bf_set(lpfc_eq_context_count, &eq_create->u.request.context, + LPFC_EQ_CNT_256); + break; + case 512: + bf_set(lpfc_eq_context_count, &eq_create->u.request.context, + LPFC_EQ_CNT_512); + break; + case 1024: + bf_set(lpfc_eq_context_count, &eq_create->u.request.context, + LPFC_EQ_CNT_1024); + break; + case 2048: + bf_set(lpfc_eq_context_count, &eq_create->u.request.context, + LPFC_EQ_CNT_2048); + break; + case 4096: + bf_set(lpfc_eq_context_count, &eq_create->u.request.context, + LPFC_EQ_CNT_4096); + break; + } + list_for_each_entry(dmabuf, &eq->page_list, list) { + eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->context1 = NULL; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2500 EQ_CREATE mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + eq->type = LPFC_EQ; + eq->subtype = LPFC_NONE; + eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); + if (eq->queue_id == 0xFFFF) + status = -ENXIO; + eq->host_index = 0; + eq->hba_index = 0; + + if (rc != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_cq_create - Create a Completion Queue on the HBA + * @phba: HBA structure that indicates port to create a queue on. + * @cq: The queue structure to use to create the completion queue. + * @eq: The event queue to bind this completion queue to. + * + * This function creates a completion queue, as detailed in @wq, on a port, + * described by @phba by sending a CQ_CREATE mailbox command to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. 
The @cq struct + * is used to get the entry count and entry size that are necessary to + * determine the number of pages to allocate and use for this queue. The @eq + * is used to indicate which event queue to bind this completion queue to. This + * function will send the CQ_CREATE mailbox command to the HBA to setup the + * completion queue. This function is asynchronous and will wait for the mailbox + * command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return ENOMEM. If the queue create mailbox command + * fails this function will return ENXIO. + **/ +uint32_t +lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_queue *eq, uint32_t type, uint32_t subtype) +{ + struct lpfc_mbx_cq_create *cq_create; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_cq_create) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_CQ_CREATE, + length, LPFC_SLI4_MBX_EMBED); + cq_create = &mbox->u.mqe.un.cq_create; + bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, + cq->page_count); + bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); + bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); + bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id); + switch (cq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0361 Unsupported CQ count. (%d)\n", + cq->entry_count); + if (cq->entry_count < 256) + return -EINVAL; + /* otherwise default to smallest count (drop through) */ + case 256: + bf_set(lpfc_cq_context_count, &cq_create->u.request.context, + LPFC_CQ_CNT_256); + break; + case 512: + bf_set(lpfc_cq_context_count, &cq_create->u.request.context, + LPFC_CQ_CNT_512); + break; + case 1024: + bf_set(lpfc_cq_context_count, &cq_create->u.request.context, + LPFC_CQ_CNT_1024); + break; + } + list_for_each_entry(dmabuf, &cq->page_list, list) { + cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + + /* The IOCTL status is embedded in the mailbox subheader. 
*/
+	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2501 CQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+	if (cq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	/* link the cq onto the parent eq child list */
+	list_add_tail(&cq->list, &eq->child_list);
+	/* Set up completion queue's type and subtype */
+	cq->type = type;
+	cq->subtype = subtype;
+	cq->host_index = 0;
+	cq->hba_index = 0;
+out:
+
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_wq_create - Create a Work Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @wq: The queue structure to use to create the work queue.
+ * @cq: The completion queue to bind this work queue to.
+ * @subtype: The subtype of the work queue indicating its functionality.
+ *
+ * This function creates a work queue, as detailed in @wq, on a port, described
+ * by @phba by sending a WQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @wq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. The @cq
+ * is used to indicate which completion queue to bind this work queue to. This
+ * function will send the WQ_CREATE mailbox command to the HBA to set up the
+ * work queue. This function is synchronous and will wait for the mailbox
+ * command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+	       struct lpfc_queue *cq, uint32_t subtype)
+{
+	struct lpfc_mbx_wq_create *wq_create;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_wq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	wq_create = &mbox->u.mqe.un.wq_create;
+	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
+	       wq->page_count);
+	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
+	       cq->queue_id);
+	list_for_each_entry(dmabuf, &wq->page_list, list) {
+		wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader.
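+	 *
+	 * Illustrative bring-up order, not from the original patch: a work
+	 * queue only exists relative to a CQ, which in turn hangs off an EQ,
+	 * so a caller is expected to create the chain top-down, e.g.:
+	 *
+	 *	lpfc_eq_create(phba, eq, imax);
+	 *	lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
+	 *	lpfc_wq_create(phba, wq, cq, LPFC_FCP);
+	 *
+	 * (LPFC_WCQ/LPFC_FCP are assumed type/subtype constants; error
+	 * checks omitted for brevity.)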
*/
+	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2503 WQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
+	if (wq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	wq->type = LPFC_WQ;
+	wq->subtype = subtype;
+	wq->host_index = 0;
+	wq->hba_index = 0;
+
+	/* link the wq onto the parent cq child list */
+	list_add_tail(&wq->list, &cq->child_list);
+out:
+	/* free the mailbox unless it timed out and may still complete */
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_rq_create - Create a Receive Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @hrq: The queue structure to use to create the header receive queue.
+ * @drq: The queue structure to use to create the data receive queue.
+ * @cq: The completion queue to bind this receive queue pair to.
+ * @subtype: The functional subtype to record in both receive queues.
+ *
+ * This function creates a receive buffer queue pair, as detailed in @hrq and
+ * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
+ * to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
+ * structs are used to get the entry count that is necessary to determine the
+ * number of pages to use for this queue. The @cq is used to indicate which
+ * completion queue to bind received buffers that are posted to these queues to.
+ * This function will send the RQ_CREATE mailbox command to the HBA to set up
+ * the receive queue pair. This function is synchronous and will wait for the
+ * mailbox command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
+{
+	struct lpfc_mbx_rq_create *rq_create;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (hrq->entry_count != drq->entry_count)
+		return -EINVAL;
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_rq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	rq_create = &mbox->u.mqe.un.rq_create;
+	switch (hrq->entry_count) {
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2535 Unsupported RQ count. (%d)\n",
+				hrq->entry_count);
+		if (hrq->entry_count < 512) {
+			/* free the mailbox on this early-exit error path */
+			mempool_free(mbox, phba->mbox_mem_pool);
+			return -EINVAL;
+		}
+		/* otherwise default to smallest count (drop through) */
+	case 512:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_512);
+		break;
+	case 1024:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_1024);
+		break;
+	case 2048:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_2048);
+		break;
+	case 4096:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_4096);
+		break;
+	}
+	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
+	       cq->queue_id);
+	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
+	       hrq->page_count);
+	bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+	       LPFC_HDR_BUF_SIZE);
+	list_for_each_entry(dmabuf, &hrq->page_list, list) {
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2504 RQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
+	if (hrq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	hrq->type = LPFC_HRQ;
+	hrq->subtype = subtype;
+	hrq->host_index = 0;
+	hrq->hba_index = 0;
+
+	/* now create the data queue */
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	switch (drq->entry_count) {
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2536 Unsupported RQ count. (%d)\n",
+				drq->entry_count);
+		if (drq->entry_count < 512) {
+			/* free the mailbox on this early-exit error path */
+			mempool_free(mbox, phba->mbox_mem_pool);
+			return -EINVAL;
+		}
+		/* otherwise default to smallest count (drop through) */
+	case 512:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_512);
+		break;
+	case 1024:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_1024);
+		break;
+	case 2048:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_2048);
+		break;
+	case 4096:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_4096);
+		break;
+	}
+	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
+	       cq->queue_id);
+	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
+	       drq->page_count);
+	bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+	       LPFC_DATA_BUF_SIZE);
+	list_for_each_entry(dmabuf, &drq->page_list, list) {
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader.
*/ + shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + status = -ENXIO; + goto out; + } + drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); + if (drq->queue_id == 0xFFFF) { + status = -ENXIO; + goto out; + } + drq->type = LPFC_DRQ; + drq->subtype = subtype; + drq->host_index = 0; + drq->hba_index = 0; + + /* link the header and data RQs onto the parent cq child list */ + list_add_tail(&hrq->list, &cq->child_list); + list_add_tail(&drq->list, &cq->child_list); + +out: + if (rc != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_eq_destroy - Destroy an event Queue on the HBA + * @eq: The queue structure associated with the queue to destroy. + * + * This function destroys a queue, as detailed in @eq by sending an mailbox + * command, specific to the type of queue, to the HBA. + * + * The @eq struct is used to get the queue ID of the queue to destroy. + * + * On success this function will return a zero. If the queue destroy mailbox + * command fails this function will return ENXIO. + **/ +uint32_t +lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) +{ + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + if (!eq) + return -ENODEV; + mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_eq_destroy) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_EQ_DESTROY, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, + eq->queue_id); + mbox->vport = eq->phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + + rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2505 EQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + + /* Remove eq from any list */ + list_del_init(&eq->list); + if (rc != MBX_TIMEOUT) + mempool_free(mbox, eq->phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_cq_destroy - Destroy a Completion Queue on the HBA + * @cq: The queue structure associated with the queue to destroy. + * + * This function destroys a queue, as detailed in @cq by sending an mailbox + * command, specific to the type of queue, to the HBA. + * + * The @cq struct is used to get the queue ID of the queue to destroy. + * + * On success this function will return a zero. If the queue destroy mailbox + * command fails this function will return ENXIO. 
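+ *
+ * Illustration only, not from the original patch: teardown mirrors
+ * bring-up in reverse, so child queues are destroyed and unlinked from
+ * their parent's child_list before the parent itself, e.g.:
+ *
+ *	lpfc_wq_destroy(phba, wq);
+ *	lpfc_rq_destroy(phba, hrq, drq);
+ *	lpfc_cq_destroy(phba, cq);
+ *	lpfc_eq_destroy(phba, eq);
+ *
+ * (error handling omitted for brevity).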
+ **/ +uint32_t +lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) +{ + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + if (!cq) + return -ENODEV; + mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_cq_destroy) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_CQ_DESTROY, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, + cq->queue_id); + mbox->vport = cq->phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.wq_create.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2506 CQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + /* Remove cq from any list */ + list_del_init(&cq->list); + if (rc != MBX_TIMEOUT) + mempool_free(mbox, cq->phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_wq_destroy - Destroy a Work Queue on the HBA + * @wq: The queue structure associated with the queue to destroy. + * + * This function destroys a queue, as detailed in @wq by sending an mailbox + * command, specific to the type of queue, to the HBA. + * + * The @wq struct is used to get the queue ID of the queue to destroy. + * + * On success this function will return a zero. If the queue destroy mailbox + * command fails this function will return ENXIO. + **/ +uint32_t +lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) +{ + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + if (!wq) + return -ENODEV; + mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_wq_destroy) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, + wq->queue_id); + mbox->vport = wq->phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2508 WQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + /* Remove wq from any list */ + list_del_init(&wq->list); + if (rc != MBX_TIMEOUT) + mempool_free(mbox, wq->phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_rq_destroy - Destroy a Receive Queue on the HBA + * @rq: The queue structure associated with the queue to destroy. + * + * This function destroys a queue, as detailed in @rq by sending an mailbox + * command, specific to the type of queue, to the HBA. 
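+ *
+ * (Both halves of the pair go down together: the routine below issues
+ * one RQ_DESTROY for the header queue and then reuses the same mailbox
+ * for the data queue, so a single call
+ *
+ *	lpfc_rq_destroy(phba, hrq, drq);
+ *
+ * unwinds everything lpfc_rq_create() set up. Illustration only, not
+ * from the original patch.)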
+ * + * The @rq struct is used to get the queue ID of the queue to destroy. + * + * On success this function will return a zero. If the queue destroy mailbox + * command fails this function will return ENXIO. + **/ +uint32_t +lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, + struct lpfc_queue *drq) +{ + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + if (!hrq || !drq) + return -ENODEV; + mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_rq_destroy) - + sizeof(struct mbox_header)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, + hrq->queue_id); + mbox->vport = hrq->phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2509 RQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + if (rc != MBX_TIMEOUT) + mempool_free(mbox, hrq->phba->mbox_mem_pool); + return -ENXIO; + } + bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, + drq->queue_id); + rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2510 RQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + list_del_init(&hrq->list); + list_del_init(&drq->list); + if (rc != MBX_TIMEOUT) + mempool_free(mbox, hrq->phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA + * @phba: The virtual port for which this call being executed. + * @pdma_phys_addr0: Physical address of the 1st SGL page. + * @pdma_phys_addr1: Physical address of the 2nd SGL page. + * @xritag: the xritag that ties this io to the SGL pages. + * + * This routine will post the sgl pages for the IO that has the xritag + * that is in the iocbq structure. The xritag is assigned during iocbq + * creation and persists for as long as the driver is loaded. + * if the caller has fewer than 256 scatter gather segments to map then + * pdma_phys_addr1 should be 0. + * If the caller needs to map more than 256 scatter gather segment then + * pdma_phys_addr1 should be a valid physical address. + * physical address for SGLs must be 64 byte aligned. + * If you are going to map 2 SGL's then the first one must have 256 entries + * the second sgl can have between 1 and 256 entries. 
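+ *
+ * Illustration only, not from the original patch: for an IO whose SGL
+ * fits in a single 64-byte-aligned page (fewer than 256 entries), the
+ * second page address is simply zero:
+ *
+ *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
+ *
+ * (sglq is assumed to be a driver SGL queue entry, as used elsewhere in
+ * this file.)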
+ *
+ * Return codes:
+ *	0 - Success
+ *	-ENXIO, -ENOMEM - Failure
+ **/
+int
+lpfc_sli4_post_sgl(struct lpfc_hba *phba,
+		   dma_addr_t pdma_phys_addr0,
+		   dma_addr_t pdma_phys_addr1,
+		   uint16_t xritag)
+{
+	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (xritag == NO_XRI) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0364 Invalid param:\n");
+		return -EINVAL;
+	}
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+			 sizeof(struct lpfc_mbx_post_sgl_pages) -
+			 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+
+	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
+				&mbox->u.mqe.un.post_sgl_pages;
+	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
+	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
+
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
+				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
+				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
+
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
+				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
+				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2511 POST_SGL mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	/* return the mailbox status so failures are visible to the caller */
+	return rc;
+}
+/**
+ * lpfc_sli4_remove_all_sgl_pages - Remove all scatter gather list pages from the HBA
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will remove all of the sgl pages registered with the hba.
+ *
+ * Return codes:
+ *	0 - Success
+ *	-ENXIO, -ENOMEM - Failure
+ **/
+int
+lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
+			 LPFC_SLI4_MBX_EMBED);
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_next_xritag - Get an xritag for the io
+ * @phba: Pointer to HBA context object.
+ *
+ * This function gets an xritag for the iocb. It returns the allocated
+ * xritag on success; if there is no unused xritag it will return
+ * NO_XRI (0xffff), which is not a valid xritag.
+ * The caller is not required to hold any lock.
+ **/
+uint16_t
+lpfc_sli4_next_xritag(struct lpfc_hba *phba)
+{
+	uint16_t xritag;
+
+	spin_lock_irq(&phba->hbalock);
+	xritag = phba->sli4_hba.next_xri;
+	if ((xritag != (uint16_t) -1) && xritag <
+		(phba->sli4_hba.max_cfg_param.max_xri +
+		 phba->sli4_hba.max_cfg_param.xri_base)) {
+		phba->sli4_hba.next_xri++;
+		phba->sli4_hba.max_cfg_param.xri_used++;
+		spin_unlock_irq(&phba->hbalock);
+		return xritag;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"2004 Failed to allocate XRI, last XRITAG is %d"
+			" Max XRI is %d, Used XRI is %d\n",
+			phba->sli4_hba.next_xri,
+			phba->sli4_hba.max_cfg_param.max_xri,
+			phba->sli4_hba.max_cfg_param.xri_used);
+	return -1;
+}
+
+/**
+ * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post a block of driver's sgl pages to the
+ * HBA using non-embedded mailbox command. No Lock is held. This routine
+ * is only called when the driver is loading and after all IO has been
+ * stopped.
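+ *
+ * Illustration only, not from the original patch: each posted SGL
+ * contributes one struct sgl_page_pairs (four 32-bit page address
+ * words, i.e. an assumed 16 bytes) to the payload, so
+ *
+ *	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+ *		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ *
+ * and the routine refuses to build a request larger than PAGE_SIZE.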
+ **/ +int +lpfc_sli4_post_sgl_list(struct lpfc_hba *phba) +{ + struct lpfc_sglq *sglq_entry; + struct lpfc_mbx_post_uembed_sgl_page1 *sgl; + struct sgl_page_pairs *sgl_pg_pairs; + void *viraddr; + LPFC_MBOXQ_t *mbox; + uint32_t reqlen, alloclen, pg_pairs; + uint32_t mbox_tmo; + uint16_t xritag_start = 0; + int els_xri_cnt, rc = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + /* The number of sgls to be posted */ + els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); + + reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); + if (reqlen > PAGE_SIZE) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "2559 Block sgl registration required DMA " + "size (%d) great than a page\n", reqlen); + return -ENOMEM; + } + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2560 Failed to allocate mbox cmd memory\n"); + return -ENOMEM; + } + + /* Allocate DMA memory and set up the non-embedded mailbox command */ + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, + LPFC_SLI4_MBX_NEMBED); + + if (alloclen < reqlen) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0285 Allocated DMA memory size (%d) is " + "less than the requested DMA memory " + "size (%d)\n", alloclen, reqlen); + lpfc_sli4_mbox_cmd_free(phba, mbox); + return -ENOMEM; + } + + /* Get the first SGE entry from the non-embedded DMA memory */ + if (unlikely(!mbox->sge_array)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2525 Failed to get the non-embedded SGE " + "virtual address\n"); + lpfc_sli4_mbox_cmd_free(phba, mbox); + return -ENOMEM; + } + viraddr = mbox->sge_array->addr[0]; + + /* Set up the SGL pages in the non-embedded DMA pages */ + sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; + sgl_pg_pairs = &sgl->sgl_pg_pairs; + + for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { + sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; + /* Set up the sge entry */ + sgl_pg_pairs->sgl_pg0_addr_lo = + cpu_to_le32(putPaddrLow(sglq_entry->phys)); + sgl_pg_pairs->sgl_pg0_addr_hi = + cpu_to_le32(putPaddrHigh(sglq_entry->phys)); + sgl_pg_pairs->sgl_pg1_addr_lo = + cpu_to_le32(putPaddrLow(0)); + sgl_pg_pairs->sgl_pg1_addr_hi = + cpu_to_le32(putPaddrHigh(0)); + /* Keep the first xritag on the list */ + if (pg_pairs == 0) + xritag_start = sglq_entry->sli4_xritag; + sgl_pg_pairs++; + } + bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); + pg_pairs = (pg_pairs > 0) ? 
(pg_pairs - 1) : pg_pairs; + bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); + /* Perform endian conversion if necessary */ + sgl->word0 = cpu_to_le32(sgl->word0); + + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (rc != MBX_TIMEOUT) + lpfc_sli4_mbox_cmd_free(phba, mbox); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2513 POST_SGL_BLOCK mailbox command failed " + "status x%x add_status x%x mbx status x%x\n", + shdr_status, shdr_add_status, rc); + rc = -ENXIO; + } + return rc; +} + +/** + * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware + * @phba: pointer to lpfc hba data structure. + * @sblist: pointer to scsi buffer list. + * @count: number of scsi buffers on the list. + * + * This routine is invoked to post a block of @count scsi sgl pages from a + * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. + * No Lock is held. + * + **/ +int +lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, + int cnt) +{ + struct lpfc_scsi_buf *psb; + struct lpfc_mbx_post_uembed_sgl_page1 *sgl; + struct sgl_page_pairs *sgl_pg_pairs; + void *viraddr; + LPFC_MBOXQ_t *mbox; + uint32_t reqlen, alloclen, pg_pairs; + uint32_t mbox_tmo; + uint16_t xritag_start = 0; + int rc = 0; + uint32_t shdr_status, shdr_add_status; + dma_addr_t pdma_phys_bpl1; + union lpfc_sli4_cfg_shdr *shdr; + + /* Calculate the requested length of the dma memory */ + reqlen = cnt * sizeof(struct sgl_page_pairs) + + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); + if (reqlen > PAGE_SIZE) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0217 Block sgl registration required DMA " + "size (%d) great than a page\n", reqlen); + return -ENOMEM; + } + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0283 Failed to allocate mbox cmd memory\n"); + return -ENOMEM; + } + + /* Allocate DMA memory and set up the non-embedded mailbox command */ + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, + LPFC_SLI4_MBX_NEMBED); + + if (alloclen < reqlen) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2561 Allocated DMA memory size (%d) is " + "less than the requested DMA memory " + "size (%d)\n", alloclen, reqlen); + lpfc_sli4_mbox_cmd_free(phba, mbox); + return -ENOMEM; + } + + /* Get the first SGE entry from the non-embedded DMA memory */ + if (unlikely(!mbox->sge_array)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2565 Failed to get the non-embedded SGE " + "virtual address\n"); + lpfc_sli4_mbox_cmd_free(phba, mbox); + return -ENOMEM; + } + viraddr = mbox->sge_array->addr[0]; + + /* Set up the SGL pages in the non-embedded DMA pages */ + sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; + sgl_pg_pairs = &sgl->sgl_pg_pairs; + + pg_pairs = 0; + list_for_each_entry(psb, sblist, list) { + /* Set up the sge entry */ + sgl_pg_pairs->sgl_pg0_addr_lo = + cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); + sgl_pg_pairs->sgl_pg0_addr_hi = + cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); + if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) + pdma_phys_bpl1 = 
psb->dma_phys_bpl + SGL_PAGE_SIZE; + else + pdma_phys_bpl1 = 0; + sgl_pg_pairs->sgl_pg1_addr_lo = + cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); + sgl_pg_pairs->sgl_pg1_addr_hi = + cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); + /* Keep the first xritag on the list */ + if (pg_pairs == 0) + xritag_start = psb->cur_iocbq.sli4_xritag; + sgl_pg_pairs++; + pg_pairs++; + } + bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); + bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); + /* Perform endian conversion if necessary */ + sgl->word0 = cpu_to_le32(sgl->word0); + + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (rc != MBX_TIMEOUT) + lpfc_sli4_mbox_cmd_free(phba, mbox); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2564 POST_SGL_BLOCK mailbox command failed " + "status x%x add_status x%x mbx status x%x\n", + shdr_status, shdr_add_status, rc); + rc = -ENXIO; + } + return rc; +} + +/** + * lpfc_fc_frame_check - Check that this frame is a valid frame to handle + * @phba: pointer to lpfc_hba struct that the frame was received on + * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) + * + * This function checks the fields in the @fc_hdr to see if the FC frame is a + * valid type of frame that the LPFC driver will handle. This function will + * return a zero if the frame is a valid frame or a non zero value when the + * frame does not pass the check. + **/ +static int +lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) +{ + char *rctl_names[] = FC_RCTL_NAMES_INIT; + char *type_names[] = FC_TYPE_NAMES_INIT; + struct fc_vft_header *fc_vft_hdr; + + switch (fc_hdr->fh_r_ctl) { + case FC_RCTL_DD_UNCAT: /* uncategorized information */ + case FC_RCTL_DD_SOL_DATA: /* solicited data */ + case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ + case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ + case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ + case FC_RCTL_DD_DATA_DESC: /* data descriptor */ + case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ + case FC_RCTL_DD_CMD_STATUS: /* command status */ + case FC_RCTL_ELS_REQ: /* extended link services request */ + case FC_RCTL_ELS_REP: /* extended link services reply */ + case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ + case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ + case FC_RCTL_BA_NOP: /* basic link service NOP */ + case FC_RCTL_BA_ABTS: /* basic link service abort */ + case FC_RCTL_BA_RMC: /* remove connection */ + case FC_RCTL_BA_ACC: /* basic accept */ + case FC_RCTL_BA_RJT: /* basic reject */ + case FC_RCTL_BA_PRMT: + case FC_RCTL_ACK_1: /* acknowledge_1 */ + case FC_RCTL_ACK_0: /* acknowledge_0 */ + case FC_RCTL_P_RJT: /* port reject */ + case FC_RCTL_F_RJT: /* fabric reject */ + case FC_RCTL_P_BSY: /* port busy */ + case FC_RCTL_F_BSY: /* fabric busy to data frame */ + case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ + case FC_RCTL_LCR: /* link credit reset */ + case FC_RCTL_END: /* end */ + break; + case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ + fc_vft_hdr = (struct fc_vft_header *)fc_hdr; + fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; + return lpfc_fc_frame_check(phba, fc_hdr); + default: + goto 
drop; + } + switch (fc_hdr->fh_type) { + case FC_TYPE_BLS: + case FC_TYPE_ELS: + case FC_TYPE_FCP: + case FC_TYPE_CT: + break; + case FC_TYPE_IP: + case FC_TYPE_ILS: + default: + goto drop; + } + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "2538 Received frame rctl:%s type:%s\n", + rctl_names[fc_hdr->fh_r_ctl], + type_names[fc_hdr->fh_type]); + return 0; +drop: + lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, + "2539 Dropped frame rctl:%s type:%s\n", + rctl_names[fc_hdr->fh_r_ctl], + type_names[fc_hdr->fh_type]); + return 1; +} + +/** + * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame + * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) + * + * This function processes the FC header to retrieve the VFI from the VF + * header, if one exists. This function will return the VFI if one exists + * or 0 if no VSAN Header exists. + **/ +static uint32_t +lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) +{ + struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; + + if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) + return 0; + return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); +} + +/** + * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to + * @phba: Pointer to the HBA structure to search for the vport on + * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) + * @fcfi: The FC Fabric ID that the frame came from + * + * This function searches the @phba for a vport that matches the content of the + * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the + * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function + * returns the matching vport pointer or NULL if unable to match frame to a + * vport. + **/ +static struct lpfc_vport * +lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, + uint16_t fcfi) +{ + struct lpfc_vport **vports; + struct lpfc_vport *vport = NULL; + int i; + uint32_t did = (fc_hdr->fh_d_id[0] << 16 | + fc_hdr->fh_d_id[1] << 8 | + fc_hdr->fh_d_id[2]); + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + if (phba->fcf.fcfi == fcfi && + vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && + vports[i]->fc_myDID == did) { + vport = vports[i]; + break; + } + } + lpfc_destroy_vport_work_array(phba, vports); + return vport; +} + +/** + * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences + * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame + * + * This function searches through the existing incomplete sequences that have + * been sent to this @vport. If the frame matches one of the incomplete + * sequences then the dbuf in the @dmabuf is added to the list of frames that + * make up that sequence. If no sequence is found that matches this frame then + * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list + * This function returns a pointer to the first dmabuf in the sequence list that + * the frame was linked to. 
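+ *
+ * (Illustrative note, not from the original patch: frames are matched
+ * to a pending sequence by the tuple {SEQ_ID, OX_ID, S_ID}, e.g.
+ *
+ *	match = hdr_a->fh_seq_id == hdr_b->fh_seq_id &&
+ *		hdr_a->fh_ox_id == hdr_b->fh_ox_id &&
+ *		!memcmp(&hdr_a->fh_s_id, &hdr_b->fh_s_id, 3);
+ *
+ * where the 3-byte memcmp covers the 24-bit source ID.)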
+ **/ +static struct hbq_dmabuf * +lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) +{ + struct fc_frame_header *new_hdr; + struct fc_frame_header *temp_hdr; + struct lpfc_dmabuf *d_buf; + struct lpfc_dmabuf *h_buf; + struct hbq_dmabuf *seq_dmabuf = NULL; + struct hbq_dmabuf *temp_dmabuf = NULL; + + new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; + /* Use the hdr_buf to find the sequence that this frame belongs to */ + list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { + temp_hdr = (struct fc_frame_header *)h_buf->virt; + if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || + (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || + (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) + continue; + /* found a pending sequence that matches this frame */ + seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); + break; + } + if (!seq_dmabuf) { + /* + * This indicates first frame received for this sequence. + * Queue the buffer on the vport's rcv_buffer_list. + */ + list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); + return dmabuf; + } + temp_hdr = seq_dmabuf->hbuf.virt; + if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) { + list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list); + return dmabuf; + } + /* find the correct place in the sequence to insert this frame */ + list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { + temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); + temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; + /* + * If the frame's sequence count is greater than the frame on + * the list then insert the frame right after this frame + */ + if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) { + list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); + return seq_dmabuf; + } + } + return NULL; +} + +/** + * lpfc_seq_complete - Indicates if a sequence is complete + * @dmabuf: pointer to a dmabuf that describes the FC sequence + * + * This function checks the sequence, starting with the frame described by + * @dmabuf, to see if all the frames associated with this sequence are present. + * the frames associated with this sequence are linked to the @dmabuf using the + * dbuf list. This function looks for two major things. 1) That the first frame + * has a sequence count of zero. 2) There is a frame with last frame of sequence + * set. 3) That there are no holes in the sequence count. The function will + * return 1 when the sequence is complete, otherwise it will return 0. + **/ +static int +lpfc_seq_complete(struct hbq_dmabuf *dmabuf) +{ + struct fc_frame_header *hdr; + struct lpfc_dmabuf *d_buf; + struct hbq_dmabuf *seq_dmabuf; + uint32_t fctl; + int seq_count = 0; + + hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; + /* make sure first fame of sequence has a sequence count of zero */ + if (hdr->fh_seq_cnt != seq_count) + return 0; + fctl = (hdr->fh_f_ctl[0] << 16 | + hdr->fh_f_ctl[1] << 8 | + hdr->fh_f_ctl[2]); + /* If last frame of sequence we can return success. */ + if (fctl & FC_FC_END_SEQ) + return 1; + list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { + seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); + hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; + /* If there is a hole in the sequence count then fail. */ + if (++seq_count != hdr->fh_seq_cnt) + return 0; + fctl = (hdr->fh_f_ctl[0] << 16 | + hdr->fh_f_ctl[1] << 8 | + hdr->fh_f_ctl[2]); + /* If last frame of sequence we can return success. 
*/ + if (fctl & FC_FC_END_SEQ) + return 1; + } + return 0; +} + +/** + * lpfc_prep_seq - Prep sequence for ULP processing + * @vport: Pointer to the vport on which this sequence was received + * @dmabuf: pointer to a dmabuf that describes the FC sequence + * + * This function takes a sequence, described by a list of frames, and creates + * a list of iocbq structures to describe the sequence. This iocbq list will be + * used to issue to the generic unsolicited sequence handler. This routine + * returns a pointer to the first iocbq in the list. If the function is unable + * to allocate an iocbq then it throw out the received frames that were not + * able to be described and return a pointer to the first iocbq. If unable to + * allocate any iocbqs (including the first) this function will return NULL. + **/ +static struct lpfc_iocbq * +lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) +{ + struct lpfc_dmabuf *d_buf, *n_buf; + struct lpfc_iocbq *first_iocbq, *iocbq; + struct fc_frame_header *fc_hdr; + uint32_t sid; + + fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; + /* remove from receive buffer list */ + list_del_init(&seq_dmabuf->hbuf.list); + /* get the Remote Port's SID */ + sid = (fc_hdr->fh_s_id[0] << 16 | + fc_hdr->fh_s_id[1] << 8 | + fc_hdr->fh_s_id[2]); + /* Get an iocbq struct to fill in. */ + first_iocbq = lpfc_sli_get_iocbq(vport->phba); + if (first_iocbq) { + /* Initialize the first IOCB. */ + first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; + first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; + first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); + first_iocbq->iocb.unsli3.rcvsli3.vpi = + vport->vpi + vport->phba->vpi_base; + /* put the first buffer into the first IOCBq */ + first_iocbq->context2 = &seq_dmabuf->dbuf; + first_iocbq->context3 = NULL; + first_iocbq->iocb.ulpBdeCount = 1; + first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = + LPFC_DATA_BUF_SIZE; + first_iocbq->iocb.un.rcvels.remoteID = sid; + } + iocbq = first_iocbq; + /* + * Each IOCBq can have two Buffers assigned, so go through the list + * of buffers for this sequence and save two buffers in each IOCBq + */ + list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { + if (!iocbq) { + lpfc_in_buf_free(vport->phba, d_buf); + continue; + } + if (!iocbq->context3) { + iocbq->context3 = d_buf; + iocbq->iocb.ulpBdeCount++; + iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = + LPFC_DATA_BUF_SIZE; + } else { + iocbq = lpfc_sli_get_iocbq(vport->phba); + if (!iocbq) { + if (first_iocbq) { + first_iocbq->iocb.ulpStatus = + IOSTAT_FCP_RSP_ERROR; + first_iocbq->iocb.un.ulpWord[4] = + IOERR_NO_RESOURCES; + } + lpfc_in_buf_free(vport->phba, d_buf); + continue; + } + iocbq->context2 = d_buf; + iocbq->context3 = NULL; + iocbq->iocb.ulpBdeCount = 1; + iocbq->iocb.un.cont64[0].tus.f.bdeSize = + LPFC_DATA_BUF_SIZE; + iocbq->iocb.un.rcvels.remoteID = sid; + list_add_tail(&iocbq->list, &first_iocbq->list); + } + } + return first_iocbq; +} + +/** + * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware + * @phba: Pointer to HBA context object. + * + * This function is called with no lock held. This function processes all + * the received buffers and gives it to upper layers when a received buffer + * indicates that it is the final frame in the sequence. The interrupt + * service routine processes received buffers at interrupt contexts and adds + * received dma buffers to the rb_pend_list queue and signals the worker thread. 
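+ * (Illustrative flow, not from the original patch:
+ *
+ *	frame -> lpfc_fc_frame_check() -> lpfc_fc_frame_to_vport()
+ *	      -> lpfc_fc_frame_add() -> lpfc_seq_complete()
+ *	      -> lpfc_prep_seq() -> lpfc_complete_unsol_iocb()
+ *
+ * where each stage either drops the frame or passes it on.)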
+ * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the + * appropriate receive function when the final frame in a sequence is received. + **/ +int +lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba) +{ + LIST_HEAD(cmplq); + struct hbq_dmabuf *dmabuf, *seq_dmabuf; + struct fc_frame_header *fc_hdr; + struct lpfc_vport *vport; + uint32_t fcfi; + struct lpfc_iocbq *iocbq; + + /* Clear hba flag and get all received buffers into the cmplq */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~HBA_RECEIVE_BUFFER; + list_splice_init(&phba->rb_pend_list, &cmplq); + spin_unlock_irq(&phba->hbalock); + + /* Process each received buffer */ + while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) { + fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; + /* check to see if this a valid type of frame */ + if (lpfc_fc_frame_check(phba, fc_hdr)) { + lpfc_in_buf_free(phba, &dmabuf->dbuf); + continue; + } + fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe); + vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); + if (!vport) { + /* throw out the frame */ + lpfc_in_buf_free(phba, &dmabuf->dbuf); + continue; + } + /* Link this frame */ + seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); + if (!seq_dmabuf) { + /* unable to add frame to vport - throw it out */ + lpfc_in_buf_free(phba, &dmabuf->dbuf); + continue; + } + /* If not last frame in sequence continue processing frames. */ + if (!lpfc_seq_complete(seq_dmabuf)) { + /* + * When saving off frames post a new one and mark this + * frame to be freed when it is finished. + **/ + lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); + dmabuf->tag = -1; + continue; + } + fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; + iocbq = lpfc_prep_seq(vport, seq_dmabuf); + if (!lpfc_complete_unsol_iocb(phba, + &phba->sli.ring[LPFC_ELS_RING], + iocbq, fc_hdr->fh_r_ctl, + fc_hdr->fh_type)) + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2540 Ring %d handler: unexpected Rctl " + "x%x Type x%x received\n", + LPFC_ELS_RING, + fc_hdr->fh_r_ctl, fc_hdr->fh_type); + }; + return 0; +} -- cgit v1.2.3 From 04c684968487eb4f98728363a97b8da48f3bb958 Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 22 May 2009 14:52:52 -0400 Subject: [SCSI] lpfc 8.3.2 : Addition of SLI4 Interface - Mailbox handling The mailbox commands themselves are the same, or very similar to their SLI3 counterparts. This patch genericizes mailbox command handling and adds support for the new SLI4 mailbox queue. 
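The mechanical part of the change is visible throughout the diff below: every "pmboxq->mb" access becomes "pmboxq->u.mb", because the mailbox image now lives in a union that can also be viewed as an SLI4 mailbox queue entry. As an editorial sketch of the new layout (type names abbreviated and field sizes assumed; the real definitions live in the lpfc headers), the following toy program compiles standalone:

    #include <stdio.h>

    /* Toy stand-ins for the driver types; illustration only. */
    typedef struct { int mbxCommand; int mbxStatus; } MAILBOX_t;
    typedef struct { unsigned int words[64]; } MQE_t;

    typedef struct {
            union {
                    MAILBOX_t mb;   /* SLI3-style mailbox image */
                    MQE_t mqe;      /* SLI4 mailbox queue entry view */
            } u;
    } LPFC_MBOXQ_t;

    int main(void)
    {
            LPFC_MBOXQ_t m = { .u.mb = { .mbxCommand = 0x12 } };

            /* pre-patch code said m.mb.mbxCommand; post-patch m.u.mb.* */
            printf("cmd=0x%x\n", m.u.mb.mbxCommand);
            return 0;
    }

Both views share storage, so SLI3 and SLI4 code paths can hand the same LPFC_MBOXQ_t around and pick the interpretation that matches the port type.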
Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_attr.c | 47 +-- drivers/scsi/lpfc/lpfc_crtn.h | 1 + drivers/scsi/lpfc/lpfc_els.c | 6 +- drivers/scsi/lpfc/lpfc_hbadisc.c | 22 +- drivers/scsi/lpfc/lpfc_init.c | 22 +- drivers/scsi/lpfc/lpfc_mbox.c | 632 ++++++++++++++++++++++++++++++++++--- drivers/scsi/lpfc/lpfc_nportdisc.c | 13 +- drivers/scsi/lpfc/lpfc_sli.c | 505 +++++++++++++++++++++++++---- 8 files changed, 1101 insertions(+), 147 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 82016fc672b1..463104d96867 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -507,12 +507,14 @@ lpfc_issue_lip(struct Scsi_Host *shost) return -ENOMEM; memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); - pmboxq->mb.mbxCommand = MBX_DOWN_LINK; - pmboxq->mb.mbxOwner = OWN_HOST; + pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; + pmboxq->u.mb.mbxOwner = OWN_HOST; mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); - if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { + if ((mbxstatus == MBX_SUCCESS) && + (pmboxq->u.mb.mbxStatus == 0 || + pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) { memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed); @@ -791,7 +793,8 @@ lpfc_get_hba_info(struct lpfc_hba *phba, uint32_t *mrpi, uint32_t *arpi, uint32_t *mvpi, uint32_t *avpi) { - struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_mbx_read_config *rd_config; LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; int rc = 0; @@ -813,7 +816,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba, return 0; memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); - pmb = &pmboxq->mb; + pmb = &pmboxq->u.mb; pmb->mbxCommand = MBX_READ_CONFIG; pmb->mbxOwner = OWN_HOST; pmboxq->context1 = NULL; @@ -3247,7 +3250,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr, } } - memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off, + memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off, buf, count); phba->sysfs_mbox.offset = off + count; @@ -3289,6 +3292,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int rc; + MAILBOX_t *pmb; if (off > MAILBOX_CMD_SIZE) return -ERANGE; @@ -3313,8 +3317,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, if (off == 0 && phba->sysfs_mbox.state == SMBOX_WRITING && phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) { - - switch (phba->sysfs_mbox.mbox->mb.mbxCommand) { + pmb = &phba->sysfs_mbox.mbox->u.mb; + switch (pmb->mbxCommand) { /* Offline only */ case MBX_INIT_LINK: case MBX_DOWN_LINK: @@ -3331,7 +3335,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, if (!(vport->fc_flag & FC_OFFLINE_MODE)) { printk(KERN_WARNING "mbox_read:Command 0x%x " "is illegal in on-line state\n", - phba->sysfs_mbox.mbox->mb.mbxCommand); + pmb->mbxCommand); sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EPERM; @@ -3367,13 +3371,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, case MBX_CONFIG_PORT: case MBX_RUN_BIU_DIAG: printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n", - phba->sysfs_mbox.mbox->mb.mbxCommand); + pmb->mbxCommand); sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EPERM; default: printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n", - 
phba->sysfs_mbox.mbox->mb.mbxCommand); + pmb->mbxCommand); sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EPERM; @@ -3383,14 +3387,14 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, * or RESTART mailbox commands until the HBA is restarted. */ if (phba->pport->stopped && - phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY && - phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART && - phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS && - phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN) + pmb->mbxCommand != MBX_DUMP_MEMORY && + pmb->mbxCommand != MBX_RESTART && + pmb->mbxCommand != MBX_WRITE_VPARMS && + pmb->mbxCommand != MBX_WRITE_WWN) lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, "1259 mbox: Issued mailbox cmd " "0x%x while in stopped state.\n", - phba->sysfs_mbox.mbox->mb.mbxCommand); + pmb->mbxCommand); phba->sysfs_mbox.mbox->vport = vport; @@ -3416,8 +3420,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, spin_unlock_irq(&phba->hbalock); rc = lpfc_sli_issue_mbox_wait (phba, phba->sysfs_mbox.mbox, - lpfc_mbox_tmo_val(phba, - phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ); + lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ); spin_lock_irq(&phba->hbalock); } @@ -3439,7 +3442,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, return -EAGAIN; } - memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); + memcpy(buf, (uint8_t *) &pmb + off, count); phba->sysfs_mbox.offset = off + count; @@ -3711,14 +3714,14 @@ lpfc_get_stats(struct Scsi_Host *shost) return NULL; memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); - pmb = &pmboxq->mb; + pmb = &pmboxq->u.mb; pmb->mbxCommand = MBX_READ_STATUS; pmb->mbxOwner = OWN_HOST; pmboxq->context1 = NULL; pmboxq->vport = vport; if ((vport->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + (!(psli->sli_flag & LPFC_SLI_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); @@ -3817,7 +3820,7 @@ lpfc_reset_stats(struct Scsi_Host *shost) return; memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); - pmb = &pmboxq->mb; + pmb = &pmboxq->u.mb; pmb->mbxCommand = MBX_READ_STATUS; pmb->mbxOwner = OWN_HOST; pmb->un.varWords[0] = 0x1; /* reset request */ diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 3802e455734f..e0f1cd4b3d3d 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -209,6 +209,7 @@ void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *); uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t); +void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_reset_barrier(struct lpfc_hba * phba); int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 9fe36bf6fd14..2c034a554c88 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -4277,7 +4277,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, lpfc_init_link(phba, mbox, phba->cfg_topology, phba->cfg_link_speed); - mbox->mb.un.varInitLnk.lipsr_AL_PA = 0; + mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->vport = vport; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); @@ -4426,7 +4426,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) uint16_t xri, 
status; uint32_t cmdsize; - mb = &pmb->mb; + mb = &pmb->u.mb; ndlp = (struct lpfc_nodelist *) pmb->context2; xri = (uint16_t) ((unsigned long)(pmb->context1)); @@ -5755,7 +5755,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 0fc66005d545..2270d9a7c8e3 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -879,7 +879,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_sli *psli = &phba->sli; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; uint32_t control; /* Since we don't do discovery right now, turn these off here */ @@ -942,7 +942,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; - if (pmb->mb.mbxStatus) + if (pmb->u.mb.mbxStatus) goto out; mempool_free(pmb, phba->mbox_mem_pool); @@ -970,7 +970,7 @@ out: lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, "0306 CONFIG_LINK mbxStatus error x%x " "HBA state x%x\n", - pmb->mb.mbxStatus, vport->port_state); + pmb->u.mb.mbxStatus, vport->port_state); mempool_free(pmb, phba->mbox_mem_pool); lpfc_linkdown(phba); @@ -1202,7 +1202,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); READ_LA_VAR *la; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); /* Unblock ELS traffic */ @@ -1217,7 +1217,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) goto lpfc_mbx_cmpl_read_la_free_mbuf; } - la = (READ_LA_VAR *) & pmb->mb.un.varReadLA; + la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; memcpy(&phba->alpa_map[0], mp->virt, 128); @@ -1355,7 +1355,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) static void lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); @@ -1408,7 +1408,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; switch (mb->mbxStatus) { case 0x0011: @@ -2279,7 +2279,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ if ((mb = phba->sli.mbox_active)) { - if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && + if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { mb->context2 = NULL; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -2288,7 +2288,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { - if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && + if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) 
mb->context2)) { mp = (struct lpfc_dmabuf *) (mb->context1); if (mp) { @@ -2970,7 +2970,7 @@ restart_disc: lpfc_linkdown(phba); lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, phba->cfg_link_speed); - initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; + initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; initlinkmbox->vport = vport; initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); @@ -3069,7 +3069,7 @@ restart_disc: void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; struct lpfc_vport *vport = pmb->vport; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index e9e4a1df8989..ff821bb77167 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -108,7 +108,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba) return -ENOMEM; } - mb = &pmb->mb; + mb = &pmb->u.mb; phba->link_state = LPFC_INIT_MBX_CMDS; if (lpfc_is_LC_HBA(phba->pcidev->device)) { @@ -221,6 +221,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba) mb->mbxCommand, mb->mbxStatus); mb->un.varDmp.word_cnt = 0; } + /* dump mem may return a zero when finished or we got a + * mailbox error, either way we are done. + */ + if (mb->un.varDmp.word_cnt == 0) + break; if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, @@ -249,7 +254,7 @@ out_free_mbox: static void lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) { - if (pmboxq->mb.mbxStatus == MBX_SUCCESS) + if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) phba->temp_sensor_support = 1; else phba->temp_sensor_support = 0; @@ -276,7 +281,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) /* character array used for decoding dist type. */ char dist_char[] = "nabx"; - if (pmboxq->mb.mbxStatus != MBX_SUCCESS) { + if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { mempool_free(pmboxq, phba->mbox_mem_pool); return; } @@ -284,7 +289,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) prg = (struct prog_id *) &prog_id_word; /* word 7 contain option rom version */ - prog_id_word = pmboxq->mb.un.varWords[7]; + prog_id_word = pmboxq->u.mb.un.varWords[7]; /* Decode the Option rom version word to a readable string */ if (prg->dist < 4) @@ -341,7 +346,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) phba->link_state = LPFC_HBA_ERROR; return -ENOMEM; } - mb = &pmb->mb; + mb = &pmb->u.mb; /* Get login parameters for NID. 
*/ lpfc_read_sparam(phba, pmb, 0); @@ -476,17 +481,18 @@ lpfc_config_port_post(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "0352 Config MSI mailbox command " "failed, mbxCmd x%x, mbxStatus x%x\n", - pmb->mb.mbxCommand, pmb->mb.mbxStatus); + pmb->u.mb.mbxCommand, + pmb->u.mb.mbxStatus); mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } } + spin_lock_irq(&phba->hbalock); /* Initialize ERATT handling flag */ phba->hba_flag &= ~HBA_ERATT_HANDLED; /* Enable appropriate host interrupts */ - spin_lock_irq(&phba->hbalock); status = readl(phba->HCregaddr); status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; if (psli->num_rings > 0) @@ -2201,7 +2207,7 @@ lpfc_offline_prep(struct lpfc_hba * phba) } lpfc_destroy_vport_work_array(phba, vports); - lpfc_sli_flush_mbox_queue(phba); + lpfc_sli_mbox_sys_shutdown(phba); } /** diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 7f5899b70bd2..6aeb1c668e22 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -60,7 +60,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset) MAILBOX_t *mb; void *ctx; - mb = &pmb->mb; + mb = &pmb->u.mb; ctx = pmb->context2; /* Setup to dump VPD region */ @@ -92,7 +92,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) MAILBOX_t *mb; void *ctx; - mb = &pmb->mb; + mb = &pmb->u.mb; /* Save context so that we can restore after memset */ ctx = pmb->context2; @@ -127,7 +127,7 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_NV; mb->mbxOwner = OWN_HOST; @@ -153,7 +153,7 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_ASYNCEVT_ENABLE; mb->un.varCfgAsyncEvent.ring = ring; @@ -179,7 +179,7 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_HEARTBEAT; mb->mbxOwner = OWN_HOST; @@ -213,7 +213,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp) struct lpfc_sli *psli; psli = &phba->sli; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); INIT_LIST_HEAD(&mp->list); @@ -250,7 +250,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varClearLA.eventTag = phba->fc_eventTag; @@ -277,7 +277,7 @@ void lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { struct lpfc_vport *vport = phba->pport; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); /* NEW_FEATURE @@ -323,7 +323,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) int lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; uint32_t attentionConditions[2]; /* Sanity check */ @@ -407,7 +407,7 @@ lpfc_init_link(struct lpfc_hba * phba, struct lpfc_sli *psli; MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); psli = &phba->sli; @@ -494,7 +494,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) struct lpfc_sli *psli; psli = &phba->sli; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxOwner = OWN_HOST; @@ -517,7 
+517,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); - mb->un.varRdSparm.vpi = vpi; + mb->un.varRdSparm.vpi = vpi + phba->vpi_base; /* save address for completion */ pmb->context1 = mp; @@ -546,10 +546,12 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did, { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varUnregDID.did = did; + if (vpi != 0xffff) + vpi += phba->vpi_base; mb->un.varUnregDID.vpi = vpi; mb->mbxCommand = MBX_UNREG_D_ID; @@ -575,7 +577,7 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_CONFIG; @@ -600,7 +602,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_LNK_STAT; @@ -609,7 +611,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) } /** - * lpfc_reg_login - Prepare a mailbox command for registering remote login + * lpfc_reg_rpi - Prepare a mailbox command for registering remote login * @phba: pointer to lpfc hba data structure. * @vpi: virtual N_Port identifier. * @did: remote port identifier. @@ -633,17 +635,23 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) * 1 - DMA memory allocation failed **/ int -lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, +lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; uint8_t *sparam; struct lpfc_dmabuf *mp; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varRegLogin.rpi = 0; - mb->un.varRegLogin.vpi = vpi; + if (phba->sli_rev == LPFC_SLI_REV4) { + mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba); + if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR) + return 1; + } + + mb->un.varRegLogin.vpi = vpi + phba->vpi_base; mb->un.varRegLogin.did = did; mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ @@ -699,15 +707,16 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varUnregLogin.rpi = (uint16_t) rpi; mb->un.varUnregLogin.rsvd1 = 0; - mb->un.varUnregLogin.vpi = vpi; + mb->un.varUnregLogin.vpi = vpi + phba->vpi_base; mb->mbxCommand = MBX_UNREG_LOGIN; mb->mbxOwner = OWN_HOST; + return; } @@ -727,15 +736,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, * This routine prepares the mailbox command for registering a virtual N_Port. 
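 *
 * A minimal caller sketch (illustrative only, not taken from the driver
 * source; error handling trimmed, and the allocation pattern follows the
 * one used elsewhere in this series):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_reg_vpi(vport, mboxq);
 *	mboxq->vport = vport;
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(mboxq, phba->mbox_mem_pool);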
**/ void -lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, - LPFC_MBOXQ_t *pmb) +lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); - mb->un.varRegVpi.vpi = vpi; - mb->un.varRegVpi.sid = sid; + mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base; + mb->un.varRegVpi.sid = vport->fc_myDID; + mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; mb->mbxCommand = MBX_REG_VPI; mb->mbxOwner = OWN_HOST; @@ -762,10 +771,10 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, void lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); - mb->un.varUnregVpi.vpi = vpi; + mb->un.varUnregVpi.vpi = vpi + phba->vpi_base; mb->mbxCommand = MBX_UNREG_VPI; mb->mbxOwner = OWN_HOST; @@ -854,7 +863,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba) void lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varRdRev.cv = 1; mb->un.varRdRev.v3req = 1; /* Request SLI3 info */ @@ -947,7 +956,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id, uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb) { int i; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct config_hbq_var *hbqmb = &mb->un.varCfgHbq; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); @@ -1022,7 +1031,7 @@ void lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) { int i; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct lpfc_sli *psli; struct lpfc_sli_ring *pring; @@ -1077,7 +1086,7 @@ void lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; dma_addr_t pdma_addr; uint32_t bar_low, bar_high; size_t offset; @@ -1101,21 +1110,22 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* If HBA supports SLI=3 ask for it */ - if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { + if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) { if (phba->cfg_enable_bg) mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ + mb->un.varCfgPort.cdss = 1; /* Configure Security */ mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */ mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); if (phba->max_vpi && phba->cfg_enable_npiv && phba->vpd.sli3Feat.cmv) { - mb->un.varCfgPort.max_vpi = phba->max_vpi; + mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI; mb->un.varCfgPort.cmv = 1; } else mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; } else - phba->sli_rev = 2; + phba->sli_rev = LPFC_SLI_REV2; mb->un.varCfgPort.sli_mode = phba->sli_rev; /* Now setup pcb */ @@ -1247,7 +1257,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) void lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb->mbxCommand = MBX_KILL_BOARD; @@ -1306,29 +1316,98 @@ lpfc_mbox_get(struct lpfc_hba * phba) return mbq; } +/** + * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list + * @phba: pointer to lpfc hba data structure. + * @mbq: pointer to the driver internal queue element for mailbox command. 
+ * + * This routine puts the completed mailbox command into the mailbox command + * complete list. This is the unlocked version of the routine. The mailbox + * complete list is used by the driver worker thread to process mailbox + * complete callback functions outside the driver interrupt handler. + **/ +void +__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq) +{ + list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); +} + /** * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list * @phba: pointer to lpfc hba data structure. * @mbq: pointer to the driver internal queue element for mailbox command. * * This routine puts the completed mailbox command into the mailbox command - * complete list. This routine is called from driver interrupt handler - * context.The mailbox complete list is used by the driver worker thread - * to process mailbox complete callback functions outside the driver interrupt - * handler. + * complete list. This is the locked version of the routine. The mailbox + * complete list is used by the driver worker thread to process mailbox + * complete callback functions outside the driver interrupt handler. **/ void -lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) +lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq) { unsigned long iflag; /* This function expects to be called from interrupt context */ spin_lock_irqsave(&phba->hbalock, iflag); - list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); + __lpfc_mbox_cmpl_put(phba, mbq); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } +/** + * lpfc_mbox_cmd_check - Check the validity of a mailbox command + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to the driver internal queue element for mailbox command. + * + * This routine is to check whether a mailbox command is valid to be issued. + * This check is performed by the mailbox issue API before a client's mailbox + * command is posted to the mailbox transport. + * + * Return 0 - pass the check, -ENODEV - fail the check + **/ +int +lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + /* Mailbox commands that have a completion handler must also have a + * vport specified. + */ + if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl && + mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) { + if (!mboxq->vport) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT, + "1814 Mbox x%x failed, no vport\n", + mboxq->u.mb.mbxCommand); + dump_stack(); + return -ENODEV; + } + } + return 0; +} + +/** + * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command + * @phba: pointer to lpfc hba data structure. + * + * This routine is to check whether the HBA device is ready for posting a + * mailbox command. It is used by the mailbox transport API at the time it is + * to post a mailbox command to the device. + * + * Return 0 - pass the check, -ENODEV - fail the check + **/ +int +lpfc_mbox_dev_check(struct lpfc_hba *phba) +{ + /* If the PCI channel is in offline state, do not issue mbox */ + if (unlikely(pci_channel_offline(phba->pcidev))) + return -ENODEV; + + /* If the HBA is in error state, do not issue mbox */ + if (phba->link_state == LPFC_HBA_ERROR) + return -ENODEV; + + return 0; +} + /** * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value * @phba: pointer to lpfc hba data structure.
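 *
 * Callers scale the timeout returned here by HZ before handing it to
 * lpfc_sli_issue_mbox_wait(), as in this sketch (illustrative only,
 * mirroring the sysfs_mbox_read() usage earlier in this series):
 *
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmb,
 *			lpfc_mbox_tmo_val(phba, pmb->u.mb.mbxCommand) * HZ);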
@@ -1352,6 +1431,475 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) case MBX_WRITE_WWN: /* 0x98 */ case MBX_LOAD_EXP_ROM: /* 0x9C */ return LPFC_MBOX_TMO_FLASH_CMD; + case MBX_SLI4_CONFIG: /* 0x9b */ + return LPFC_MBOX_SLI4_CONFIG_TMO; } return LPFC_MBOX_TMO; } + +/** + * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command + * @mbox: pointer to lpfc mbox command. + * @sgentry: sge entry index. + * @phyaddr: physical address for the sge + * @length: Length of the sge. + * + * This routine sets up an entry in the non-embedded mailbox command at the sge + * index location. + **/ +void +lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry, + dma_addr_t phyaddr, uint32_t length) +{ + struct lpfc_mbx_nembed_cmd *nembed_sge; + + nembed_sge = (struct lpfc_mbx_nembed_cmd *) + &mbox->u.mqe.un.nembed_cmd; + nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr); + nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr); + nembed_sge->sge[sgentry].length = length; +} + +/** + * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command + * @mbox: pointer to lpfc mbox command. + * @sgentry: sge entry index. + * + * This routine gets an entry from the non-embedded mailbox command at the sge + * index location. + **/ +void +lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry, + struct lpfc_mbx_sge *sge) +{ + struct lpfc_mbx_nembed_cmd *nembed_sge; + + nembed_sge = (struct lpfc_mbx_nembed_cmd *) + &mbox->u.mqe.un.nembed_cmd; + sge->pa_lo = nembed_sge->sge[sgentry].pa_lo; + sge->pa_hi = nembed_sge->sge[sgentry].pa_hi; + sge->length = nembed_sge->sge[sgentry].length; +} + +/** + * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command + * @phba: pointer to lpfc hba data structure. + * @mbox: pointer to lpfc mbox command. + * + * This routine frees SLI4 specific mailbox command for sending IOCTL command. + **/ +void +lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox) +{ + struct lpfc_mbx_sli4_config *sli4_cfg; + struct lpfc_mbx_sge sge; + dma_addr_t phyaddr; + uint32_t sgecount, sgentry; + + sli4_cfg = &mbox->u.mqe.un.sli4_config; + + /* For embedded mbox command, just free the mbox command */ + if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) { + mempool_free(mbox, phba->mbox_mem_pool); + return; + } + + /* For non-embedded mbox command, we need to free the pages first */ + sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr); + /* There is nothing we can do if there is no sge address array */ + if (unlikely(!mbox->sge_array)) { + mempool_free(mbox, phba->mbox_mem_pool); + return; + } + /* Each non-embedded DMA memory was allocated in the length of a page */ + for (sgentry = 0; sgentry < sgecount; sgentry++) { + lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge); + phyaddr = getPaddr(sge.pa_hi, sge.pa_lo); + dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE, + mbox->sge_array->addr[sgentry], phyaddr); + } + /* Free the sge address array memory */ + kfree(mbox->sge_array); + /* Finally, free the mailbox command itself */ + mempool_free(mbox, phba->mbox_mem_pool); +} + +/** + * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command + * @phba: pointer to lpfc hba data structure. + * @mbox: pointer to lpfc mbox command. + * @subsystem: The sli4 config sub mailbox subsystem. + * @opcode: The sli4 config sub mailbox command opcode. + * @length: Length of the sli4 config mailbox command. + * + * This routine sets up the header fields of SLI4 specific mailbox command + * for sending IOCTL command. 
+ * + * Return: the actual length of the mbox command allocated (mostly useful + * for non-embedded mailbox commands). + **/ +int +lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, + uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb) +{ + struct lpfc_mbx_sli4_config *sli4_config; + union lpfc_sli4_cfg_shdr *cfg_shdr = NULL; + uint32_t alloc_len; + uint32_t resid_len; + uint32_t pagen, pcount; + void *viraddr; + dma_addr_t phyaddr; + + /* Set up SLI4 mailbox command header fields */ + memset(mbox, 0, sizeof(*mbox)); + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG); + + /* Set up SLI4 ioctl command header fields */ + sli4_config = &mbox->u.mqe.un.sli4_config; + + /* Setup for the embedded mbox command */ + if (emb) { + /* Set up main header fields */ + bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1); + sli4_config->header.cfg_mhdr.payload_length = + LPFC_MBX_CMD_HDR_LENGTH + length; + /* Set up sub-header fields following main header */ + bf_set(lpfc_mbox_hdr_opcode, + &sli4_config->header.cfg_shdr.request, opcode); + bf_set(lpfc_mbox_hdr_subsystem, + &sli4_config->header.cfg_shdr.request, subsystem); + sli4_config->header.cfg_shdr.request.request_length = length; + return length; + } + + /* Setup for the non-embedded mbox command */ + pcount = (PAGE_ALIGN(length))/PAGE_SIZE; + pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ? + LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount; + /* Allocate record for keeping SGE virtual addresses */ + mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt), + GFP_KERNEL); + if (!mbox->sge_array) + return 0; + + for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) { + /* The DMA memory is always allocated in the length of a + * page even though the last SGE might not fill up to a + * page, this is used as the a priori size of PAGE_SIZE for + * the later DMA memory free. + */ + viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE, + &phyaddr, GFP_KERNEL); + /* If the allocation fails, proceed with whatever we have */ + if (!viraddr) + break; + mbox->sge_array->addr[pagen] = viraddr; + /* Keep the first page for later sub-header construction */ + if (pagen == 0) + cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr; + resid_len = length - alloc_len; + if (resid_len > PAGE_SIZE) { + lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr, + PAGE_SIZE); + alloc_len += PAGE_SIZE; + } else { + lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr, + resid_len); + alloc_len = length; + } + } + + /* Set up main header fields in mailbox command */ + sli4_config->header.cfg_mhdr.payload_length = alloc_len; + bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen); + + /* Set up sub-header fields into the first page */ + if (pagen > 0) { + bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode); + bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem); + cfg_shdr->request.request_length = + alloc_len - sizeof(union lpfc_sli4_cfg_shdr); + } + /* The sub-header is in DMA memory, which needs endian conversion */ + lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr, + sizeof(union lpfc_sli4_cfg_shdr)); + + return alloc_len; +} + +/** + * lpfc_sli4_mbox_opcode_get - Get the opcode from an SLI4 mailbox command + * @phba: pointer to lpfc hba data structure. + * @mbox: pointer to lpfc mbox command. + * + * This routine gets the opcode from an SLI4-specific mailbox command for + * sending IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG + * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be + * returned.
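+ *
+ * A typical use is decorating log messages with the sub-opcode, as the
+ * completion path in this series does (illustrative sketch only):
+ *
+ *	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
+ *			"mbox cmd x%x (x%x) cmpl\n",
+ *			pmb->u.mb.mbxCommand,
+ *			lpfc_sli4_mbox_opcode_get(phba, pmb));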
+ **/ +uint8_t +lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox) +{ + struct lpfc_mbx_sli4_config *sli4_cfg; + union lpfc_sli4_cfg_shdr *cfg_shdr; + + if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG) + return 0; + sli4_cfg = &mbox->u.mqe.un.sli4_config; + + /* For embedded mbox command, get opcode from embedded sub-header */ + if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) { + cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; + return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request); + } + + /* For non-embedded mbox command, get opcode from first dma page */ + if (unlikely(!mbox->sge_array)) + return 0; + cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0]; + return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request); +} + +/** + * lpfc_request_features - Configure SLI4 REQUEST_FEATURES mailbox + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to lpfc mbox command. + * + * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES + * mailbox command. + **/ +void +lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) +{ + /* Set up SLI4 mailbox command header fields */ + memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); + bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS); + + /* Set up host requested features. */ + bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); + + /* Virtual fabrics and FIPs are not supported yet. */ + bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0); + + /* Enable DIF (block guard) only if configured to do so. */ + if (phba->cfg_enable_bg) + bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1); + + /* Enable NPIV only if configured to do so. */ + if (phba->max_vpi && phba->cfg_enable_npiv) + bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1); + + return; +} + +/** + * lpfc_init_vfi - Initialize the INIT_VFI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @vport: Vport associated with the VF. + * + * This routine initializes @mbox to all zeros and then fills in the mailbox + * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI + * in the context of an FCF. The driver issues this command to set up a VFI + * before issuing a FLOGI to log in to the VSAN. The driver should also issue a + * REG_VFI after a successful VSAN login. + **/ +void +lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport) +{ + struct lpfc_mbx_init_vfi *init_vfi; + + memset(mbox, 0, sizeof(*mbox)); + init_vfi = &mbox->u.mqe.un.init_vfi; + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI); + bf_set(lpfc_init_vfi_vr, init_vfi, 1); + bf_set(lpfc_init_vfi_vt, init_vfi, 1); + bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base); + bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi); +} + +/** + * lpfc_reg_vfi - Initialize the REG_VFI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @vport: vport associated with the VF. + * @phys: BDE DMA bus address used to send the service parameters to the HBA. + * + * This routine initializes @mbox to all zeros and then fills in the mailbox + * fields from @vport, and uses the DMA buffer at @phys to send the vport's + * fc service parameters to the HBA for this VFI. REG_VFI configures virtual + * fabrics identified by VFI in the context of an FCF.
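+ *
+ * Note: @phys must point at a DMA-safe copy of the vport's service
+ * parameters; see lpfc_issue_reg_vfi() later in this series, which copies
+ * phba->fc_fabparam into the buffer before calling this routine.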
+ **/ +void +lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) +{ + struct lpfc_mbx_reg_vfi *reg_vfi; + + memset(mbox, 0, sizeof(*mbox)); + reg_vfi = &mbox->u.mqe.un.reg_vfi; + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI); + bf_set(lpfc_reg_vfi_vp, reg_vfi, 1); + bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base); + bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); + bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base); + reg_vfi->bde.addrHigh = putPaddrHigh(phys); + reg_vfi->bde.addrLow = putPaddrLow(phys); + reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); + reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID); +} + +/** + * lpfc_init_vpi - Initialize the INIT_VPI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @vpi: VPI to be initialized. + * + * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the + * command to activate a virtual N_Port. The HBA assigns a MAC address to use + * with the virtual N_Port. The SLI Host issues this command before issuing an + * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a + * successful virtual N_Port login. + **/ +void +lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi) +{ + memset(mbox, 0, sizeof(*mbox)); + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI); + bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi); +} + +/** + * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @vfi: VFI to be unregistered. + * + * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric + * (logical N_Port) into the inactive state. The SLI Host must have logged out + * and unregistered all remote N_Ports to abort any activity on the virtual + * fabric. The SLI Port posts the mailbox response after marking the virtual + * fabric inactive. + **/ +void +lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi) +{ + memset(mbox, 0, sizeof(*mbox)); + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI); + bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi); +} + +/** + * lpfc_dump_fcoe_param - Dump config region 23 to get FCoE parameters. + * @phba: pointer to the hba structure. + * @mbox: pointer to lpfc mbox command to initialize. + * + * This function creates an SLI4 dump mailbox command to dump FCoE + * parameters stored in region 23.
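+ *
+ * The DMA buffer carrying the dumped region is saved in @mbox->context1,
+ * so the completion path owns it and is expected to release it roughly as
+ * follows (illustrative sketch only):
+ *
+ *	mp = (struct lpfc_dmabuf *) mbox->context1;
+ *	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ *	kfree(mp);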
+ **/ +int +lpfc_dump_fcoe_param(struct lpfc_hba *phba, + struct lpfcMboxq *mbox) +{ + struct lpfc_dmabuf *mp = NULL; + MAILBOX_t *mb; + + memset(mbox, 0, sizeof(*mbox)); + mb = &mbox->u.mb; + + mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (mp) + mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); + + if (!mp || !mp->virt) { + kfree(mp); + /* dump_fcoe_param failed to allocate memory */ + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, + "2569 lpfc_dump_fcoe_param: memory" + " allocation failed\n"); + return 1; + } + + memset(mp->virt, 0, LPFC_BPL_SIZE); + INIT_LIST_HEAD(&mp->list); + + /* save address for completion */ + mbox->context1 = (uint8_t *) mp; + + mb->mbxCommand = MBX_DUMP_MEMORY; + mb->un.varDmp.type = DMP_NV_PARAMS; + mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM; + mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE; + mb->un.varWords[3] = putPaddrLow(mp->phys); + mb->un.varWords[4] = putPaddrHigh(mp->phys); + return 0; +} + +/** + * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command + * @phba: pointer to the hba structure containing the FCF index and RQ ID. + * @mbox: pointer to lpfc mbox command to initialize. + * + * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The + * SLI Host uses the command to activate an FCF after it has acquired FCF + * information via a READ_FCF mailbox command. This mailbox command is also used + * to indicate where received unsolicited frames from this FCF will be sent. By + * default this routine will set up the FCF to forward all unsolicited frames + * to the RQ ID passed in via @phba. This can be overridden by the caller for + * more complicated setups. + **/ +void +lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox) +{ + struct lpfc_mbx_reg_fcfi *reg_fcfi; + + memset(mbox, 0, sizeof(*mbox)); + reg_fcfi = &mbox->u.mqe.un.reg_fcfi; + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI); + bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id); + bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); + bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); + bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); + bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx); + /* reg_fcf addr mode is bitwise inverted value of fcf addr_mode */ + bf_set(lpfc_reg_fcfi_mam, reg_fcfi, + (~phba->fcf.addr_mode) & 0x3); + if (phba->fcf.fcf_flag & FCF_VALID_VLAN) { + bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); + bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id); + } +} + +/** + * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @fcfi: FCFI to be unregistered. + * + * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). + * The SLI Host uses the command to inactivate an FCFI. + **/ +void +lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi) +{ + memset(mbox, 0, sizeof(*mbox)); + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI); + bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi); +} + +/** + * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @ndlp: The nodelist structure that describes the RPI to resume. + * + * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a + * link event.
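+ *
+ * A minimal caller sketch (illustrative only, not taken from the driver
+ * source; error handling trimmed):
+ *
+ *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ *	lpfc_resume_rpi(mboxq, ndlp);
+ *	mboxq->vport = ndlp->vport;
+ *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);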
+ **/ +void +lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp) +{ + struct lpfc_mbx_resume_rpi *resume_rpi; + + memset(mbox, 0, sizeof(*mbox)); + resume_rpi = &mbox->u.mqe.un.resume_rpi; + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI); + bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi); + bf_set(lpfc_resume_rpi_vpi, resume_rpi, + ndlp->vport->vpi + ndlp->vport->phba->vpi_base); + bf_set(lpfc_resume_rpi_vfi, resume_rpi, + ndlp->vport->vfi + ndlp->vport->phba->vfi_base); +} diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 6ba5a72f6049..6efe459e8ddf 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1192,7 +1192,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ if ((mb = phba->sli.mbox_active)) { - if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && + if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { lpfc_nlp_put(ndlp); mb->context2 = NULL; @@ -1202,7 +1202,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { - if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && + if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { mp = (struct lpfc_dmabuf *) (mb->context1); if (mp) { @@ -1253,7 +1253,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; uint32_t did = mb->un.varWords[1]; if (mb->mbxStatus) { @@ -1880,11 +1880,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport, void *arg, uint32_t evt) { LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; - if (!mb->mbxStatus) + if (!mb->mbxStatus) { ndlp->nlp_rpi = mb->un.varWords[0]; - else { + ndlp->nlp_flag |= NLP_RPI_VALID; + } else { if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index cf42ada3ffcd..b53af9936282 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -43,24 +43,7 @@ #include "lpfc_logmsg.h" #include "lpfc_compat.h" #include "lpfc_debugfs.h" - -/* - * Define macro to log: Mailbox command x%x cannot issue Data - * This allows multiple uses of lpfc_msgBlk0311 - * w/o perturbing log msg utility. - */ -#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \ - lpfc_printf_log(phba, \ - KERN_INFO, \ - LOG_MBOX | LOG_SLI, \ - "(%d):0311 Mailbox command x%x cannot " \ - "issue Data: x%x x%x x%x\n", \ - pmbox->vport ? pmbox->vport->vpi : 0, \ - pmbox->mb.mbxCommand, \ - phba->pport->port_state, \ - psli->sli_flag, \ - flag) - +#include "lpfc_vport.h" /* There are only four IOCB completion types. 
*/ typedef enum _lpfc_iocb_type { @@ -843,7 +826,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba) pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) return -ENOMEM; - pmbox = &pmb->mb; + pmbox = &pmb->u.mb; phba->link_state = LPFC_INIT_MBX_CMDS; for (i = 0; i < psli->num_rings; i++) { lpfc_config_ring(phba, i, pmb); @@ -1652,6 +1635,15 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) case MBX_HEARTBEAT: case MBX_PORT_CAPABILITIES: case MBX_PORT_IOV_CONTROL: + case MBX_SLI4_CONFIG: + case MBX_SLI4_REQ_FTRS: + case MBX_REG_FCFI: + case MBX_UNREG_FCFI: + case MBX_REG_VFI: + case MBX_UNREG_VFI: + case MBX_INIT_VPI: + case MBX_INIT_VFI: + case MBX_RESUME_RPI: ret = mbxCommand; break; default: @@ -1672,7 +1664,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) * will wake up thread waiting on the wait queue pointed by context1 * of the mailbox. **/ -static void +void lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { wait_queue_head_t *pdone_q; @@ -1706,7 +1698,7 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_dmabuf *mp; - uint16_t rpi; + uint16_t rpi, vpi; int rc; mp = (struct lpfc_dmabuf *) (pmb->context1); @@ -1716,24 +1708,30 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) kfree(mp); } + if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && + (phba->sli_rev == LPFC_SLI_REV4)) + lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); + /* * If a REG_LOGIN succeeded after node is destroyed or node * is in re-discovery driver need to cleanup the RPI. */ if (!(phba->pport->load_flag & FC_UNLOADING) && - pmb->mb.mbxCommand == MBX_REG_LOGIN64 && - !pmb->mb.mbxStatus) { - - rpi = pmb->mb.un.varWords[0]; - lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb); + pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && + !pmb->u.mb.mbxStatus) { + rpi = pmb->u.mb.un.varWords[0]; + vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base; + lpfc_unreg_login(phba, vpi, rpi, pmb); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc != MBX_NOT_FINISHED) return; } - mempool_free(pmb, phba->mbox_mem_pool); - return; + if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) + lpfc_sli4_mbox_cmd_free(phba, pmb); + else + mempool_free(pmb, phba->mbox_mem_pool); } /** @@ -1770,7 +1768,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) if (pmb == NULL) break; - pmbox = &pmb->mb; + pmbox = &pmb->u.mb; if (pmbox->mbxCommand != MBX_HEARTBEAT) { if (pmb->vport) { @@ -1799,9 +1797,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) /* Unknown mailbox command compl */ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "(%d):0323 Unknown Mailbox command " - "%x Cmpl\n", + "x%x (x%x) Cmpl\n", pmb->vport ? pmb->vport->vpi : 0, - pmbox->mbxCommand); + pmbox->mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, pmb)); phba->link_state = LPFC_HBA_ERROR; phba->work_hs = HS_FFER3; lpfc_handle_eratt(phba); @@ -1816,29 +1815,29 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) LOG_MBOX | LOG_SLI, "(%d):0305 Mbox cmd cmpl " "error - RETRYing Data: x%x " - "x%x x%x x%x\n", + "(x%x) x%x x%x x%x\n", pmb->vport ?
pmb->vport->vpi :0, pmbox->mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, + pmb), pmbox->mbxStatus, pmbox->un.varWords[0], pmb->vport->port_state); pmbox->mbxStatus = 0; pmbox->mbxOwner = OWN_HOST; - spin_lock_irq(&phba->hbalock); - phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - spin_unlock_irq(&phba->hbalock); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); - if (rc == MBX_SUCCESS) + if (rc != MBX_NOT_FINISHED) continue; } } /* Mailbox cmd Cmpl */ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "(%d):0307 Mailbox cmd x%x Cmpl x%p " + "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p " "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", pmb->vport ? pmb->vport->vpi : 0, pmbox->mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, pmb), pmb->mbox_cmpl, *((uint32_t *) pmbox), pmbox->un.varWords[0], @@ -3377,10 +3376,10 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) } spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + psli->mbox_active = NULL; phba->link_flag &= ~LS_IGNORE_ERATT; spin_unlock_irq(&phba->hbalock); - psli->mbox_active = NULL; lpfc_hba_down_post(phba); phba->link_state = LPFC_HBA_ERROR; @@ -3790,7 +3789,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba) if (!pmb) return -ENOMEM; - pmbox = &pmb->mb; + pmbox = &pmb->u.mb; /* Initialize the struct lpfc_sli_hbq structure for each hbq */ phba->link_state = LPFC_INIT_MBX_CMDS; @@ -3917,33 +3916,43 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0442 Adapter failed to init, mbxCmd x%x " "CONFIG_PORT, mbxStatus x%x Data: x%x\n", - pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0); + pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); spin_lock_irq(&phba->hbalock); - phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; + phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); rc = -ENXIO; - } else + } else { + /* Allow asynchronous mailbox command to go through */ + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; + spin_unlock_irq(&phba->hbalock); done = 1; + } } if (!done) { rc = -EINVAL; goto do_prep_failed; } - if (pmb->mb.un.varCfgPort.sli_mode == 3) { - if (!pmb->mb.un.varCfgPort.cMA) { + if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { + if (!pmb->u.mb.un.varCfgPort.cMA) { rc = -ENXIO; goto do_prep_failed; } - if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) { + if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; - phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi; + phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; + phba->max_vports = (phba->max_vpi > phba->max_vports) ? 
+ phba->max_vpi : phba->max_vports; + } else phba->max_vpi = 0; - if (pmb->mb.un.varCfgPort.gerbm) + if (pmb->u.mb.un.varCfgPort.gdss) + phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; + if (pmb->u.mb.un.varCfgPort.gerbm) phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; - if (pmb->mb.un.varCfgPort.gcrp) + if (pmb->u.mb.un.varCfgPort.gcrp) phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; - if (pmb->mb.un.varCfgPort.ginb) { + if (pmb->u.mb.un.varCfgPort.ginb) { phba->sli3_options |= LPFC_SLI3_INB_ENABLED; phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get; phba->port_gp = phba->mbox->us.s3_inb_pgp.port; @@ -3959,7 +3968,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) } if (phba->cfg_enable_bg) { - if (pmb->mb.un.varCfgPort.gbg) + if (pmb->u.mb.un.varCfgPort.gbg) phba->sli3_options |= LPFC_SLI3_BG_ENABLED; else lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -4054,8 +4063,9 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) if (rc) goto lpfc_sli_hba_setup_error; } - + spin_lock_irq(&phba->hbalock); phba->sli.sli_flag |= LPFC_PROCESS_LA; + spin_unlock_irq(&phba->hbalock); rc = lpfc_config_port_post(phba); if (rc) @@ -4596,7 +4606,7 @@ void lpfc_mbox_timeout_handler(struct lpfc_hba *phba) { LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; - MAILBOX_t *mb = &pmbox->mb; + MAILBOX_t *mb = &pmbox->u.mb; struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; @@ -6413,6 +6423,52 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba) return 1; } +/** + * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system + * @phba: Pointer to HBA context object. + * + * This routine flushes the mailbox command sub-system. It will unconditionally + * flush all the mailbox commands in the three possible stages in the mailbox + * command sub-system: the pending mailbox command queue; the outstanding mailbox + * command; and the completed mailbox command queue. It is the caller's + * responsibility to make sure that the driver is in the proper state to flush + * the mailbox command sub-system. Namely, the posting of mailbox commands into + * the pending mailbox command queue from the various clients must be stopped; + * either the HBA is in a state that it will never work on the outstanding + * mailbox command (such as in EEH or ERATT conditions) or the outstanding + * mailbox command has been completed. + **/ +static void +lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) +{ + LIST_HEAD(completions); + struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *pmb; + unsigned long iflag; + + /* Flush all the mailbox commands in the mbox system */ + spin_lock_irqsave(&phba->hbalock, iflag); + /* The pending mailbox command queue */ + list_splice_init(&phba->sli.mboxq, &completions); + /* The outstanding active mailbox command */ + if (psli->mbox_active) { + list_add_tail(&psli->mbox_active->list, &completions); + psli->mbox_active = NULL; + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + } + /* The completed mailbox command queue */ + list_splice_init(&phba->sli.mboxq_cmpl, &completions); + spin_unlock_irqrestore(&phba->hbalock, iflag); + + /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ + while (!list_empty(&completions)) { + list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); + pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; + if (pmb->mbox_cmpl) + pmb->mbox_cmpl(phba, pmb); + } +} + /** * lpfc_sli_host_down - Vport cleanup function * @vport: Pointer to virtual port object.
@@ -6506,9 +6562,11 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; struct lpfc_dmabuf *buf_ptr; - LPFC_MBOXQ_t *pmb; - int i; unsigned long flags = 0; + int i; + + /* Shutdown the mailbox command sub-system */ + lpfc_sli_mbox_sys_shutdown(phba); lpfc_hba_down_prep(phba); @@ -7773,7 +7831,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { pmb = phba->sli.mbox_active; - pmbox = &pmb->mb; + pmbox = &pmb->u.mb; mbox = phba->mbox; vport = pmb->vport; @@ -8169,6 +8227,183 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, } } +/** + * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event + * @phba: Pointer to HBA context object. + * @mcqe: Pointer to the mailbox completion queue entry. + * + * This routine processes a mailbox completion queue entry with an + * asynchronous event. + * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) +{ + struct lpfc_cq_event *cq_event; + unsigned long iflags; + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0392 Async Event: word0:x%x, word1:x%x, " + "word2:x%x, word3:x%x\n", mcqe->word0, + mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); + + /* Allocate a new internal CQ_EVENT entry */ + cq_event = lpfc_sli4_cq_event_alloc(phba); + if (!cq_event) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0394 Failed to allocate CQ_EVENT entry\n"); + return false; + } + + /* Move the CQE into an asynchronous event entry */ + memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe)); + spin_lock_irqsave(&phba->hbalock, iflags); + list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); + /* Set the async event flag */ + phba->hba_flag |= ASYNC_EVENT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + + return true; +} + +/** + * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event + * @phba: Pointer to HBA context object. + * @mcqe: Pointer to the mailbox completion queue entry. + * + * This routine processes a mailbox completion queue entry with a mailbox + * completion event. + * + * Return: true if work posted to worker thread, otherwise false.
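+ *
+ * Note on status mapping: a non-success MCQE status is folded into the
+ * SLI4 mailbox error range before the completion handler sees it; e.g.,
+ * with LPFC_MBX_ERROR_RANGE of 0x4000 (per the "SLI4 range 0x4000"
+ * comment in the body below), an MCQE status of 0x2 is reported as 0x4002.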
+ **/ +static bool +lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) +{ + uint32_t mcqe_status; + MAILBOX_t *mbox, *pmbox; + struct lpfc_mqe *mqe; + struct lpfc_vport *vport; + struct lpfc_nodelist *ndlp; + struct lpfc_dmabuf *mp; + unsigned long iflags; + LPFC_MBOXQ_t *pmb; + bool workposted = false; + int rc; + + /* If not a mailbox completion MCQE, bail out after checking the consumed bit */ + if (!bf_get(lpfc_trailer_completed, mcqe)) + goto out_no_mqe_complete; + + /* Get the reference to the active mbox command */ + spin_lock_irqsave(&phba->hbalock, iflags); + pmb = phba->sli.mbox_active; + if (unlikely(!pmb)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "1832 No pending MBOX command to handle\n"); + spin_unlock_irqrestore(&phba->hbalock, iflags); + goto out_no_mqe_complete; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + mqe = &pmb->u.mqe; + pmbox = (MAILBOX_t *)&pmb->u.mqe; + mbox = phba->mbox; + vport = pmb->vport; + + /* Reset heartbeat timer */ + phba->last_completion_time = jiffies; + del_timer(&phba->sli.mbox_tmo); + + /* Move mbox data to caller's mailbox region, do endian swapping */ + if (pmb->mbox_cmpl && mbox) + lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); + /* Set the mailbox status with SLI4 range 0x4000 */ + mcqe_status = bf_get(lpfc_mcqe_status, mcqe); + if (mcqe_status != MB_CQE_STATUS_SUCCESS) + bf_set(lpfc_mqe_status, mqe, + (LPFC_MBX_ERROR_RANGE | mcqe_status)); + + if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { + pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, + "MBOX dflt rpi: status:x%x rpi:x%x", + mcqe_status, + pmbox->un.varWords[0], 0); + if (mcqe_status == MB_CQE_STATUS_SUCCESS) { + mp = (struct lpfc_dmabuf *)(pmb->context1); + ndlp = (struct lpfc_nodelist *)pmb->context2; + /* Reg_LOGIN of dflt RPI was successful. Now let's get + * rid of the RPI using the same mbox buffer. + */ + lpfc_unreg_login(phba, vport->vpi, + pmbox->un.varWords[0], pmb); + pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; + pmb->context1 = mp; + pmb->context2 = ndlp; + pmb->vport = vport; + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + if (rc != MBX_BUSY) + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | + LOG_SLI, "0385 rc should " + "have been MBX_BUSY\n"); + if (rc != MBX_NOT_FINISHED) + goto send_current_mbox; + } + } + spin_lock_irqsave(&phba->pport->work_port_lock, iflags); + phba->pport->work_port_events &= ~WORKER_MBOX_TMO; + spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); + + /* There is mailbox completion work to do */ + spin_lock_irqsave(&phba->hbalock, iflags); + __lpfc_mbox_cmpl_put(phba, pmb); + phba->work_ha |= HA_MBATT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + workposted = true; + +send_current_mbox: + spin_lock_irqsave(&phba->hbalock, iflags); + /* Release the mailbox command posting token */ + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + /* Setting the active mailbox pointer needs to be in sync with the flag clear */ + phba->sli.mbox_active = NULL; + spin_unlock_irqrestore(&phba->hbalock, iflags); + /* Wake up worker thread to post the next pending mailbox command */ + lpfc_worker_wake_up(phba); +out_no_mqe_complete: + if (bf_get(lpfc_trailer_consumed, mcqe)) + lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); + return workposted; +} + +/** + * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry + * @phba: Pointer to HBA context object. + * @cqe: Pointer to mailbox completion queue entry.
+ * + * This routine processes a mailbox completion queue entry; it invokes the + * proper mailbox completion handling or asynchronous event handling routine + * according to the MCQE's async bit. + * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) +{ + struct lpfc_mcqe mcqe; + bool workposted; + + /* Copy the mailbox MCQE and convert endian order as needed */ + lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); + + /* Invoke the proper event handling routine */ + if (!bf_get(lpfc_trailer_async, &mcqe)) + workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); + else + workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); + return workposted; +} + /** * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event * @phba: Pointer to HBA context object. @@ -9246,6 +9481,112 @@ out: return status; } +/** + * lpfc_mq_create - Create a Mailbox Queue on the HBA + * @phba: HBA structure that indicates port to create a queue on. + * @mq: The queue structure to use to create the mailbox queue. + * + * This function creates a mailbox queue, as detailed in @mq, on a port, + * described by @phba by sending an MQ_CREATE mailbox command to the HBA. + * + * The @phba struct is used to send the mailbox command to the HBA. The @mq + * struct is used to get the entry count and entry size that are necessary to + * determine the number of pages to allocate and use for this queue. This + * function will send the MQ_CREATE mailbox command to the HBA to set up the + * mailbox queue. This function is synchronous and will wait for the mailbox + * command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. + **/ +uint32_t +lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, + struct lpfc_queue *cq, uint32_t subtype) +{ + struct lpfc_mbx_mq_create *mq_create; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_mq_create) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_MQ_CREATE, + length, LPFC_SLI4_MBX_EMBED); + mq_create = &mbox->u.mqe.un.mq_create; + bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, + mq->page_count); + bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, + cq->queue_id); + bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); + switch (mq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0362 Unsupported MQ count.
(%d)\n", + mq->entry_count); + if (mq->entry_count < 16) + return -EINVAL; + /* otherwise default to smallest count (fall through) */ + case 16: + bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + LPFC_MQ_CNT_16); + break; + case 32: + bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + LPFC_MQ_CNT_32); + break; + case 64: + bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + LPFC_MQ_CNT_64); + break; + case 128: + bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + LPFC_MQ_CNT_128); + break; + } + list_for_each_entry(dmabuf, &mq->page_list, list) { + mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2502 MQ_CREATE mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + goto out; + } + mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response); + if (mq->queue_id == 0xFFFF) { + status = -ENXIO; + goto out; + } + mq->type = LPFC_MQ; + mq->subtype = subtype; + mq->host_index = 0; + mq->hba_index = 0; + + /* link the mq onto the parent cq child list */ + list_add_tail(&mq->list, &cq->child_list); +out: + if (rc != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + return status; +} + /** * lpfc_wq_create - Create a Work Queue on the HBA * @phba: HBA structure that indicates port to create a queue on. @@ -9614,6 +9955,60 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) return status; } +/** + * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA + * @mq: The queue structure associated with the queue to destroy. + * + * This function destroys a queue, as detailed in @mq, by sending a mailbox + * command, specific to the type of queue, to the HBA. + * + * The @mq struct is used to get the queue ID of the queue to destroy. + * + * On success this function will return a zero. If the queue destroy mailbox + * command fails this function will return -ENXIO. + **/ +uint32_t +lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) +{ + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + if (!mq) + return -ENODEV; + mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_mq_destroy) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_MQ_DESTROY, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, + mq->queue_id); + mbox->vport = mq->phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader.
*/ shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2507 MQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + /* Remove mq from any list */ + list_del_init(&mq->list); + if (rc != MBX_TIMEOUT) + mempool_free(mbox, mq->phba->mbox_mem_pool); + return status; +} + /** * lpfc_wq_destroy - Destroy a Work Queue on the HBA + * @wq: The queue structure associated with the queue to destroy. -- cgit v1.2.3 From 6fb120a7ed882aae9636545142a51cf3182a3ace Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 22 May 2009 14:52:59 -0400 Subject: [SCSI] lpfc 8.3.2 : Addition of SLI4 Interface - FCOE Discovery support SLI4 supports both FC and FCOE, with some extended topology objects. This patch adds support for the objects, and updates the discovery engines for their use. Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_attr.c | 32 +- drivers/scsi/lpfc/lpfc_crtn.h | 3 + drivers/scsi/lpfc/lpfc_els.c | 179 +++++- drivers/scsi/lpfc/lpfc_hbadisc.c | 1055 +++++++++++++++++++++++++++++++++++- drivers/scsi/lpfc/lpfc_nportdisc.c | 34 +- drivers/scsi/lpfc/lpfc_sli.c | 517 ++++++++++++++++++ 6 files changed, 1756 insertions(+), 64 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 463104d96867..270a4c6cd3ac 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -2924,6 +2924,14 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat."); */ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); +/* +# lpfc_enable_fip: When set, FIP is required to start discovery. If not +# set, the driver will add an FCF record manually if the port has no +# FCF records available and start discovery. +# Value range is [0,1]. Default value is 1 (enabled) +*/ +LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery"); + /* # lpfc_prot_mask: i @@ -2990,6 +2998,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_peer_port_login, &dev_attr_lpfc_nodev_tmo, &dev_attr_lpfc_devloss_tmo, + &dev_attr_lpfc_enable_fip, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, &dev_attr_lpfc_ack0, @@ -3042,6 +3051,7 @@ struct device_attribute *lpfc_vport_attrs[] = { &dev_attr_lpfc_lun_queue_depth, &dev_attr_lpfc_nodev_tmo, &dev_attr_lpfc_devloss_tmo, + &dev_attr_lpfc_enable_fip, &dev_attr_lpfc_hba_queue_depth, &dev_attr_lpfc_peer_port_login, &dev_attr_lpfc_restrict_login, @@ -4167,26 +4177,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) phba->cfg_soft_wwpn = 0L; lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); - /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size - * used to create the sg_dma_buf_pool must be dynamically calculated. - * 2 segments are added since the IOCB needs a command and response bde. - */ - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp) + - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); - - if (phba->cfg_enable_bg) { - phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; - phba->cfg_sg_dma_buf_size += - phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); - } - - /* Also reinitialize the host templates with new values.
*/ - lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; - lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; - lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); + lpfc_enable_fip_init(phba, lpfc_enable_fip); + lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); + return; } diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index e0f1cd4b3d3d..d2a922997c0f 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *); struct fc_rport; void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); +int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *); void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); @@ -108,6 +110,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *); int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t); +int lpfc_issue_fabric_reglogin(struct lpfc_vport *); int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *, diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 2c034a554c88..a3b56d7f72f4 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -387,6 +387,75 @@ fail: return -ENXIO; } +/** + * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for + * the @vport. This mailbox command is necessary for FCoE only. + * + * Return code + * 0 - successfully issued REG_VFI for @vport + * A failure code otherwise. 
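+ *
+ * The fabric service parameters are handed to the port through a DMA
+ * buffer, so the setup below is: allocate the lpfc_dmabuf and the
+ * mailbox, copy fc_fabparam into the buffer, then issue REG_VFI with
+ * the buffer's physical address; the failure labels unwind those
+ * allocations in reverse order.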
+ **/ +static int +lpfc_issue_reg_vfi(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mboxq; + struct lpfc_nodelist *ndlp; + struct serv_parm *sp; + struct lpfc_dmabuf *dmabuf; + int rc = 0; + + sp = &phba->fc_fabparam; + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { + rc = -ENODEV; + goto fail; + } + + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) { + rc = -ENOMEM; + goto fail; + } + dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys); + if (!dmabuf->virt) { + rc = -ENOMEM; + goto fail_free_dmabuf; + } + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + rc = -ENOMEM; + goto fail_free_coherent; + } + vport->port_state = LPFC_FABRIC_CFG_LINK; + memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam)); + lpfc_reg_vfi(mboxq, vport, dmabuf->phys); + mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; + mboxq->vport = vport; + mboxq->context1 = dmabuf; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + rc = -ENXIO; + goto fail_free_mbox; + } + return 0; + +fail_free_mbox: + mempool_free(mboxq, phba->mbox_mem_pool); +fail_free_coherent: + lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); +fail_free_dmabuf: + kfree(dmabuf); +fail: + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0289 Issue Register VFI failed: Err %d\n", rc); + return rc; +} + /** * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port * @vport: pointer to a host virtual N_Port data structure. @@ -499,17 +568,24 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } } - lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); - - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && - vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) { - lpfc_register_new_vport(phba, vport, ndlp); - return 0; + if (phba->sli_rev < LPFC_SLI_REV4) { + lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && + vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) + lpfc_register_new_vport(phba, vport, ndlp); + else + lpfc_issue_fabric_reglogin(vport); + } else { + ndlp->nlp_type |= NLP_FABRIC; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + if (vport->vfi_state & LPFC_VFI_REGISTERED) { + lpfc_start_fdiscs(phba); + lpfc_do_scr_ns_plogi(phba, vport); + } else + lpfc_issue_reg_vfi(vport); } - lpfc_issue_fabric_reglogin(vport); return 0; } - /** * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port * @vport: pointer to a host virtual N_Port data structure. 
@@ -817,9 +893,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (sp->cmn.fcphHigh < FC_PH3) sp->cmn.fcphHigh = FC_PH3; - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { + if (phba->sli_rev == LPFC_SLI_REV4) { + elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1); + elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1); + /* FLOGI needs to be 3 for WQE FCFI */ + /* Set the fcfi to the fcfi we registered with */ + elsiocb->iocb.ulpContext = phba->fcf.fcfi; + } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { sp->cmn.request_multiple_Nport = 1; - /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ icmd->ulpCt_h = 1; icmd->ulpCt_l = 0; @@ -932,6 +1013,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport) if (!ndlp) return 0; lpfc_nlp_init(vport, ndlp, Fabric_DID); + /* Set the node type */ + ndlp->nlp_type |= NLP_FABRIC; /* Put ndlp onto node list */ lpfc_enqueue_node(vport, ndlp); } else if (!NLP_CHK_NODE_ACT(ndlp)) { @@ -1604,7 +1687,8 @@ lpfc_adisc_done(struct lpfc_vport *vport) * and continue discovery. */ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && - !(vport->fc_flag & FC_RSCN_MODE)) { + !(vport->fc_flag & FC_RSCN_MODE) && + (phba->sli_rev < LPFC_SLI_REV4)) { lpfc_issue_reg_vpi(phba, vport); return; } @@ -2937,6 +3021,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; + /* + * This routine is used to register and unregister in previous SLI + * modes. + */ + if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && + (phba->sli_rev == LPFC_SLI_REV4)) + lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); + pmb->context1 = NULL; lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); @@ -3816,7 +3908,9 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) payload_len -= sizeof(uint32_t); switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { case RSCN_ADDRESS_FORMAT_PORT: - if (ns_did.un.word == rscn_did.un.word) + if ((ns_did.un.b.domain == rscn_did.un.b.domain) + && (ns_did.un.b.area == rscn_did.un.b.area) + && (ns_did.un.b.id == rscn_did.un.b.id)) goto return_did_out; break; case RSCN_ADDRESS_FORMAT_AREA: @@ -4857,7 +4951,10 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, } else { /* FAN verified - skip FLOGI */ vport->fc_myDID = vport->fc_prevDID; - lpfc_issue_fabric_reglogin(vport); + if (phba->sli_rev < LPFC_SLI_REV4) + lpfc_issue_fabric_reglogin(vport); + else + lpfc_issue_reg_vfi(vport); } } return 0; @@ -5540,11 +5637,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, dropit: if (vport && !(vport->load_flag & FC_UNLOADING)) - lpfc_printf_log(phba, KERN_ERR, LOG_ELS, - "(%d):0111 Dropping received ELS cmd " + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0111 Dropping received ELS cmd " "Data: x%x x%x x%x\n", - vport->vpi, icmd->ulpStatus, - icmd->un.ulpWord[4], icmd->ulpTimeout); + icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); phba->fc_stat.elsRcvDrop++; } @@ -5620,10 +5716,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { if (icmd->unsli3.rcvsli3.vpi == 0xffff) vport = phba->pport; - else { - uint16_t vpi = icmd->unsli3.rcvsli3.vpi; - vport = lpfc_find_vport_by_vpid(phba, vpi); - } + else + vport = lpfc_find_vport_by_vpid(phba, + icmd->unsli3.rcvsli3.vpi - phba->vpi_base); } /* If there are no BDEs associated * with this IOCB, there is nothing 
to do. @@ -5792,7 +5887,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } else { if (vport == phba->pport) - lpfc_issue_fabric_reglogin(vport); + if (phba->sli_rev < LPFC_SLI_REV4) + lpfc_issue_fabric_reglogin(vport); + else + lpfc_issue_reg_vfi(vport); else lpfc_do_scr_ns_plogi(phba, vport); } @@ -5824,7 +5922,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { - lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox); + lpfc_reg_vpi(vport, mbox); mbox->vport = vport; mbox->context2 = lpfc_nlp_get(ndlp); mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; @@ -6496,3 +6594,38 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba) lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); } + +/** + * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort + * @phba: pointer to lpfc hba data structure. + * @axri: pointer to the els xri abort wcqe structure. + * + * This routine is invoked by the worker thread to process a SLI4 slow-path + * ELS aborted xri. + **/ +void +lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri) +{ + uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); + struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + unsigned long iflag = 0; + + spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag); + list_for_each_entry_safe(sglq_entry, sglq_next, + &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { + if (sglq_entry->sli4_xritag == xri) { + list_del(&sglq_entry->list); + spin_unlock_irqrestore( + &phba->sli4_hba.abts_sgl_list_lock, + iflag); + spin_lock_irqsave(&phba->hbalock, iflag); + + list_add_tail(&sglq_entry->list, + &phba->sli4_hba.lpfc_sgl_list); + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; + } + } + spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag); +} diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 2270d9a7c8e3..9bd7a8927a34 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -275,6 +275,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); + + lpfc_unregister_unused_fcf(phba); } /** @@ -297,10 +299,11 @@ lpfc_alloc_fast_evt(struct lpfc_hba *phba) { ret = kzalloc(sizeof(struct lpfc_fast_path_event), GFP_ATOMIC); - if (ret) + if (ret) { atomic_inc(&phba->fast_event_count); - INIT_LIST_HEAD(&ret->work_evt.evt_listp); - ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; + INIT_LIST_HEAD(&ret->work_evt.evt_listp); + ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; + } return ret; } @@ -741,6 +744,7 @@ lpfc_linkdown(struct lpfc_hba *phba) if (phba->link_state == LPFC_LINK_DOWN) return 0; spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED); if (phba->link_state > LPFC_LINK_DOWN) { phba->link_state = LPFC_LINK_DOWN; phba->pport->fc_flag &= ~FC_LBIT; @@ -748,7 +752,7 @@ lpfc_linkdown(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { /* Issue a LINK DOWN event to all nodes */ lpfc_linkdown_port(vports[i]); } @@ -858,10 +862,11 @@ lpfc_linkup(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) 
- for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_linkup_port(vports[i]); lpfc_destroy_vport_work_array(phba, vports); - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + (phba->sli_rev < LPFC_SLI_REV4)) lpfc_issue_clear_la(phba, phba->pport); return 0; @@ -983,10 +988,593 @@ out: return; } +static void +lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_vport *vport = mboxq->vport; + unsigned long flags; + + if (mboxq->u.mb.mbxStatus) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, + "2017 REG_FCFI mbxStatus error x%x " + "HBA state x%x\n", + mboxq->u.mb.mbxStatus, vport->port_state); + mempool_free(mboxq, phba->mbox_mem_pool); + return; + } + + /* Start FCoE discovery by sending a FLOGI. */ + phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi); + /* Set the FCFI registered flag */ + spin_lock_irqsave(&phba->hbalock, flags); + phba->fcf.fcf_flag |= FCF_REGISTERED; + spin_unlock_irqrestore(&phba->hbalock, flags); + if (vport->port_state != LPFC_FLOGI) { + spin_lock_irqsave(&phba->hbalock, flags); + phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_initial_flogi(vport); + } + + mempool_free(mboxq, phba->mbox_mem_pool); + return; +} + +/** + * lpfc_fab_name_match - Check if the fcf fabric name match. + * @fab_name: pointer to fabric name. + * @new_fcf_record: pointer to fcf record. + * + * This routine compare the fcf record's fabric name with provided + * fabric name. If the fabric name are identical this function + * returns 1 else return 0. + **/ +static uint32_t +lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) +{ + if ((fab_name[0] == + bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) && + (fab_name[1] == + bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) && + (fab_name[2] == + bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) && + (fab_name[3] == + bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) && + (fab_name[4] == + bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) && + (fab_name[5] == + bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) && + (fab_name[6] == + bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) && + (fab_name[7] == + bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))) + return 1; + else + return 0; +} + +/** + * lpfc_mac_addr_match - Check if the fcf mac address match. + * @phba: pointer to lpfc hba data structure. + * @new_fcf_record: pointer to fcf record. + * + * This routine compare the fcf record's mac address with HBA's + * FCF mac address. If the mac addresses are identical this function + * returns 1 else return 0. + **/ +static uint32_t +lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) +{ + if ((phba->fcf.mac_addr[0] == + bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) && + (phba->fcf.mac_addr[1] == + bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) && + (phba->fcf.mac_addr[2] == + bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) && + (phba->fcf.mac_addr[3] == + bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) && + (phba->fcf.mac_addr[4] == + bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) && + (phba->fcf.mac_addr[5] == + bf_get(lpfc_fcf_record_mac_5, new_fcf_record))) + return 1; + else + return 0; +} + +/** + * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. + * @phba: pointer to lpfc hba data structure. + * @new_fcf_record: pointer to fcf record. 
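+ * (an entry from READ_FCF_TABLE that has already been byte-swapped
+ * via lpfc_sli_pcimem_bcopy(), so the bf_get() accessors apply)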
+ * + * This routine copies the FCF information from the FCF + * record to lpfc_hba data structure. + **/ +static void +lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) +{ + phba->fcf.fabric_name[0] = + bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record); + phba->fcf.fabric_name[1] = + bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record); + phba->fcf.fabric_name[2] = + bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record); + phba->fcf.fabric_name[3] = + bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record); + phba->fcf.fabric_name[4] = + bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record); + phba->fcf.fabric_name[5] = + bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record); + phba->fcf.fabric_name[6] = + bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record); + phba->fcf.fabric_name[7] = + bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record); + phba->fcf.mac_addr[0] = + bf_get(lpfc_fcf_record_mac_0, new_fcf_record); + phba->fcf.mac_addr[1] = + bf_get(lpfc_fcf_record_mac_1, new_fcf_record); + phba->fcf.mac_addr[2] = + bf_get(lpfc_fcf_record_mac_2, new_fcf_record); + phba->fcf.mac_addr[3] = + bf_get(lpfc_fcf_record_mac_3, new_fcf_record); + phba->fcf.mac_addr[4] = + bf_get(lpfc_fcf_record_mac_4, new_fcf_record); + phba->fcf.mac_addr[5] = + bf_get(lpfc_fcf_record_mac_5, new_fcf_record); + phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); + phba->fcf.priority = new_fcf_record->fip_priority; +} + +/** + * lpfc_register_fcf - Register the FCF with hba. + * @phba: pointer to lpfc hba data structure. + * + * This routine issues a register fcfi mailbox command to register + * the fcf with HBA. + **/ +static void +lpfc_register_fcf(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *fcf_mbxq; + int rc; + unsigned long flags; + + spin_lock_irqsave(&phba->hbalock, flags); + + /* If the FCF is not availabe do nothing. */ + if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { + spin_unlock_irqrestore(&phba->hbalock, flags); + return; + } + + /* The FCF is already registered, start discovery */ + if (phba->fcf.fcf_flag & FCF_REGISTERED) { + phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (phba->pport->port_state != LPFC_FLOGI) + lpfc_initial_flogi(phba->pport); + return; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + + fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!fcf_mbxq) + return; + + lpfc_reg_fcfi(phba, fcf_mbxq); + fcf_mbxq->vport = phba->pport; + fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; + rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + mempool_free(fcf_mbxq, phba->mbox_mem_pool); + + return; +} + +/** + * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery. + * @phba: pointer to lpfc hba data structure. + * @new_fcf_record: pointer to fcf record. + * @boot_flag: Indicates if this record used by boot bios. + * @addr_mode: The address mode to be used by this FCF + * + * This routine compare the fcf record with connect list obtained from the + * config region to decide if this FCF can be used for SAN discovery. It returns + * 1 if this record can be used for SAN discovery else return zero. If this FCF + * record can be used for SAN discovery, the boot_flag will indicate if this FCF + * is used by boot bios and addr_mode will indicate the addressing mode to be + * used for this FCF when the function returns. + * If the FCF record need to be used with a particular vlan id, the vlan is + * set in the vlan_id on return of the function. 
If not VLAN tagging need to + * be used with the FCF vlan_id will be set to 0xFFFF; + **/ +static int +lpfc_match_fcf_conn_list(struct lpfc_hba *phba, + struct fcf_record *new_fcf_record, + uint32_t *boot_flag, uint32_t *addr_mode, + uint16_t *vlan_id) +{ + struct lpfc_fcf_conn_entry *conn_entry; + + if (!phba->cfg_enable_fip) { + *boot_flag = 0; + *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, + new_fcf_record); + if (phba->valid_vlan) + *vlan_id = phba->vlan_id; + else + *vlan_id = 0xFFFF; + return 1; + } + + /* + * If there are no FCF connection table entry, driver connect to all + * FCFs. + */ + if (list_empty(&phba->fcf_conn_rec_list)) { + *boot_flag = 0; + *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, + new_fcf_record); + *vlan_id = 0xFFFF; + return 1; + } + + list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) { + if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID)) + continue; + + if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) && + !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name, + new_fcf_record)) + continue; + + if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) { + /* + * If the vlan bit map does not have the bit set for the + * vlan id to be used, then it is not a match. + */ + if (!(new_fcf_record->vlan_bitmap + [conn_entry->conn_rec.vlan_tag / 8] & + (1 << (conn_entry->conn_rec.vlan_tag % 8)))) + continue; + } + + /* + * Check if the connection record specifies a required + * addressing mode. + */ + if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && + !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) { + + /* + * If SPMA required but FCF not support this continue. + */ + if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && + !(bf_get(lpfc_fcf_record_mac_addr_prov, + new_fcf_record) & LPFC_FCF_SPMA)) + continue; + + /* + * If FPMA required but FCF not support this continue. + */ + if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && + !(bf_get(lpfc_fcf_record_mac_addr_prov, + new_fcf_record) & LPFC_FCF_FPMA)) + continue; + } + + /* + * This fcf record matches filtering criteria. + */ + if (conn_entry->conn_rec.flags & FCFCNCT_BOOT) + *boot_flag = 1; + else + *boot_flag = 0; + + *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, + new_fcf_record); + /* + * If the user specified a required address mode, assign that + * address mode + */ + if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && + (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED))) + *addr_mode = (conn_entry->conn_rec.flags & + FCFCNCT_AM_SPMA) ? + LPFC_FCF_SPMA : LPFC_FCF_FPMA; + /* + * If the user specified a prefered address mode, use the + * addr mode only if FCF support the addr_mode. + */ + else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && + (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && + (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && + (*addr_mode & LPFC_FCF_SPMA)) + *addr_mode = LPFC_FCF_SPMA; + else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && + (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && + !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && + (*addr_mode & LPFC_FCF_FPMA)) + *addr_mode = LPFC_FCF_FPMA; + /* + * If user did not specify any addressing mode, use FPMA if + * possible else use SPMA. + */ + else if (*addr_mode & LPFC_FCF_FPMA) + *addr_mode = LPFC_FCF_FPMA; + + if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) + *vlan_id = conn_entry->conn_rec.vlan_tag; + else + *vlan_id = 0xFFFF; + + return 1; + } + + return 0; +} + +/** + * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox. 
+ * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox object. + * + * This function iterate through all the fcf records available in + * HBA and choose the optimal FCF record for discovery. After finding + * the FCF for discovery it register the FCF record and kick start + * discovery. + * If FCF_IN_USE flag is set in currently used FCF, the routine try to + * use a FCF record which match fabric name and mac address of the + * currently used FCF record. + * If the driver support only one FCF, it will try to use the FCF record + * used by BOOT_BIOS. + */ +void +lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + void *virt_addr; + dma_addr_t phys_addr; + uint8_t *bytep; + struct lpfc_mbx_sge sge; + struct lpfc_mbx_read_fcf_tbl *read_fcf; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + struct fcf_record *new_fcf_record; + int rc; + uint32_t boot_flag, addr_mode; + uint32_t next_fcf_index; + unsigned long flags; + uint16_t vlan_id; + + /* Get the first SGE entry from the non-embedded DMA memory. This + * routine only uses a single SGE. + */ + lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); + phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); + if (unlikely(!mboxq->sge_array)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2524 Failed to get the non-embedded SGE " + "virtual address\n"); + goto out; + } + virt_addr = mboxq->sge_array->addr[0]; + + shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, + &shdr->response); + /* + * The FCF Record was read and there is no reason for the driver + * to maintain the FCF record data or memory. Instead, just need + * to book keeping the FCFIs can be used. + */ + if (shdr_status || shdr_add_status) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2521 READ_FCF_RECORD mailbox failed " + "with status x%x add_status x%x, mbx\n", + shdr_status, shdr_add_status); + goto out; + } + /* Interpreting the returned information of FCF records */ + read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; + lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, + sizeof(struct lpfc_mbx_read_fcf_tbl)); + next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); + + new_fcf_record = (struct fcf_record *)(virt_addr + + sizeof(struct lpfc_mbx_read_fcf_tbl)); + lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, + sizeof(struct fcf_record)); + bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); + + rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, + &boot_flag, &addr_mode, + &vlan_id); + /* + * If the fcf record does not match with connect list entries + * read the next entry. + */ + if (!rc) + goto read_next_fcf; + /* + * If this is not the first FCF discovery of the HBA, use last + * FCF record for the discovery. + */ + spin_lock_irqsave(&phba->hbalock, flags); + if (phba->fcf.fcf_flag & FCF_IN_USE) { + if (lpfc_fab_name_match(phba->fcf.fabric_name, + new_fcf_record) && + lpfc_mac_addr_match(phba, new_fcf_record)) { + phba->fcf.fcf_flag |= FCF_AVAILABLE; + spin_unlock_irqrestore(&phba->hbalock, flags); + goto out; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + goto read_next_fcf; + } + if (phba->fcf.fcf_flag & FCF_AVAILABLE) { + /* + * If the current FCF record does not have boot flag + * set and new fcf record has boot flag set, use the + * new fcf record. 
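+ * The precedence while scanning is therefore: a usable record with
+ * the boot-BIOS flag wins over the current one, then a record with a
+ * lower fip_priority on a matching fabric, and otherwise the first
+ * available record is kept.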
+ */ + if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { + /* Use this FCF record */ + lpfc_copy_fcf_record(phba, new_fcf_record); + phba->fcf.addr_mode = addr_mode; + phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; + if (vlan_id != 0xFFFF) { + phba->fcf.fcf_flag |= FCF_VALID_VLAN; + phba->fcf.vlan_id = vlan_id; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + goto read_next_fcf; + } + /* + * If the current FCF record has boot flag set and the + * new FCF record does not have boot flag, read the next + * FCF record. + */ + if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { + spin_unlock_irqrestore(&phba->hbalock, flags); + goto read_next_fcf; + } + /* + * If there is a record with lower priority value for + * the current FCF, use that record. + */ + if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record) + && (new_fcf_record->fip_priority < + phba->fcf.priority)) { + /* Use this FCF record */ + lpfc_copy_fcf_record(phba, new_fcf_record); + phba->fcf.addr_mode = addr_mode; + if (vlan_id != 0xFFFF) { + phba->fcf.fcf_flag |= FCF_VALID_VLAN; + phba->fcf.vlan_id = vlan_id; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + goto read_next_fcf; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + goto read_next_fcf; + } + /* + * This is the first available FCF record, use this + * record. + */ + lpfc_copy_fcf_record(phba, new_fcf_record); + phba->fcf.addr_mode = addr_mode; + if (boot_flag) + phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; + phba->fcf.fcf_flag |= FCF_AVAILABLE; + if (vlan_id != 0xFFFF) { + phba->fcf.fcf_flag |= FCF_VALID_VLAN; + phba->fcf.vlan_id = vlan_id; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + goto read_next_fcf; + +read_next_fcf: + lpfc_sli4_mbox_cmd_free(phba, mboxq); + if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) + lpfc_register_fcf(phba); + else + lpfc_sli4_read_fcf_record(phba, next_fcf_index); + return; + +out: + lpfc_sli4_mbox_cmd_free(phba, mboxq); + lpfc_register_fcf(phba); + + return; +} + +/** + * lpfc_start_fdiscs - send fdiscs for each vports on this port. + * @phba: pointer to lpfc hba data structure. + * + * This function loops through the list of vports on the @phba and issues an + * FDISC if possible. 
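+ *
+ * It uses the same work-array walk as the other vport iterators in
+ * this file, roughly:
+ *
+ *	vports = lpfc_create_vport_work_array(phba);
+ *	for (i = 0; vports && i <= phba->max_vports && vports[i]; i++)
+ *		... issue FDISC or set the vport state ...
+ *	lpfc_destroy_vport_work_array(phba, vports);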
+ */ +void +lpfc_start_fdiscs(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + int i; + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + if (vports[i]->port_type == LPFC_PHYSICAL_PORT) + continue; + /* There are no vpi for this vport */ + if (vports[i]->vpi > phba->max_vpi) { + lpfc_vport_set_state(vports[i], + FC_VPORT_FAILED); + continue; + } + if (phba->fc_topology == TOPOLOGY_LOOP) { + lpfc_vport_set_state(vports[i], + FC_VPORT_LINKDOWN); + continue; + } + if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) + lpfc_initial_fdisc(vports[i]); + else { + lpfc_vport_set_state(vports[i], + FC_VPORT_NO_FABRIC_SUPP); + lpfc_printf_vlog(vports[i], KERN_ERR, + LOG_ELS, + "0259 No NPIV " + "Fabric support\n"); + } + } + } + lpfc_destroy_vport_work_array(phba, vports); +} + +void +lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_dmabuf *dmabuf = mboxq->context1; + struct lpfc_vport *vport = mboxq->vport; + + if (mboxq->u.mb.mbxStatus) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, + "2018 REG_VFI mbxStatus error x%x " + "HBA state x%x\n", + mboxq->u.mb.mbxStatus, vport->port_state); + if (phba->fc_topology == TOPOLOGY_LOOP) { + /* FLOGI failed, use loop map to make discovery list */ + lpfc_disc_list_loopmap(vport); + /* Start discovery */ + lpfc_disc_start(vport); + goto fail_free_mem; + } + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + goto fail_free_mem; + } + /* Mark the vport has registered with its VFI */ + vport->vfi_state |= LPFC_VFI_REGISTERED; + + if (vport->port_state == LPFC_FABRIC_CFG_LINK) { + lpfc_start_fdiscs(phba); + lpfc_do_scr_ns_plogi(phba, vport); + } + +fail_free_mem: + mempool_free(mboxq, phba->mbox_mem_pool); + lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + return; +} + static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; struct lpfc_vport *vport = pmb->vport; @@ -1037,13 +1625,13 @@ static void lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) { struct lpfc_vport *vport = phba->pport; - LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; + LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL; int i; struct lpfc_dmabuf *mp; int rc; + struct fcf_record *fcf_record; sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); spin_lock_irq(&phba->hbalock); switch (la->UlnkSpeed) { @@ -1140,22 +1728,66 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(sparam_mbox, phba->mbox_mem_pool); - if (cfglink_mbox) - mempool_free(cfglink_mbox, phba->mbox_mem_pool); goto out; } } - if (cfglink_mbox) { + if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) { + cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!cfglink_mbox) + goto out; vport->port_state = LPFC_LOCAL_CFG_LINK; lpfc_config_link(phba, cfglink_mbox); cfglink_mbox->vport = vport; cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); - if (rc != MBX_NOT_FINISHED) - return; - mempool_free(cfglink_mbox, phba->mbox_mem_pool); + if (rc == MBX_NOT_FINISHED) { + mempool_free(cfglink_mbox, phba->mbox_mem_pool); + goto out; + } + } else { + /* + * Add the driver's default FCF record at FCF index 0 now. 
This + * is phase 1 implementation that support FCF index 0 and driver + * defaults. + */ + if (phba->cfg_enable_fip == 0) { + fcf_record = kzalloc(sizeof(struct fcf_record), + GFP_KERNEL); + if (unlikely(!fcf_record)) { + lpfc_printf_log(phba, KERN_ERR, + LOG_MBOX | LOG_SLI, + "2554 Could not allocate memmory for " + "fcf record\n"); + rc = -ENODEV; + goto out; + } + + lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, + LPFC_FCOE_FCF_DEF_INDEX); + rc = lpfc_sli4_add_fcf_record(phba, fcf_record); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, + LOG_MBOX | LOG_SLI, + "2013 Could not manually add FCF " + "record 0, status %d\n", rc); + rc = -ENODEV; + kfree(fcf_record); + goto out; + } + kfree(fcf_record); + } + /* + * The driver is expected to do FIP/FCF. Call the port + * and get the FCF Table. + */ + rc = lpfc_sli4_read_fcf_record(phba, + LPFC_FCOE_FCF_GET_FIRST); + if (rc) + goto out; } + + return; out: lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, @@ -1186,6 +1818,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba) { lpfc_linkdown(phba); lpfc_enable_la(phba); + lpfc_unregister_unused_fcf(phba); /* turn on Link Attention interrupts - no CLEAR_LA needed */ } @@ -3330,3 +3963,395 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) return 1; return 0; } + +/** + * lpfc_fcf_inuse - Check if FCF can be unregistered. + * @phba: Pointer to hba context object. + * + * This function iterate through all FC nodes associated + * will all vports to check if there is any node with + * fc_rports associated with it. If there is an fc_rport + * associated with the node, then the node is either in + * discovered state or its devloss_timer is pending. + */ +static int +lpfc_fcf_inuse(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + int i, ret = 0; + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost; + + vports = lpfc_create_vport_work_array(phba); + + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + shost = lpfc_shost_from_vport(vports[i]); + spin_lock_irq(shost->host_lock); + list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { + if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport && + (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { + ret = 1; + spin_unlock_irq(shost->host_lock); + goto out; + } + } + spin_unlock_irq(shost->host_lock); + } +out: + lpfc_destroy_vport_work_array(phba, vports); + return ret; +} + +/** + * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi. + * @phba: Pointer to hba context object. + * @mboxq: Pointer to mailbox object. + * + * This function frees memory associated with the mailbox command. + */ +static void +lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_vport *vport = mboxq->vport; + + if (mboxq->u.mb.mbxStatus) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + "2555 UNREG_VFI mbxStatus error x%x " + "HBA state x%x\n", + mboxq->u.mb.mbxStatus, vport->port_state); + } + mempool_free(mboxq, phba->mbox_mem_pool); + return; +} + +/** + * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi. + * @phba: Pointer to hba context object. + * @mboxq: Pointer to mailbox object. + * + * This function frees memory associated with the mailbox command. 
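+ * A failed UNREG_FCFI is only logged; no retry is attempted, matching
+ * the UNREG_VFI completion above.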
+ */ +static void +lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_vport *vport = mboxq->vport; + + if (mboxq->u.mb.mbxStatus) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + "2550 UNREG_FCFI mbxStatus error x%x " + "HBA state x%x\n", + mboxq->u.mb.mbxStatus, vport->port_state); + } + mempool_free(mboxq, phba->mbox_mem_pool); + return; +} + +/** + * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected. + * @phba: Pointer to hba context object. + * + * This function check if there are any connected remote port for the FCF and + * if all the devices are disconnected, this function unregister FCFI. + * This function also tries to use another FCF for discovery. + */ +void +lpfc_unregister_unused_fcf(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mbox; + int rc; + struct lpfc_vport **vports; + int i; + + spin_lock_irq(&phba->hbalock); + /* + * If HBA is not running in FIP mode or + * If HBA does not support FCoE or + * If FCF is not registered. + * do nothing. + */ + if (!(phba->hba_flag & HBA_FCOE_SUPPORT) || + !(phba->fcf.fcf_flag & FCF_REGISTERED) || + (phba->cfg_enable_fip == 0)) { + spin_unlock_irq(&phba->hbalock); + return; + } + spin_unlock_irq(&phba->hbalock); + + if (lpfc_fcf_inuse(phba)) + return; + + + /* Unregister VPIs */ + vports = lpfc_create_vport_work_array(phba); + if (vports && + (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + lpfc_mbx_unreg_vpi(vports[i]); + vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; + } + lpfc_destroy_vport_work_array(phba, vports); + + /* Unregister VFI */ + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + "2556 UNREG_VFI mbox allocation failed" + "HBA state x%x\n", + phba->pport->port_state); + return; + } + + lpfc_unreg_vfi(mbox, phba->pport->vfi); + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl; + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + "2557 UNREG_VFI issue mbox failed rc x%x " + "HBA state x%x\n", + rc, phba->pport->port_state); + mempool_free(mbox, phba->mbox_mem_pool); + return; + } + + /* Unregister FCF */ + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + "2551 UNREG_FCFI mbox allocation failed" + "HBA state x%x\n", + phba->pport->port_state); + return; + } + + lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + "2552 UNREG_FCFI issue mbox failed rc x%x " + "HBA state x%x\n", + rc, phba->pport->port_state); + mempool_free(mbox, phba->mbox_mem_pool); + return; + } + + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED | + FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE | + FCF_VALID_VLAN); + spin_unlock_irq(&phba->hbalock); + + /* + * If driver is not unloading, check if there is any other + * FCF record that can be used for discovery. 
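+ * That is done by restarting the table scan from the beginning:
+ *
+ *	lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);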
+ */ + if ((phba->pport->load_flag & FC_UNLOADING) || + (phba->link_state < LPFC_LINK_UP)) + return; + + rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); + + if (rc) + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + "2553 lpfc_unregister_unused_fcf failed to read FCF" + " record HBA state x%x\n", + phba->pport->port_state); +} + +/** + * lpfc_read_fcf_conn_tbl - Create driver FCF connection table. + * @phba: Pointer to hba context object. + * @buff: Buffer containing the FCF connection table as in the config + * region. + * This function create driver data structure for the FCF connection + * record table read from config region 23. + */ +static void +lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, + uint8_t *buff) +{ + struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; + struct lpfc_fcf_conn_hdr *conn_hdr; + struct lpfc_fcf_conn_rec *conn_rec; + uint32_t record_count; + int i; + + /* Free the current connect table */ + list_for_each_entry_safe(conn_entry, next_conn_entry, + &phba->fcf_conn_rec_list, list) + kfree(conn_entry); + + conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; + record_count = conn_hdr->length * sizeof(uint32_t)/ + sizeof(struct lpfc_fcf_conn_rec); + + conn_rec = (struct lpfc_fcf_conn_rec *) + (buff + sizeof(struct lpfc_fcf_conn_hdr)); + + for (i = 0; i < record_count; i++) { + if (!(conn_rec[i].flags & FCFCNCT_VALID)) + continue; + conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry), + GFP_KERNEL); + if (!conn_entry) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2566 Failed to allocate connection" + " table entry\n"); + return; + } + + memcpy(&conn_entry->conn_rec, &conn_rec[i], + sizeof(struct lpfc_fcf_conn_rec)); + conn_entry->conn_rec.vlan_tag = + le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF; + conn_entry->conn_rec.flags = + le16_to_cpu(conn_entry->conn_rec.flags); + list_add_tail(&conn_entry->list, + &phba->fcf_conn_rec_list); + } +} + +/** + * lpfc_read_fcoe_param - Read FCoe parameters from conf region.. + * @phba: Pointer to hba context object. + * @buff: Buffer containing the FCoE parameter data structure. + * + * This function update driver data structure with config + * parameters read from config region 23. + */ +static void +lpfc_read_fcoe_param(struct lpfc_hba *phba, + uint8_t *buff) +{ + struct lpfc_fip_param_hdr *fcoe_param_hdr; + struct lpfc_fcoe_params *fcoe_param; + + fcoe_param_hdr = (struct lpfc_fip_param_hdr *) + buff; + fcoe_param = (struct lpfc_fcoe_params *) + buff + sizeof(struct lpfc_fip_param_hdr); + + if ((fcoe_param_hdr->parm_version != FIPP_VERSION) || + (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) + return; + + if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) == + FIPP_MODE_ON) + phba->cfg_enable_fip = 1; + + if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) == + FIPP_MODE_OFF) + phba->cfg_enable_fip = 0; + + if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { + phba->valid_vlan = 1; + phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & + 0xFFF; + } + + phba->fc_map[0] = fcoe_param->fc_map[0]; + phba->fc_map[1] = fcoe_param->fc_map[1]; + phba->fc_map[2] = fcoe_param->fc_map[2]; + return; +} + +/** + * lpfc_get_rec_conf23 - Get a record type in config region data. + * @buff: Buffer containing config region 23 data. + * @size: Size of the data buffer. + * @rec_type: Record type to be searched. + * + * This function searches config region data to find the begining + * of the record specified by record_type. If record found, this + * function return pointer to the record else return NULL. 
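+ *
+ * Each TLV record is a one-word header (type byte, then length in
+ * words) followed by the data words, so stepping to the next record
+ * looks like:
+ *
+ *	rec_length = buff[offset + 1];
+ *	offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);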
+ */ +static uint8_t * +lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type) +{ + uint32_t offset = 0, rec_length; + + if ((buff[0] == LPFC_REGION23_LAST_REC) || + (size < sizeof(uint32_t))) + return NULL; + + rec_length = buff[offset + 1]; + + /* + * One TLV record has one word header and number of data words + * specified in the rec_length field of the record header. + */ + while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t)) + <= size) { + if (buff[offset] == rec_type) + return &buff[offset]; + + if (buff[offset] == LPFC_REGION23_LAST_REC) + return NULL; + + offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t); + rec_length = buff[offset + 1]; + } + return NULL; +} + +/** + * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23. + * @phba: Pointer to lpfc_hba data structure. + * @buff: Buffer containing config region 23 data. + * @size: Size of the data buffer. + * + * This fuction parse the FCoE config parameters in config region 23 and + * populate driver data structure with the parameters. + */ +void +lpfc_parse_fcoe_conf(struct lpfc_hba *phba, + uint8_t *buff, + uint32_t size) +{ + uint32_t offset = 0, rec_length; + uint8_t *rec_ptr; + + /* + * If data size is less than 2 words signature and version cannot be + * verified. + */ + if (size < 2*sizeof(uint32_t)) + return; + + /* Check the region signature first */ + if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2567 Config region 23 has bad signature\n"); + return; + } + + offset += 4; + + /* Check the data structure version */ + if (buff[offset] != LPFC_REGION23_VERSION) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2568 Config region 23 has bad version\n"); + return; + } + offset += 4; + + rec_length = buff[offset + 1]; + + /* Read FCoE param record */ + rec_ptr = lpfc_get_rec_conf23(&buff[offset], + size - offset, FCOE_PARAM_TYPE); + if (rec_ptr) + lpfc_read_fcoe_param(phba, rec_ptr); + + /* Read FCF connection table */ + rec_ptr = lpfc_get_rec_conf23(&buff[offset], + size - offset, FCOE_CONN_TBL_TYPE); + if (rec_ptr) + lpfc_read_fcf_conn_tbl(phba, rec_ptr); + +} diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 6efe459e8ddf..2c7eba686262 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -363,7 +363,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (!mbox) goto out; - rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID, + rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID, (uint8_t *) sp, mbox, 0); if (rc) { mempool_free(mbox, phba->mbox_mem_pool); @@ -497,11 +497,19 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); else lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + if ((ndlp->nlp_type & NLP_FABRIC) && + vport->port_type == LPFC_NPIV_PORT) { + lpfc_linkdown_port(vport); + mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_DELAY_TMO; + spin_unlock_irq(shost->host_lock); - if ((!(ndlp->nlp_type & NLP_FABRIC) && - ((ndlp->nlp_type & NLP_FCP_TARGET) || - !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || - (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { + ndlp->nlp_last_elscmd = ELS_CMD_FDISC; + } else if ((!(ndlp->nlp_type & NLP_FABRIC) && + ((ndlp->nlp_type & NLP_FCP_TARGET) || + !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || + (ndlp->nlp_state == 
NLP_STE_ADISC_ISSUE)) { /* Only try to re-login if this is NOT a Fabric Node */ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); spin_lock_irq(shost->host_lock); @@ -569,7 +577,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - if (!ndlp->nlp_rpi) { + if (!(ndlp->nlp_flag & NLP_RPI_VALID)) { ndlp->nlp_flag &= ~NLP_NPR_ADISC; return 0; } @@ -859,7 +867,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, lpfc_unreg_rpi(vport, ndlp); - if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID, + if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID, (uint8_t *) sp, mbox, 0) == 0) { switch (ndlp->nlp_DID) { case NameServer_DID: @@ -1070,6 +1078,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, *rspiocb; IOCB_t *irsp; ADISC *ap; + int rc; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; @@ -1095,6 +1104,15 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, return ndlp->nlp_state; } + if (phba->sli_rev == LPFC_SLI_REV4) { + rc = lpfc_sli4_resume_rpi(ndlp); + if (rc) { + /* Stay in state and retry. */ + ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; + return ndlp->nlp_state; + } + } + if (ndlp->nlp_type & NLP_FCP_TARGET) { ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); @@ -1102,6 +1120,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } + return ndlp->nlp_state; } @@ -1285,6 +1304,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, } ndlp->nlp_rpi = mb->un.varWords[0]; + ndlp->nlp_flag |= NLP_RPI_VALID; /* Only if we are not a fabric nport do we issue PRLI */ if (!(ndlp->nlp_type & NLP_FABRIC)) { diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index b53af9936282..9645d32e6f87 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -10929,3 +10929,520 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba) }; return 0; } + +/** + * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to post rpi header templates to the + * HBA consistent with the SLI-4 interface spec. This routine + * posts a PAGE_SIZE memory region to the port to hold up to + * PAGE_SIZE modulo 64 rpi context headers. + * + * This routine does not require any locks. It's usage is expected + * to be driver load or reset recovery when the driver is + * sequential. + * + * Return codes + * 0 - sucessful + * EIO - The mailbox failed to complete successfully. + * When this error occurs, the driver is not guaranteed + * to have any rpi regions posted to the device and + * must either attempt to repost the regions or take a + * fatal error. + **/ +int +lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) +{ + struct lpfc_rpi_hdr *rpi_page; + uint32_t rc = 0; + + /* Post all rpi memory regions to the port. */ + list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { + rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2008 Error %d posting all rpi " + "headers\n", rc); + rc = -EIO; + break; + } + } + + return rc; +} + +/** + * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port + * @phba: pointer to lpfc hba data structure. 
+ * @rpi_page: pointer to the rpi memory region. + * + * This routine is invoked to post a single rpi header to the + * HBA consistent with the SLI-4 interface spec. This memory region + * maps up to 64 rpi context regions. + * + * Return codes + * 0 - sucessful + * ENOMEM - No available memory + * EIO - The mailbox failed to complete successfully. + **/ +int +lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) +{ + LPFC_MBOXQ_t *mboxq; + struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; + uint32_t rc = 0; + uint32_t mbox_tmo; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + /* The port is notified of the header region via a mailbox command. */ + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2001 Unable to allocate memory for issuing " + "SLI_CONFIG_SPECIAL mailbox command\n"); + return -ENOMEM; + } + + /* Post all rpi memory regions to the port. */ + hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; + mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, + sizeof(struct lpfc_mbx_post_hdr_tmpl) - + sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, + hdr_tmpl, rpi_page->page_count); + bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, + rpi_page->start_rpi); + hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); + hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + else + rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); + shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (rc != MBX_TIMEOUT) + mempool_free(mboxq, phba->mbox_mem_pool); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2514 POST_RPI_HDR mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + rc = -ENXIO; + } + return rc; +} + +/** + * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to post rpi header templates to the + * HBA consistent with the SLI-4 interface spec. This routine + * posts a PAGE_SIZE memory region to the port to hold up to + * PAGE_SIZE modulo 64 rpi context headers. + * + * Returns + * A nonzero rpi defined as rpi_base <= rpi < max_rpi if sucessful + * LPFC_RPI_ALLOC_ERROR if no rpis are available. + **/ +int +lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) +{ + int rpi; + uint16_t max_rpi, rpi_base, rpi_limit; + uint16_t rpi_remaining; + struct lpfc_rpi_hdr *rpi_hdr; + + max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; + rpi_base = phba->sli4_hba.max_cfg_param.rpi_base; + rpi_limit = phba->sli4_hba.next_rpi; + + /* + * The valid rpi range is not guaranteed to be zero-based. Start + * the search at the rpi_base as reported by the port. 
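+ * For example, with an rpi_base of 64 and an rpi_limit of 128 only
+ * bits [64, 128) of rpi_bmask are ever scanned:
+ *
+ *	rpi = find_next_zero_bit(rpi_bmask, rpi_limit, rpi_base);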
+ */ + spin_lock_irq(&phba->hbalock); + rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base); + if (rpi >= rpi_limit || rpi < rpi_base) + rpi = LPFC_RPI_ALLOC_ERROR; + else { + set_bit(rpi, phba->sli4_hba.rpi_bmask); + phba->sli4_hba.max_cfg_param.rpi_used++; + phba->sli4_hba.rpi_count++; + } + + /* + * Don't try to allocate more rpi header regions if the device limit + * on available rpis max has been exhausted. + */ + if ((rpi == LPFC_RPI_ALLOC_ERROR) && + (phba->sli4_hba.rpi_count >= max_rpi)) { + spin_unlock_irq(&phba->hbalock); + return rpi; + } + + /* + * If the driver is running low on rpi resources, allocate another + * page now. Note that the next_rpi value is used because + * it represents how many are actually in use whereas max_rpi notes + * how many are supported max by the device. + */ + rpi_remaining = phba->sli4_hba.next_rpi - rpi_base - + phba->sli4_hba.rpi_count; + spin_unlock_irq(&phba->hbalock); + if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { + rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); + if (!rpi_hdr) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2002 Error Could not grow rpi " + "count\n"); + } else { + lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); + } + } + + return rpi; +} + +/** + * lpfc_sli4_free_rpi - Release an rpi for reuse. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to release an rpi to the pool of + * available rpis maintained by the driver. + **/ +void +lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) +{ + spin_lock_irq(&phba->hbalock); + clear_bit(rpi, phba->sli4_hba.rpi_bmask); + phba->sli4_hba.rpi_count--; + phba->sli4_hba.max_cfg_param.rpi_used--; + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli4_remove_rpis - Remove the rpi bitmask region + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to remove the memory region that + * provided rpi via a bitmask. + **/ +void +lpfc_sli4_remove_rpis(struct lpfc_hba *phba) +{ + kfree(phba->sli4_hba.rpi_bmask); +} + +/** + * lpfc_sli4_resume_rpi - Remove the rpi bitmask region + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to remove the memory region that + * provided rpi via a bitmask. + **/ +int +lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp) +{ + LPFC_MBOXQ_t *mboxq; + struct lpfc_hba *phba = ndlp->phba; + int rc; + + /* The port is notified of the header region via a mailbox command. */ + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + /* Post all rpi memory regions to the port. */ + lpfc_resume_rpi(mboxq, ndlp); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2010 Resume RPI Mailbox failed " + "status %d, mbxStatus x%x\n", rc, + bf_get(lpfc_mqe_status, &mboxq->u.mqe)); + mempool_free(mboxq, phba->mbox_mem_pool); + return -EIO; + } + return 0; +} + +/** + * lpfc_sli4_init_vpi - Initialize a vpi with the port + * @phba: pointer to lpfc hba data structure. + * @vpi: vpi value to activate with the port. + * + * This routine is invoked to activate a vpi with the + * port when the host intends to use vports with a + * nonzero vpi. 
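+ * Unlike most mailbox commands in this file, INIT_VPI is issued
+ * synchronously through lpfc_sli_issue_mbox_wait(), so the result is
+ * known before the caller continues.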
+ * + * Returns: + * 0 success + * -Evalue otherwise + **/ +int +lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi) +{ + LPFC_MBOXQ_t *mboxq; + int rc = 0; + uint32_t mbox_tmo; + + if (vpi == 0) + return -EINVAL; + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + lpfc_init_vpi(mboxq, vpi); + mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); + rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); + if (rc != MBX_TIMEOUT) + mempool_free(mboxq, phba->mbox_mem_pool); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2022 INIT VPI Mailbox failed " + "status %d, mbxStatus x%x\n", rc, + bf_get(lpfc_mqe_status, &mboxq->u.mqe)); + rc = -EIO; + } + return rc; +} + +/** + * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. + * @phba: pointer to lpfc hba data structure. + * @mboxq: Pointer to mailbox object. + * + * This routine is invoked to manually add a single FCF record. The caller + * must pass a completely initialized FCF_Record. This routine takes + * care of the nonembedded mailbox operations. + **/ +static void +lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + void *virt_addr; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t shdr_status, shdr_add_status; + + virt_addr = mboxq->sge_array->addr[0]; + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + + if ((shdr_status || shdr_add_status) && + (shdr_status != STATUS_FCF_IN_USE)) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2558 ADD_FCF_RECORD mailbox failed with " + "status x%x add_status x%x\n", + shdr_status, shdr_add_status); + + lpfc_sli4_mbox_cmd_free(phba, mboxq); +} + +/** + * lpfc_sli4_add_fcf_record - Manually add an FCF Record. + * @phba: pointer to lpfc hba data structure. + * @fcf_record: pointer to the initialized fcf record to add. + * + * This routine is invoked to manually add a single FCF record. The caller + * must pass a completely initialized FCF_Record. This routine takes + * care of the nonembedded mailbox operations. + **/ +int +lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) +{ + int rc = 0; + LPFC_MBOXQ_t *mboxq; + uint8_t *bytep; + void *virt_addr; + dma_addr_t phys_addr; + struct lpfc_mbx_sge sge; + uint32_t alloc_len, req_len; + uint32_t fcfindex; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2009 Failed to allocate mbox for ADD_FCF cmd\n"); + return -ENOMEM; + } + + req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + + sizeof(uint32_t); + + /* Allocate DMA memory and set up the non-embedded mailbox command */ + alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_ADD_FCF, + req_len, LPFC_SLI4_MBX_NEMBED); + if (alloc_len < req_len) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2523 Allocated DMA memory size (x%x) is " + "less than the requested DMA memory " + "size (x%x)\n", alloc_len, req_len); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return -ENOMEM; + } + + /* + * Get the first SGE entry from the non-embedded DMA memory. This + * routine only uses a single SGE. 
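+ * The non-embedded payload it points at is laid out as the config
+ * subheader, one word holding the fcf_index, then the byte-swapped
+ * record itself:
+ *
+ *	union lpfc_sli4_cfg_shdr	header
+ *	uint32_t			fcf_index
+ *	struct fcf_record		record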
+ */ + lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); + phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); + if (unlikely(!mboxq->sge_array)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2526 Failed to get the non-embedded SGE " + "virtual address\n"); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return -ENOMEM; + } + virt_addr = mboxq->sge_array->addr[0]; + /* + * Configure the FCF record for FCFI 0. This is the driver's + * hardcoded default and gets used in non-FIP mode. + */ + fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); + bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); + lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); + + /* + * Copy the fcf_index and the FCF Record Data. The data starts after + * the FCoE header plus word10. The data copy needs to be endian + * correct. + */ + bytep += sizeof(uint32_t); + lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2515 ADD_FCF_RECORD mailbox failed with " + "status 0x%x\n", rc); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + rc = -EIO; + } else + rc = 0; + + return rc; +} + +/** + * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. + * @phba: pointer to lpfc hba data structure. + * @fcf_record: pointer to the fcf record to write the default data. + * @fcf_index: FCF table entry index. + * + * This routine is invoked to build the driver's default FCF record. The + * values used are hardcoded. This routine handles memory initialization. + * + **/ +void +lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, + struct fcf_record *fcf_record, + uint16_t fcf_index) +{ + memset(fcf_record, 0, sizeof(struct fcf_record)); + fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; + fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; + fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; + bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); + bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); + bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); + bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); + bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); + bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); + bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); + bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); + bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); + bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); + bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); + bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, + LPFC_FCF_FPMA | LPFC_FCF_SPMA); + /* Set the VLAN bit map */ + if (phba->valid_vlan) { + fcf_record->vlan_bitmap[phba->vlan_id / 8] + = 1 << (phba->vlan_id % 8); + } +} + +/** + * lpfc_sli4_read_fcf_record - Read an FCF Record from the device. + * @phba: pointer to lpfc hba data structure. + * @fcf_index: FCF table entry offset. + * + * This routine is invoked to read the FCF record at the given @fcf_index + * from the device.
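+ * The read completes asynchronously: lpfc_mbx_cmpl_read_fcf_record() is + * installed below as the mailbox completion handler, so a zero return here + * only means the command was successfully issued.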
+ **/ +int +lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) +{ + int rc = 0, error; + LPFC_MBOXQ_t *mboxq; + void *virt_addr; + dma_addr_t phys_addr; + uint8_t *bytep; + struct lpfc_mbx_sge sge; + uint32_t alloc_len, req_len; + struct lpfc_mbx_read_fcf_tbl *read_fcf; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2000 Failed to allocate mbox for " + "READ_FCF cmd\n"); + return -ENOMEM; + } + + req_len = sizeof(struct fcf_record) + + sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t); + + /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */ + alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len, + LPFC_SLI4_MBX_NEMBED); + + if (alloc_len < req_len) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0291 Allocated DMA memory size (x%x) is " + "less than the requested DMA memory " + "size (x%x)\n", alloc_len, req_len); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return -ENOMEM; + } + + /* Get the first SGE entry from the non-embedded DMA memory. This + * routine only uses a single SGE. + */ + lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); + phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); + if (unlikely(!mboxq->sge_array)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2527 Failed to get the non-embedded SGE " + "virtual address\n"); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return -ENOMEM; + } + virt_addr = mboxq->sge_array->addr[0]; + read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; + + /* Set up command fields */ + bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index); + /* Perform necessary endian conversion */ + bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); + lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t)); + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_sli4_mbox_cmd_free(phba, mboxq); + error = -EIO; + } else + error = 0; + return error; +} -- cgit v1.2.3 From d8e93df13c8f7bde45a7756944aab528c58df4cf Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 22 May 2009 14:53:05 -0400 Subject: [SCSI] lpfc 8.3.2 : Update of copyrights Update of copyrights on modified files Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 2 +- drivers/scsi/lpfc/lpfc_attr.c | 2 +- drivers/scsi/lpfc/lpfc_ct.c | 2 +- drivers/scsi/lpfc/lpfc_debugfs.c | 2 +- drivers/scsi/lpfc/lpfc_els.c | 2 +- drivers/scsi/lpfc/lpfc_hbadisc.c | 2 +- drivers/scsi/lpfc/lpfc_hw.h | 2 +- drivers/scsi/lpfc/lpfc_init.c | 2 +- drivers/scsi/lpfc/lpfc_logmsg.h | 2 +- drivers/scsi/lpfc/lpfc_mbox.c | 2 +- drivers/scsi/lpfc/lpfc_mem.c | 2 +- drivers/scsi/lpfc/lpfc_nportdisc.c | 2 +- drivers/scsi/lpfc/lpfc_scsi.c | 2 +- drivers/scsi/lpfc/lpfc_sli.c | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 13ac108a244c..540569849099 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 270a4c6cd3ac..f032d8b7bb4b 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 51990787796f..38c18518a5e2 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 42ef258c7d52..2b02b1fb39a0 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2007-2008 Emulex. All rights reserved. * + * Copyright (C) 2007-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index a3b56d7f72f4..6bdeb14878a2 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 9bd7a8927a34..d507a581b999 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index a9d64cfbe5cc..02aa016b93e9 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index ff821bb77167..8c30f5707f95 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index 1aa85709b012..db1ba22b8016 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 6aeb1c668e22..29fe5c17f4e8 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 516f4802f84e..e198c917c13e 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 2c7eba686262..09f659f77bb3 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 9af2db355bc6..1a7659c8f388 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. 
* * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 9645d32e6f87..a53c267a067b 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * -- cgit v1.2.3 From f4b4c68f74dcd5da03df851090cad28ad4e8d7cc Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 22 May 2009 14:53:12 -0400 Subject: [SCSI] lpfc 8.3.2 : Miscellaneous Changes Miscellaneous Changes: - Convert from SLI2_ACTIVE flag to more correct SLI_ACTIVE (generic) flag - Reposition log verbose messaging definitions - Update naming for vpi object name from vport slang name - Handle deferred error attention condition - Add 10G link support - Small bug fixup Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_attr.c | 55 ++++++++++++++++++++++------------------ drivers/scsi/lpfc/lpfc_ct.c | 7 +++-- drivers/scsi/lpfc/lpfc_disc.h | 1 + drivers/scsi/lpfc/lpfc_hbadisc.c | 3 +++ drivers/scsi/lpfc/lpfc_init.c | 20 ++++++++++++--- drivers/scsi/lpfc/lpfc_logmsg.h | 52 ++++++++++++++++++++----------------- drivers/scsi/lpfc/lpfc_sli.c | 8 +++--- drivers/scsi/lpfc/lpfc_sli.h | 2 +- drivers/scsi/lpfc/lpfc_vport.c | 2 +- 9 files changed, 91 insertions(+), 59 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index f032d8b7bb4b..46e032aa0bea 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -805,7 +805,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba, */ if (phba->link_state < LPFC_LINK_DOWN || !phba->mbox_mem_pool || - (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) + (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) return 0; if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) @@ -822,7 +822,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba, pmboxq->context1 = NULL; if ((phba->pport->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + (!(psli->sli_flag & LPFC_SLI_ACTIVE))) rc = MBX_NOT_FINISHED; else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); @@ -2045,22 +2045,9 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR, # lpfc_log_verbose: Only turn this flag on if you are willing to risk being # deluged with LOTS of information. # You can set a bit mask to record specific types of verbose messages: -# -# LOG_ELS 0x1 ELS events -# LOG_DISCOVERY 0x2 Link discovery events -# LOG_MBOX 0x4 Mailbox events -# LOG_INIT 0x8 Initialization events -# LOG_LINK_EVENT 0x10 Link events -# LOG_FCP 0x40 FCP traffic history -# LOG_NODE 0x80 Node table events -# LOG_BG 0x200 BlockBuard events -# LOG_MISC 0x400 Miscellaneous events -# LOG_SLI 0x800 SLI events -# LOG_FCP_ERROR 0x1000 Only log FCP errors -# LOG_LIBDFC 0x2000 LIBDFC events -# LOG_ALL_MSG 0xffff LOG all messages +# See lpfc_logmsg.h for definitions.
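+# For example, lpfc_log_verbose=0x5 enables ELS (0x1) and mailbox (0x4) event logging.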
*/ -LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff, +LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff, "Verbose logging bit-mask"); /* @@ -2365,7 +2352,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr, if (vports == NULL) return -ENOMEM; - for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { v_shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(v_shost->host_lock); /* Block and reset data collection */ @@ -2380,7 +2367,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr, phba->bucket_base = base; phba->bucket_step = step; - for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { v_shost = lpfc_shost_from_vport(vports[i]); /* Unblock data collection */ @@ -2397,7 +2384,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr, if (vports == NULL) return -ENOMEM; - for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { v_shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->stat_data_blocked = 1; @@ -3418,7 +3405,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, } if ((vport->fc_flag & FC_OFFLINE_MODE) || - (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){ + (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { spin_unlock_irq(&phba->hbalock); rc = lpfc_sli_issue_mbox (phba, @@ -3646,6 +3633,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost) case LA_8GHZ_LINK: fc_host_speed(shost) = FC_PORTSPEED_8GBIT; break; + case LA_10GHZ_LINK: + fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + break; default: fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; @@ -3713,7 +3703,7 @@ lpfc_get_stats(struct Scsi_Host *shost) */ if (phba->link_state < LPFC_LINK_DOWN || !phba->mbox_mem_pool || - (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) + (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) return NULL; if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) @@ -3756,7 +3746,7 @@ lpfc_get_stats(struct Scsi_Host *shost) pmboxq->vport = vport; if ((vport->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + (!(psli->sli_flag & LPFC_SLI_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); @@ -3838,7 +3828,7 @@ lpfc_reset_stats(struct Scsi_Host *shost) pmboxq->vport = vport; if ((vport->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + (!(psli->sli_flag & LPFC_SLI_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); @@ -3856,7 +3846,7 @@ lpfc_reset_stats(struct Scsi_Host *shost) pmboxq->vport = vport; if ((vport->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + (!(psli->sli_flag & LPFC_SLI_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); @@ -4023,6 +4013,21 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport) lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); } +/** + * lpfc_hba_log_verbose_init - Set hba's log verbose level + * @phba: Pointer to lpfc_hba struct. 
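+ * @verbose: verbose logging bit-mask to store in @phba->cfg_log_verbose.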
+ * + * This function is called by the lpfc_get_cfgparam() routine to copy the + * module parameter lpfc_log_verbose into @phba->cfg_log_verbose so that log + * messages can be filtered by the module's lpfc_log_verbose setting before + * any hba port or vport is created. + **/ +static void +lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose) +{ + phba->cfg_log_verbose = verbose; +} + struct fc_function_template lpfc_transport_functions = { /* fixed attributes the driver supports */ .show_host_node_name = 1, diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 38c18518a5e2..1dbccfd3d022 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1578,6 +1578,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) case LA_8GHZ_LINK: ae->un.PortSpeed = HBA_PORTSPEED_8GBIT; break; + case LA_10GHZ_LINK: + ae->un.PortSpeed = HBA_PORTSPEED_10GBIT; + break; default: ae->un.PortSpeed = HBA_PORTSPEED_UNKNOWN; @@ -1730,7 +1733,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag) uint8_t *fwname; if (vp->rev.rBit) { - if (psli->sli_flag & LPFC_SLI2_ACTIVE) + if (psli->sli_flag & LPFC_SLI_ACTIVE) rev = vp->rev.sli2FwRev; else rev = vp->rev.sli1FwRev; @@ -1756,7 +1759,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag) } b4 = (rev & 0x0000000f); - if (psli->sli_flag & LPFC_SLI2_ACTIVE) + if (psli->sli_flag & LPFC_SLI_ACTIVE) fwname = vp->rev.sli2FwName; else fwname = vp->rev.sli1FwName; diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index ffd108972072..1142070e9484 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -135,6 +135,7 @@ struct lpfc_nodelist { #define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ #define NLP_SC_REQ 0x20000000 /* Target requires authentication */ +#define NLP_RPI_VALID 0x80000000 /* nlp_rpi is valid */ /* ndlp usage management macros */ #define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index d507a581b999..126323a4dcec 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1647,6 +1647,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) case LA_8GHZ_LINK: phba->fc_linkspeed = LA_8GHZ_LINK; break; + case LA_10GHZ_LINK: + phba->fc_linkspeed = LA_10GHZ_LINK; + break; default: phba->fc_linkspeed = LA_UNKNW_LINK; break; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 8c30f5707f95..65cd3fe62200 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -906,7 +906,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) "taking this port offline.\n"); spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); lpfc_offline_prep(phba); @@ -931,13 +931,15 @@ lpfc_offline_eratt(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); lpfc_offline_prep(phba); lpfc_offline(phba); lpfc_reset_barrier(phba); + spin_lock_irq(&phba->hbalock); lpfc_sli_brdreset(phba); + spin_unlock_irq(&phba->hbalock); lpfc_hba_down_post(phba); lpfc_sli_brdready(phba, HS_MBRDY); lpfc_unblock_mgmt_io(phba); @@ -980,6 +982,16
@@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) struct lpfc_sli_ring *pring; struct lpfc_sli *psli = &phba->sli; + /* If the pci channel is offline, ignore possible errors, + * since we cannot communicate with the pci card anyway. + */ + if (pci_channel_offline(phba->pcidev)) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~DEFER_ERATT; + spin_unlock_irq(&phba->hbalock); + return; + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0479 Deferred Adapter Hardware Error " "Data: x%x x%x x%x\n", @@ -987,7 +999,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) phba->work_status[0], phba->work_status[1]); spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); @@ -1097,7 +1109,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) phba->work_status[0], phba->work_status[1]); spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); /* diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index db1ba22b8016..954ba57970a3 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -18,33 +18,39 @@ * included with this package. * *******************************************************************/ -#define LOG_ELS 0x1 /* ELS events */ -#define LOG_DISCOVERY 0x2 /* Link discovery events */ -#define LOG_MBOX 0x4 /* Mailbox events */ -#define LOG_INIT 0x8 /* Initialization events */ -#define LOG_LINK_EVENT 0x10 /* Link events */ -#define LOG_IP 0x20 /* IP traffic history */ -#define LOG_FCP 0x40 /* FCP traffic history */ -#define LOG_NODE 0x80 /* Node table events */ -#define LOG_TEMP 0x100 /* Temperature sensor events */ -#define LOG_BG 0x200 /* BlockGuard events */ -#define LOG_MISC 0x400 /* Miscellaneous events */ -#define LOG_SLI 0x800 /* SLI events */ -#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ -#define LOG_LIBDFC 0x2000 /* Libdfc events */ -#define LOG_VPORT 0x4000 /* NPIV events */ -#define LOG_ALL_MSG 0xffff /* LOG all messages */ +#define LOG_ELS 0x00000001 /* ELS events */ +#define LOG_DISCOVERY 0x00000002 /* Link discovery events */ +#define LOG_MBOX 0x00000004 /* Mailbox events */ +#define LOG_INIT 0x00000008 /* Initialization events */ +#define LOG_LINK_EVENT 0x00000010 /* Link events */ +#define LOG_IP 0x00000020 /* IP traffic history */ +#define LOG_FCP 0x00000040 /* FCP traffic history */ +#define LOG_NODE 0x00000080 /* Node table events */ +#define LOG_TEMP 0x00000100 /* Temperature sensor events */ +#define LOG_BG 0x00000200 /* BlockGuard events */ +#define LOG_MISC 0x00000400 /* Miscellaneous events */ +#define LOG_SLI 0x00000800 /* SLI events */ +#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */ +#define LOG_LIBDFC 0x00002000 /* Libdfc events */ +#define LOG_VPORT 0x00004000 /* NPIV events */ +#define LOF_SECURITY 0x00008000 /* Security events */ +#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ +#define LOG_ALL_MSG 0xffffffff /* LOG all messages */ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ - do { \ - { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ +do { \ + { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \ dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ - } while (0) +} while (0) #define lpfc_printf_log(phba, level, mask, fmt, arg...) 
\ - do { \ - { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ +do { \ + { uint32_t log_verbose = (phba)->pport ? \ + (phba)->pport->cfg_log_verbose : \ + (phba)->cfg_log_verbose; \ + if (((mask) & log_verbose) || (level[1] <= '3')) \ dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ - fmt, phba->brd_no, ##arg); } \ - } while (0) + fmt, phba->brd_no, ##arg); \ + } \ +} while (0) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index a53c267a067b..ff04daf18f48 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -3272,7 +3272,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) mdelay(1); if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { - if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || + if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || phba->pport->stopped) goto restore_hc; else @@ -3353,7 +3353,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) return 1; } - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); mempool_free(pmb, phba->mbox_mem_pool); @@ -4643,7 +4645,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) spin_unlock_irq(&phba->pport->work_port_lock); spin_lock_irq(&phba->hbalock); phba->link_state = LPFC_LINK_UNKNOWN; - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); pring = &psli->ring[psli->fcp_ring]; diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index e6c88ee8ee96..7d37eb7459bf 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -244,7 +244,7 @@ struct lpfc_sli { /* Additional sli_flags */ #define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */ -#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ +#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */ #define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ #define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ #define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 59e67f7ee531..a415ec0b9a86 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -121,7 +121,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport) if (!pmb) { return -ENOMEM; } - mb = &pmb->mb; + mb = &pmb->u.mb; lpfc_read_sparam(phba, pmb, vport->vpi); /* -- cgit v1.2.3 From 21e9a0a5fbd2b7cb3ae29f6d491a30bc0e688422 Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 22 May 2009 14:53:21 -0400 Subject: [SCSI] lpfc 8.3.2 : Persistent Vport Support Add support for persistent vport definitions at creation at boot time Also includes a few misc fixes for: - conversion to vpi name from vport slang name - couple of small mailbox references - some additional discovery mods Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_attr.c | 31 +++++++ drivers/scsi/lpfc/lpfc_hbadisc.c | 180 ++++++++++++++++++++++++++++++--------- drivers/scsi/lpfc/lpfc_init.c | 1 + drivers/scsi/lpfc/lpfc_mbox.c | 38 +++++++++ drivers/scsi/lpfc/lpfc_scsi.c | 6 +- drivers/scsi/lpfc/lpfc_vport.c | 37 ++++---- 6 files changed, 236 insertions(+), 57 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 46e032aa0bea..d73e677201f8 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -2277,6 +2277,36 @@ 
lpfc_param_init(topology, 0, 0, 6) static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, lpfc_topology_show, lpfc_topology_store); +/** + * lpfc_static_vport_show - Read callback function for the + * lpfc_static_vport sysfs file. + * @dev: Pointer to class device object. + * @attr: device attribute structure. + * @buf: Data buffer. + * + * This function is the read callback for the + * lpfc_static_vport sysfs file. The lpfc_static_vport + * sysfs file reports whether the vport is a static (persistent) vport. + **/ +static ssize_t +lpfc_static_vport_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + if (vport->vport_flag & STATIC_VPORT) + sprintf(buf, "1\n"); + else + sprintf(buf, "0\n"); + + return strlen(buf); +} + +/* + * Sysfs attribute to report whether the vport is a static vport. + */ +static DEVICE_ATTR(lpfc_static_vport, S_IRUGO, + lpfc_static_vport_show, NULL); /** * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file @@ -3051,6 +3081,7 @@ struct device_attribute *lpfc_vport_attrs[] = { &dev_attr_lpfc_enable_da_id, &dev_attr_lpfc_max_scsicmpl_time, &dev_attr_lpfc_stat_data_ctrl, + &dev_attr_lpfc_static_vport, NULL, }; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 126323a4dcec..35c41ae75be2 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -2079,6 +2079,128 @@ out: return; } +/** + * lpfc_create_static_vport - Read HBA config region to create static vports. + * @phba: pointer to lpfc hba data structure. + * + * This routine issues a DUMP mailbox command for config region 22 to get + * the list of static vports to be created. The function creates vports + * based on the information returned from the HBA.
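+ * A region with a bad signature or revision is ignored, and any table entry + * whose wwpn or wwnn is zero is skipped, so a partially populated region is + * handled safely.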
+ **/ +void +lpfc_create_static_vport(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *pmb = NULL; + MAILBOX_t *mb; + struct static_vport_info *vport_info; + int rc, i; + struct fc_vport_identifiers vport_id; + struct fc_vport *new_fc_vport; + struct Scsi_Host *shost; + struct lpfc_vport *vport; + uint16_t offset = 0; + uint8_t *vport_buff; + + pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0542 lpfc_create_static_vport failed to" + " allocate mailbox memory\n"); + return; + } + + mb = &pmb->u.mb; + + vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL); + if (!vport_info) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0543 lpfc_create_static_vport failed to" + " allocate vport_info\n"); + mempool_free(pmb, phba->mbox_mem_pool); + return; + } + + vport_buff = (uint8_t *) vport_info; + do { + lpfc_dump_static_vport(phba, pmb, offset); + pmb->vport = phba->pport; + rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO); + + if ((rc != MBX_SUCCESS) || mb->mbxStatus) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0544 lpfc_create_static_vport failed to" + " issue dump mailbox command ret 0x%x " + "status 0x%x\n", + rc, mb->mbxStatus); + goto out; + } + + if (mb->un.varDmp.word_cnt > + sizeof(struct static_vport_info) - offset) + mb->un.varDmp.word_cnt = + sizeof(struct static_vport_info) - offset; + + lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, + vport_buff + offset, + mb->un.varDmp.word_cnt); + offset += mb->un.varDmp.word_cnt; + + } while (mb->un.varDmp.word_cnt && + offset < sizeof(struct static_vport_info)); + + + if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) || + ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK) + != VPORT_INFO_REV)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0545 lpfc_create_static_vport bad" + " information header 0x%x 0x%x\n", + le32_to_cpu(vport_info->signature), + le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK); + + goto out; + } + + shost = lpfc_shost_from_vport(phba->pport); + + for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) { + memset(&vport_id, 0, sizeof(vport_id)); + vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn); + vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn); + if (!vport_id.port_name || !vport_id.node_name) + continue; + + vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; + vport_id.vport_type = FC_PORTTYPE_NPIV; + vport_id.disable = false; + new_fc_vport = fc_vport_create(shost, 0, &vport_id); + + if (!new_fc_vport) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0546 lpfc_create_static_vport failed to" + " create vport \n"); + continue; + } + + vport = *(struct lpfc_vport **)new_fc_vport->dd_data; + vport->vport_flag |= STATIC_VPORT; + } + +out: + /* + * If this is timed out command, setting NULL to context2 tell SLI + * layer not to use this buffer. + */ + spin_lock_irq(&phba->hbalock); + pmb->context2 = NULL; + spin_unlock_irq(&phba->hbalock); + kfree(vport_info); + if (rc != MBX_TIMEOUT) + mempool_free(pmb, phba->mbox_mem_pool); + + return; +} + /* * This routine handles processing a Fabric REG_LOGIN mailbox * command upon completion. 
It is setup in the LPFC_MBOXQ @@ -2089,16 +2211,17 @@ void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); struct lpfc_nodelist *ndlp; - struct lpfc_vport **vports; - int i; ndlp = (struct lpfc_nodelist *) pmb->context2; pmb->context1 = NULL; pmb->context2 = NULL; if (mb->mbxStatus) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, + "0258 Register Fabric login error: 0x%x\n", + mb->mbxStatus); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); @@ -2117,9 +2240,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, - "0258 Register Fabric login error: 0x%x\n", - mb->mbxStatus); /* Decrement the reference count to ndlp after the reference * to the ndlp are done. */ @@ -2128,34 +2248,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } ndlp->nlp_rpi = mb->un.varWords[0]; + ndlp->nlp_flag |= NLP_RPI_VALID; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); if (vport->port_state == LPFC_FABRIC_CFG_LINK) { - vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) - for(i = 0; - i <= phba->max_vpi && vports[i] != NULL; - i++) { - if (vports[i]->port_type == LPFC_PHYSICAL_PORT) - continue; - if (phba->fc_topology == TOPOLOGY_LOOP) { - lpfc_vport_set_state(vports[i], - FC_VPORT_LINKDOWN); - continue; - } - if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) - lpfc_initial_fdisc(vports[i]); - else { - lpfc_vport_set_state(vports[i], - FC_VPORT_NO_FABRIC_SUPP); - lpfc_printf_vlog(vport, KERN_ERR, - LOG_ELS, - "0259 No NPIV " - "Fabric support\n"); - } - } - lpfc_destroy_vport_work_array(phba, vports); + lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); } @@ -2179,13 +2277,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; struct lpfc_vport *vport = pmb->vport; if (mb->mbxStatus) { out: + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0260 Register NameServer error: 0x%x\n", + mb->mbxStatus); /* decrement the node reference count held for this * callback function. 
*/ @@ -2209,15 +2310,13 @@ out: return; } lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "0260 Register NameServer error: 0x%x\n", - mb->mbxStatus); return; } pmb->context1 = NULL; ndlp->nlp_rpi = mb->un.varWords[0]; + ndlp->nlp_flag |= NLP_RPI_VALID; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -2718,7 +2817,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, if (pring->ringno == LPFC_ELS_RING) { switch (icmd->ulpCommand) { case CMD_GEN_REQUEST64_CR: - if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) + if (iocb->context_un.ndlp == ndlp) return 1; case CMD_ELS_REQUEST64_CR: if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID) @@ -2765,7 +2864,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) */ psli = &phba->sli; rpi = ndlp->nlp_rpi; - if (rpi) { + if (ndlp->nlp_flag & NLP_RPI_VALID) { /* Now process each ring */ for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; @@ -2813,7 +2912,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) LPFC_MBOXQ_t *mbox; int rc; - if (ndlp->nlp_rpi) { + if (ndlp->nlp_flag & NLP_RPI_VALID) { mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); @@ -2825,6 +2924,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } lpfc_no_rpi(phba, ndlp); ndlp->nlp_rpi = 0; + ndlp->nlp_flag &= ~NLP_RPI_VALID; return 1; } return 0; @@ -2972,13 +3072,14 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) int rc; lpfc_cancel_retry_delay_tmo(vport, ndlp); - if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) { + if ((ndlp->nlp_flag & NLP_DEFER_RM) && + !(ndlp->nlp_flag & NLP_RPI_VALID)) { /* For this case we need to cleanup the default rpi * allocated by the firmware. */ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) { - rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID, + rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID, (uint8_t *) &vport->fc_sparam, mbox, 0); if (rc) { mempool_free(mbox, phba->mbox_mem_pool); @@ -3713,6 +3814,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) pmb->context1 = NULL; ndlp->nlp_rpi = mb->un.varWords[0]; + ndlp->nlp_flag |= NLP_RPI_VALID; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 65cd3fe62200..2f5907f92eea 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -385,6 +385,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Update the fc_host data structures with new wwn. */ fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); + fc_host_max_npiv_vports(shost) = phba->max_vpi; /* If no serial number in VPD data, use low 6 bytes of WWNN */ /* This should be consolidated into parse_vpd ? - mr */ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 29fe5c17f4e8..b9b451c09010 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -40,6 +40,44 @@ #include "lpfc_crtn.h" #include "lpfc_compat.h" +/** + * lpfc_dump_static_vport - Dump HBA's static vport information. + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * @offset: offset for dumping vport info. 
+ * + * The dump mailbox command provides a method for the device driver to obtain + * various types of information from the HBA device. + * + * This routine prepares the mailbox command for dumping list of static + * vports to be created. + **/ +void +lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, + uint16_t offset) +{ + MAILBOX_t *mb; + void *ctx; + + mb = &pmb->u.mb; + ctx = pmb->context2; + + /* Setup to dump vport info region */ + memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); + mb->mbxCommand = MBX_DUMP_MEMORY; + mb->un.varDmp.cv = 1; + mb->un.varDmp.type = DMP_NV_PARAMS; + mb->un.varDmp.entry_index = offset; + mb->un.varDmp.region_id = DMP_REGION_VPORT; + mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t); + mb->un.varDmp.co = 0; + mb->un.varDmp.resp_offset = 0; + pmb->context2 = ctx; + mb->mbxOwner = OWN_HOST; + + return; +} + /** * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory * @phba: pointer to lpfc hba data structure. diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 1a7659c8f388..ccbde41c1539 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -329,7 +329,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); shost_for_each_device(sdev, shost) { new_queue_depth = @@ -383,7 +383,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); shost_for_each_device(sdev, shost) { if (vports[i]->cfg_lun_queue_depth <= @@ -431,7 +431,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); shost_for_each_device(sdev, shost) { rport = starget_to_rport(scsi_target(sdev)); diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index a415ec0b9a86..a6313ee84ac5 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -251,23 +251,22 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport) (vport->fc_flag & wait_flags) || ((vport->port_state > LPFC_VPORT_FAILED) && (vport->port_state < LPFC_VPORT_READY))) { - lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, + lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, "1833 Vport discovery quiesce Wait:" - " vpi x%x state x%x fc_flags x%x" + " state x%x fc_flags x%x" " num_nodes x%x, waiting 1000 msecs" " total wait msecs x%x\n", - vport->vpi, vport->port_state, - vport->fc_flag, vport->num_disc_nodes, + vport->port_state, vport->fc_flag, + vport->num_disc_nodes, jiffies_to_msecs(jiffies - start_time)); msleep(1000); } else { /* Base case. Wait variants satisfied. 
Break out */ - lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, + lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, "1834 Vport discovery quiesced:" - " vpi x%x state x%x fc_flags x%x" + " state x%x fc_flags x%x" " wait msecs x%x\n", - vport->vpi, vport->port_state, - vport->fc_flag, + vport->port_state, vport->fc_flag, jiffies_to_msecs(jiffies - start_time)); break; @@ -275,12 +274,10 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport) } if (time_after(jiffies, wait_time_max)) - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1835 Vport discovery quiesce failed:" - " vpi x%x state x%x fc_flags x%x" - " wait msecs x%x\n", - vport->vpi, vport->port_state, - vport->fc_flag, + " state x%x fc_flags x%x wait msecs x%x\n", + vport->port_state, vport->fc_flag, jiffies_to_msecs(jiffies - start_time)); } @@ -558,6 +555,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport) "physical host\n"); return VPORT_ERROR; } + + /* If the vport is a static vport fail the deletion. */ + if ((vport->vport_flag & STATIC_VPORT) && + !(phba->pport->load_flag & FC_UNLOADING)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + "1837 vport_delete failed: Cannot delete " + "static vport.\n"); + return VPORT_ERROR; + } + /* * If we are not unloading the driver then prevent the vport_delete * from happening until after this vport's discovery is finished. @@ -733,7 +740,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba) struct lpfc_vport *port_iterator; struct lpfc_vport **vports; int index = 0; - vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *), + vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *), GFP_KERNEL); if (vports == NULL) return NULL; @@ -757,7 +764,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports) int i; if (vports == NULL) return; - for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++) + for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++) scsi_host_put(lpfc_shost_from_vport(vports[i])); kfree(vports); } -- cgit v1.2.3 From 53331aa1c721336b661567e4c0aacc04ab9725d8 Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 22 May 2009 14:53:27 -0400 Subject: [SCSI] lpfc 8.3.2 : Update the lpfc driver version to 8.3.2 Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index e599519e3078..6b8a148f0a55 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.1" +#define LPFC_DRIVER_VERSION "8.3.2" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" -- cgit v1.2.3 From 6462c6160af557c310d5941f4700ea2c7f6c67b2 Mon Sep 17 00:00:00 2001 From: Thomas Reitmayr Date: Mon, 1 Jun 2009 13:38:33 +0200 Subject: [ARM] orion5x: Change names of defines for Reset-Out-Mask register The name of the define for the Reset-Out-Mask register as well as its bit for the watchdog reset are changed to match the names used for Kirkwood (which in turn match the processor specification more closely). There is no functional change. This patch prepares for adding orion5x_wdt as a platform device to Kirkwood. 
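As a sketch of the resulting idiom (using only the names this patch introduces; not itself part of the patch): reg = readl(RSTOUTn_MASK); reg |= WDT_RESET_OUT_EN; writel(reg, RSTOUTn_MASK); enables the reset output on watchdog expiry, exactly as the old CPU_RESET_MASK/WDT_RESET sequence did.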
Signed-off-by: Thomas Reitmayr Signed-off-by: Nicolas Pitre --- drivers/watchdog/orion5x_wdt.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/watchdog/orion5x_wdt.c b/drivers/watchdog/orion5x_wdt.c index 2cde568e4fb0..d2dc9762a8c9 100644 --- a/drivers/watchdog/orion5x_wdt.c +++ b/drivers/watchdog/orion5x_wdt.c @@ -73,9 +73,9 @@ static void orion5x_wdt_enable(void) writel(reg, TIMER_CTRL); /* Enable reset on watchdog */ - reg = readl(CPU_RESET_MASK); - reg |= WDT_RESET; - writel(reg, CPU_RESET_MASK); + reg = readl(RSTOUTn_MASK); + reg |= WDT_RESET_OUT_EN; + writel(reg, RSTOUTn_MASK); spin_unlock(&wdt_lock); } @@ -87,9 +87,9 @@ static void orion5x_wdt_disable(void) spin_lock(&wdt_lock); /* Disable reset on watchdog */ - reg = readl(CPU_RESET_MASK); - reg &= ~WDT_RESET; - writel(reg, CPU_RESET_MASK); + reg = readl(RSTOUTn_MASK); + reg &= ~WDT_RESET_OUT_EN; + writel(reg, RSTOUTn_MASK); /* Disable watchdog timer */ reg = readl(TIMER_CTRL); -- cgit v1.2.3 From 054bd3f053de54c81b20df11f354476389826e61 Mon Sep 17 00:00:00 2001 From: Thomas Reitmayr Date: Mon, 1 Jun 2009 13:38:34 +0200 Subject: [ARM] Kirkwood: Add the watchdog timer as a platform device. The Kirkwood architecture uses the same watchdog device as the Orion architecture. This patch adds orion5x_wdt as a platform device for Kirkwood. Signed-off-by: Thomas Reitmayr Tested-by: Martin Michlmayr Signed-off-by: Nicolas Pitre --- drivers/watchdog/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 5eb8f21da82e..1e983b1717d3 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -233,10 +233,10 @@ config DAVINCI_WATCHDOG config ORION5X_WATCHDOG tristate "Orion5x watchdog" - depends on ARCH_ORION5X + depends on ARCH_ORION5X || ARCH_KIRKWOOD help Say Y here if to include support for the watchdog timer - in the Orion5x ARM SoCs. + in the Orion5x and Kirkwood ARM SoCs. To compile this driver as a module, choose M here: the module will be called orion5x_wdt. -- cgit v1.2.3 From 3b937a7dbddbedd9457b33fcc8fa369c0c229c6e Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 1 Jun 2009 13:56:02 -0400 Subject: [ARM] Orion/Kirkwood: rename orion5x_wdt to orion_wdt The Orion watchdog driver is also used on Kirkwood. Convention is to use orion5x for stuff specific to 88F5xxx Orion chips and simply "orion" for shared stuff across SoCs including Kirkwood. Signed-off-by: Nicolas Pitre --- drivers/watchdog/Kconfig | 8 +- drivers/watchdog/Makefile | 2 +- drivers/watchdog/orion5x_wdt.c | 322 ----------------------------------------- drivers/watchdog/orion_wdt.c | 322 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 327 insertions(+), 327 deletions(-) delete mode 100644 drivers/watchdog/orion5x_wdt.c create mode 100644 drivers/watchdog/orion_wdt.c (limited to 'drivers') diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 1e983b1717d3..5744cac4864b 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -231,14 +231,14 @@ config DAVINCI_WATCHDOG NOTE: once enabled, this timer cannot be disabled. Say N if you are unsure. -config ORION5X_WATCHDOG - tristate "Orion5x watchdog" +config ORION_WATCHDOG + tristate "Orion watchdog" depends on ARCH_ORION5X || ARCH_KIRKWOOD help Say Y here if to include support for the watchdog timer - in the Orion5x and Kirkwood ARM SoCs. + in the Marvell Orion5x and Kirkwood ARM SoCs. 
To compile this driver as a module, choose M here: the - module will be called orion5x_wdt. + module will be called orion_wdt. # AVR32 Architecture diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 7f8c56b14f58..c3afa14d5be1 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -40,7 +40,7 @@ obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o -obj-$(CONFIG_ORION5X_WATCHDOG) += orion5x_wdt.o +obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o # AVR32 Architecture obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o diff --git a/drivers/watchdog/orion5x_wdt.c b/drivers/watchdog/orion5x_wdt.c deleted file mode 100644 index d2dc9762a8c9..000000000000 --- a/drivers/watchdog/orion5x_wdt.c +++ /dev/null @@ -1,322 +0,0 @@ -/* - * drivers/watchdog/orion5x_wdt.c - * - * Watchdog driver for Orion5x processors - * - * Author: Sylver Bruneau - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Watchdog timer block registers. - */ -#define TIMER_CTRL (TIMER_VIRT_BASE + 0x0000) -#define WDT_EN 0x0010 -#define WDT_VAL (TIMER_VIRT_BASE + 0x0024) - -#define WDT_MAX_CYCLE_COUNT 0xffffffff -#define WDT_IN_USE 0 -#define WDT_OK_TO_CLOSE 1 - -static int nowayout = WATCHDOG_NOWAYOUT; -static int heartbeat = -1; /* module parameter (seconds) */ -static unsigned int wdt_max_duration; /* (seconds) */ -static unsigned int wdt_tclk; -static unsigned long wdt_status; -static spinlock_t wdt_lock; - -static void orion5x_wdt_ping(void) -{ - spin_lock(&wdt_lock); - - /* Reload watchdog duration */ - writel(wdt_tclk * heartbeat, WDT_VAL); - - spin_unlock(&wdt_lock); -} - -static void orion5x_wdt_enable(void) -{ - u32 reg; - - spin_lock(&wdt_lock); - - /* Set watchdog duration */ - writel(wdt_tclk * heartbeat, WDT_VAL); - - /* Clear watchdog timer interrupt */ - reg = readl(BRIDGE_CAUSE); - reg &= ~WDT_INT_REQ; - writel(reg, BRIDGE_CAUSE); - - /* Enable watchdog timer */ - reg = readl(TIMER_CTRL); - reg |= WDT_EN; - writel(reg, TIMER_CTRL); - - /* Enable reset on watchdog */ - reg = readl(RSTOUTn_MASK); - reg |= WDT_RESET_OUT_EN; - writel(reg, RSTOUTn_MASK); - - spin_unlock(&wdt_lock); -} - -static void orion5x_wdt_disable(void) -{ - u32 reg; - - spin_lock(&wdt_lock); - - /* Disable reset on watchdog */ - reg = readl(RSTOUTn_MASK); - reg &= ~WDT_RESET_OUT_EN; - writel(reg, RSTOUTn_MASK); - - /* Disable watchdog timer */ - reg = readl(TIMER_CTRL); - reg &= ~WDT_EN; - writel(reg, TIMER_CTRL); - - spin_unlock(&wdt_lock); -} - -static int orion5x_wdt_get_timeleft(int *time_left) -{ - spin_lock(&wdt_lock); - *time_left = readl(WDT_VAL) / wdt_tclk; - spin_unlock(&wdt_lock); - return 0; -} - -static int orion5x_wdt_open(struct inode *inode, struct file *file) -{ - if (test_and_set_bit(WDT_IN_USE, &wdt_status)) - return -EBUSY; - clear_bit(WDT_OK_TO_CLOSE, &wdt_status); - orion5x_wdt_enable(); - return nonseekable_open(inode, file); -} - -static ssize_t orion5x_wdt_write(struct file *file, const char *data, - size_t len, loff_t *ppos) -{ - if (len) { - if (!nowayout) { - size_t i; - - clear_bit(WDT_OK_TO_CLOSE, &wdt_status); - for (i = 0; i != len; i++) { - char c; - - if 
(get_user(c, data + i)) - return -EFAULT; - if (c == 'V') - set_bit(WDT_OK_TO_CLOSE, &wdt_status); - } - } - orion5x_wdt_ping(); - } - return len; -} - -static int orion5x_wdt_settimeout(int new_time) -{ - if ((new_time <= 0) || (new_time > wdt_max_duration)) - return -EINVAL; - - /* Set new watchdog time to be used when - * orion5x_wdt_enable() or orion5x_wdt_ping() is called. */ - heartbeat = new_time; - return 0; -} - -static const struct watchdog_info ident = { - .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | - WDIOF_KEEPALIVEPING, - .identity = "Orion5x Watchdog", -}; - -static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - int ret = -ENOTTY; - int time; - - switch (cmd) { - case WDIOC_GETSUPPORT: - ret = copy_to_user((struct watchdog_info *)arg, &ident, - sizeof(ident)) ? -EFAULT : 0; - break; - - case WDIOC_GETSTATUS: - case WDIOC_GETBOOTSTATUS: - ret = put_user(0, (int *)arg); - break; - - case WDIOC_KEEPALIVE: - orion5x_wdt_ping(); - ret = 0; - break; - - case WDIOC_SETTIMEOUT: - ret = get_user(time, (int *)arg); - if (ret) - break; - - if (orion5x_wdt_settimeout(time)) { - ret = -EINVAL; - break; - } - orion5x_wdt_ping(); - /* Fall through */ - - case WDIOC_GETTIMEOUT: - ret = put_user(heartbeat, (int *)arg); - break; - - case WDIOC_GETTIMELEFT: - if (orion5x_wdt_get_timeleft(&time)) { - ret = -EINVAL; - break; - } - ret = put_user(time, (int *)arg); - break; - } - return ret; -} - -static int orion5x_wdt_release(struct inode *inode, struct file *file) -{ - if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) - orion5x_wdt_disable(); - else - printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " - "timer will not stop\n"); - clear_bit(WDT_IN_USE, &wdt_status); - clear_bit(WDT_OK_TO_CLOSE, &wdt_status); - - return 0; -} - - -static const struct file_operations orion5x_wdt_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, - .write = orion5x_wdt_write, - .unlocked_ioctl = orion5x_wdt_ioctl, - .open = orion5x_wdt_open, - .release = orion5x_wdt_release, -}; - -static struct miscdevice orion5x_wdt_miscdev = { - .minor = WATCHDOG_MINOR, - .name = "watchdog", - .fops = &orion5x_wdt_fops, -}; - -static int __devinit orion5x_wdt_probe(struct platform_device *pdev) -{ - struct orion5x_wdt_platform_data *pdata = pdev->dev.platform_data; - int ret; - - if (pdata) { - wdt_tclk = pdata->tclk; - } else { - printk(KERN_ERR "Orion5x Watchdog misses platform data\n"); - return -ENODEV; - } - - if (orion5x_wdt_miscdev.parent) - return -EBUSY; - orion5x_wdt_miscdev.parent = &pdev->dev; - - wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk; - if (orion5x_wdt_settimeout(heartbeat)) - heartbeat = wdt_max_duration; - - ret = misc_register(&orion5x_wdt_miscdev); - if (ret) - return ret; - - printk(KERN_INFO "Orion5x Watchdog Timer: Initial timeout %d sec%s\n", - heartbeat, nowayout ? 
", nowayout" : ""); - return 0; -} - -static int __devexit orion5x_wdt_remove(struct platform_device *pdev) -{ - int ret; - - if (test_bit(WDT_IN_USE, &wdt_status)) { - orion5x_wdt_disable(); - clear_bit(WDT_IN_USE, &wdt_status); - } - - ret = misc_deregister(&orion5x_wdt_miscdev); - if (!ret) - orion5x_wdt_miscdev.parent = NULL; - - return ret; -} - -static void orion5x_wdt_shutdown(struct platform_device *pdev) -{ - if (test_bit(WDT_IN_USE, &wdt_status)) - orion5x_wdt_disable(); -} - -static struct platform_driver orion5x_wdt_driver = { - .probe = orion5x_wdt_probe, - .remove = __devexit_p(orion5x_wdt_remove), - .shutdown = orion5x_wdt_shutdown, - .driver = { - .owner = THIS_MODULE, - .name = "orion5x_wdt", - }, -}; - -static int __init orion5x_wdt_init(void) -{ - spin_lock_init(&wdt_lock); - return platform_driver_register(&orion5x_wdt_driver); -} - -static void __exit orion5x_wdt_exit(void) -{ - platform_driver_unregister(&orion5x_wdt_driver); -} - -module_init(orion5x_wdt_init); -module_exit(orion5x_wdt_exit); - -MODULE_AUTHOR("Sylver Bruneau "); -MODULE_DESCRIPTION("Orion5x Processor Watchdog"); - -module_param(heartbeat, int, 0); -MODULE_PARM_DESC(heartbeat, "Initial watchdog heartbeat in seconds"); - -module_param(nowayout, int, 0); -MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" - __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); - -MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c new file mode 100644 index 000000000000..2d9fb96a9ee9 --- /dev/null +++ b/drivers/watchdog/orion_wdt.c @@ -0,0 +1,322 @@ +/* + * drivers/watchdog/orion_wdt.c + * + * Watchdog driver for Orion/Kirkwood processors + * + * Author: Sylver Bruneau + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Watchdog timer block registers. 
+ */ +#define TIMER_CTRL (TIMER_VIRT_BASE + 0x0000) +#define WDT_EN 0x0010 +#define WDT_VAL (TIMER_VIRT_BASE + 0x0024) + +#define WDT_MAX_CYCLE_COUNT 0xffffffff +#define WDT_IN_USE 0 +#define WDT_OK_TO_CLOSE 1 + +static int nowayout = WATCHDOG_NOWAYOUT; +static int heartbeat = -1; /* module parameter (seconds) */ +static unsigned int wdt_max_duration; /* (seconds) */ +static unsigned int wdt_tclk; +static unsigned long wdt_status; +static spinlock_t wdt_lock; + +static void orion_wdt_ping(void) +{ + spin_lock(&wdt_lock); + + /* Reload watchdog duration */ + writel(wdt_tclk * heartbeat, WDT_VAL); + + spin_unlock(&wdt_lock); +} + +static void orion_wdt_enable(void) +{ + u32 reg; + + spin_lock(&wdt_lock); + + /* Set watchdog duration */ + writel(wdt_tclk * heartbeat, WDT_VAL); + + /* Clear watchdog timer interrupt */ + reg = readl(BRIDGE_CAUSE); + reg &= ~WDT_INT_REQ; + writel(reg, BRIDGE_CAUSE); + + /* Enable watchdog timer */ + reg = readl(TIMER_CTRL); + reg |= WDT_EN; + writel(reg, TIMER_CTRL); + + /* Enable reset on watchdog */ + reg = readl(RSTOUTn_MASK); + reg |= WDT_RESET_OUT_EN; + writel(reg, RSTOUTn_MASK); + + spin_unlock(&wdt_lock); +} + +static void orion_wdt_disable(void) +{ + u32 reg; + + spin_lock(&wdt_lock); + + /* Disable reset on watchdog */ + reg = readl(RSTOUTn_MASK); + reg &= ~WDT_RESET_OUT_EN; + writel(reg, RSTOUTn_MASK); + + /* Disable watchdog timer */ + reg = readl(TIMER_CTRL); + reg &= ~WDT_EN; + writel(reg, TIMER_CTRL); + + spin_unlock(&wdt_lock); +} + +static int orion_wdt_get_timeleft(int *time_left) +{ + spin_lock(&wdt_lock); + *time_left = readl(WDT_VAL) / wdt_tclk; + spin_unlock(&wdt_lock); + return 0; +} + +static int orion_wdt_open(struct inode *inode, struct file *file) +{ + if (test_and_set_bit(WDT_IN_USE, &wdt_status)) + return -EBUSY; + clear_bit(WDT_OK_TO_CLOSE, &wdt_status); + orion_wdt_enable(); + return nonseekable_open(inode, file); +} + +static ssize_t orion_wdt_write(struct file *file, const char *data, + size_t len, loff_t *ppos) +{ + if (len) { + if (!nowayout) { + size_t i; + + clear_bit(WDT_OK_TO_CLOSE, &wdt_status); + for (i = 0; i != len; i++) { + char c; + + if (get_user(c, data + i)) + return -EFAULT; + if (c == 'V') + set_bit(WDT_OK_TO_CLOSE, &wdt_status); + } + } + orion_wdt_ping(); + } + return len; +} + +static int orion_wdt_settimeout(int new_time) +{ + if ((new_time <= 0) || (new_time > wdt_max_duration)) + return -EINVAL; + + /* Set new watchdog time to be used when + * orion_wdt_enable() or orion_wdt_ping() is called. */ + heartbeat = new_time; + return 0; +} + +static const struct watchdog_info ident = { + .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | + WDIOF_KEEPALIVEPING, + .identity = "Orion Watchdog", +}; + +static long orion_wdt_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = -ENOTTY; + int time; + + switch (cmd) { + case WDIOC_GETSUPPORT: + ret = copy_to_user((struct watchdog_info *)arg, &ident, + sizeof(ident)) ? 
-EFAULT : 0; + break; + + case WDIOC_GETSTATUS: + case WDIOC_GETBOOTSTATUS: + ret = put_user(0, (int *)arg); + break; + + case WDIOC_KEEPALIVE: + orion_wdt_ping(); + ret = 0; + break; + + case WDIOC_SETTIMEOUT: + ret = get_user(time, (int *)arg); + if (ret) + break; + + if (orion_wdt_settimeout(time)) { + ret = -EINVAL; + break; + } + orion_wdt_ping(); + /* Fall through */ + + case WDIOC_GETTIMEOUT: + ret = put_user(heartbeat, (int *)arg); + break; + + case WDIOC_GETTIMELEFT: + if (orion_wdt_get_timeleft(&time)) { + ret = -EINVAL; + break; + } + ret = put_user(time, (int *)arg); + break; + } + return ret; +} + +static int orion_wdt_release(struct inode *inode, struct file *file) +{ + if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) + orion_wdt_disable(); + else + printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " + "timer will not stop\n"); + clear_bit(WDT_IN_USE, &wdt_status); + clear_bit(WDT_OK_TO_CLOSE, &wdt_status); + + return 0; +} + + +static const struct file_operations orion_wdt_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .write = orion_wdt_write, + .unlocked_ioctl = orion_wdt_ioctl, + .open = orion_wdt_open, + .release = orion_wdt_release, +}; + +static struct miscdevice orion_wdt_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &orion_wdt_fops, +}; + +static int __devinit orion_wdt_probe(struct platform_device *pdev) +{ + struct orion_wdt_platform_data *pdata = pdev->dev.platform_data; + int ret; + + if (pdata) { + wdt_tclk = pdata->tclk; + } else { + printk(KERN_ERR "Orion Watchdog misses platform data\n"); + return -ENODEV; + } + + if (orion_wdt_miscdev.parent) + return -EBUSY; + orion_wdt_miscdev.parent = &pdev->dev; + + wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk; + if (orion_wdt_settimeout(heartbeat)) + heartbeat = wdt_max_duration; + + ret = misc_register(&orion_wdt_miscdev); + if (ret) + return ret; + + printk(KERN_INFO "Orion Watchdog Timer: Initial timeout %d sec%s\n", + heartbeat, nowayout ? 
", nowayout" : ""); + return 0; +} + +static int __devexit orion_wdt_remove(struct platform_device *pdev) +{ + int ret; + + if (test_bit(WDT_IN_USE, &wdt_status)) { + orion_wdt_disable(); + clear_bit(WDT_IN_USE, &wdt_status); + } + + ret = misc_deregister(&orion_wdt_miscdev); + if (!ret) + orion_wdt_miscdev.parent = NULL; + + return ret; +} + +static void orion_wdt_shutdown(struct platform_device *pdev) +{ + if (test_bit(WDT_IN_USE, &wdt_status)) + orion_wdt_disable(); +} + +static struct platform_driver orion_wdt_driver = { + .probe = orion_wdt_probe, + .remove = __devexit_p(orion_wdt_remove), + .shutdown = orion_wdt_shutdown, + .driver = { + .owner = THIS_MODULE, + .name = "orion_wdt", + }, +}; + +static int __init orion_wdt_init(void) +{ + spin_lock_init(&wdt_lock); + return platform_driver_register(&orion_wdt_driver); +} + +static void __exit orion_wdt_exit(void) +{ + platform_driver_unregister(&orion_wdt_driver); +} + +module_init(orion_wdt_init); +module_exit(orion_wdt_exit); + +MODULE_AUTHOR("Sylver Bruneau "); +MODULE_DESCRIPTION("Orion Processor Watchdog"); + +module_param(heartbeat, int, 0); +MODULE_PARM_DESC(heartbeat, "Initial watchdog heartbeat in seconds"); + +module_param(nowayout, int, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); -- cgit v1.2.3 From 477e608c03eb2f561a23994bee38a32a9fd3357d Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Mon, 27 Apr 2009 20:54:22 +0200 Subject: [SCSI] fix documentation for two functions Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: James Bottomley --- drivers/scsi/scsi.c | 4 ++-- drivers/scsi/scsi_error.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 166417a6afba..2de5f3ad640b 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -1225,8 +1225,8 @@ EXPORT_SYMBOL(__scsi_device_lookup_by_target); * @starget: SCSI target pointer * @lun: SCSI Logical Unit Number * - * Description: Looks up the scsi_device with the specified @channel, @id, @lun - * for a given host. The returned scsi_device has an additional reference that + * Description: Looks up the scsi_device with the specified @lun for a given + * @starget. The returned scsi_device has an additional reference that * needs to be released with scsi_device_put once you're done with it. **/ struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 0c2c73be1974..a95d2bac0780 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -641,9 +641,9 @@ EXPORT_SYMBOL(scsi_eh_prep_cmnd); /** * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recory * @scmd: SCSI command structure to restore - * @ses: saved information from a coresponding call to scsi_prep_eh_cmnd + * @ses: saved information from a coresponding call to scsi_eh_prep_cmnd * - * Undo any damage done by above scsi_prep_eh_cmnd(). + * Undo any damage done by above scsi_eh_prep_cmnd(). 
*/ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) { -- cgit v1.2.3 From 91bc31fb3bae4e55832c7c39d4f9c193285e6ab2 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Sun, 17 May 2009 09:30:48 -0500 Subject: [SCSI] fix up scsi_eh_lock_door() The documentation is incorrect (we removed some functions it referred to), and none of the bug warnings now apply. Additionally remove the spurious check on the return from blk_get_request(), which can't fail if __GFP_WAIT is passed in. Signed-off-by: James Bottomley --- drivers/scsi/scsi_error.c | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index a95d2bac0780..a1689353d7fd 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -1451,28 +1451,21 @@ static void eh_lock_door_done(struct request *req, int uptodate) * @sdev: SCSI device to prevent medium removal * * Locking: - * We must be called from process context; scsi_allocate_request() - * may sleep. + * We must be called from process context. * * Notes: * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the * head of the devices request queue, and continue. - * - * Bugs: - * scsi_allocate_request() may sleep waiting for existing requests to - * be processed. However, since we haven't kicked off any request - * processing for this host, this may deadlock. - * - * If scsi_allocate_request() fails for what ever reason, we - * completely forget to lock the door. */ static void scsi_eh_lock_door(struct scsi_device *sdev) { struct request *req; + /* + * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a + * request becomes available + */ req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL); - if (!req) - return; req->cmd[0] = ALLOW_MEDIUM_REMOVAL; req->cmd[1] = 0; -- cgit v1.2.3 From 601e7638254c118fca135af9b1a9f35061420f62 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Tue, 26 May 2009 20:35:48 +0000 Subject: [SCSI] sd: fix bug in SCSI async probing The async split-up of probing in sd.c created a potential failure case where something goes wrong with device_add() but from which we don't recover properly. Since, in general, asynchronous error handling is hard, move the device_add() out of the asynchronous path (it should be fast) and make sure all the deferred processing cannot fail.
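The split this patch settles on can be sketched as follows: anything that can fail stays in the synchronous probe, only non-failing work is deferred, and remove() drains the async work before teardown. A rough illustration with hypothetical foo_* names, not the sd.c code itself:

	#include <linux/async.h>
	#include <linux/device.h>
	#include <linux/slab.h>

	struct foo_disk {
		struct device dev;
	};

	/* Deferred half: only steps that cannot fail belong here, because
	 * there is no clean way to unwind a half-registered device from
	 * this context. */
	static void foo_probe_async(void *data, async_cookie_t cookie)
	{
		struct foo_disk *fdp = data;

		/* slow but non-failing hardware setup would go here */
		dev_info(&fdp->dev, "deferred setup complete\n");
	}

	static int foo_probe(struct device *dev)
	{
		struct foo_disk *fdp;
		int err;

		fdp = kzalloc(sizeof(*fdp), GFP_KERNEL);
		if (!fdp)
			return -ENOMEM;

		device_initialize(&fdp->dev);
		fdp->dev.parent = dev;
		dev_set_name(&fdp->dev, "foo");
		dev_set_drvdata(dev, fdp);

		/* Everything that can fail happens synchronously, where the
		 * normal probe error path can still clean up. */
		err = device_add(&fdp->dev);
		if (err) {
			put_device(&fdp->dev);
			return err;
		}

		async_schedule(foo_probe_async, fdp);
		return 0;
	}

	static int foo_remove(struct device *dev)
	{
		struct foo_disk *fdp = dev_get_drvdata(dev);

		/* Wait out any in-flight async probe before tearing down. */
		async_synchronize_full();
		device_del(&fdp->dev);
		put_device(&fdp->dev);
		return 0;
	}

Keeping device_add() synchronous is what lets the probe return an error the driver core can act on, while the async_synchronize_full() in the remove path guarantees the deferred half has finished before teardown starts.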
Signed-off-by: James Bottomley --- drivers/scsi/sd.c | 45 +++++++++++++++++++++------------------------ 1 file changed, 21 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 84044233b637..d8e1d15101b7 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1902,24 +1902,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie) index = sdkp->index; dev = &sdp->sdev_gendev; - if (!sdp->request_queue->rq_timeout) { - if (sdp->type != TYPE_MOD) - blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT); - else - blk_queue_rq_timeout(sdp->request_queue, - SD_MOD_TIMEOUT); - } - - device_initialize(&sdkp->dev); - sdkp->dev.parent = &sdp->sdev_gendev; - sdkp->dev.class = &sd_disk_class; - dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev)); - - if (device_add(&sdkp->dev)) - goto out_free_index; - - get_device(&sdp->sdev_gendev); - if (index < SD_MAX_DISKS) { gd->major = sd_major((index & 0xf0) >> 4); gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); @@ -1954,11 +1936,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", sdp->removable ? "removable " : ""); - - return; - - out_free_index: - ida_remove(&sd_index_ida, index); } /** @@ -2026,6 +2003,24 @@ static int sd_probe(struct device *dev) sdkp->openers = 0; sdkp->previous_state = 1; + if (!sdp->request_queue->rq_timeout) { + if (sdp->type != TYPE_MOD) + blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT); + else + blk_queue_rq_timeout(sdp->request_queue, + SD_MOD_TIMEOUT); + } + + device_initialize(&sdkp->dev); + sdkp->dev.parent = &sdp->sdev_gendev; + sdkp->dev.class = &sd_disk_class; + dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev)); + + if (device_add(&sdkp->dev)) + goto out_free_index; + + get_device(&sdp->sdev_gendev); + async_schedule(sd_probe_async, sdkp); return 0; @@ -2055,8 +2050,10 @@ static int sd_probe(struct device *dev) **/ static int sd_remove(struct device *dev) { - struct scsi_disk *sdkp = dev_get_drvdata(dev); + struct scsi_disk *sdkp; + async_synchronize_full(); + sdkp = dev_get_drvdata(dev); device_del(&sdkp->dev); del_gendisk(sdkp->disk); sd_shutdown(dev); -- cgit v1.2.3 From 4a2837d4fcaf8a2c2ad61523287073d0c14b9ed0 Mon Sep 17 00:00:00 2001 From: Brian King Date: Thu, 28 May 2009 16:17:22 -0500 Subject: [SCSI] ibmvfc: Fix invalid error response handling Fix an obvious bug in processing error responses for SCSI commands which can result in successful responses being incorrectly returned with DID_ERROR. 
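The inverted test is easier to see in isolation. A valid FCP_RSP_LEN is 0, 4, or 8 bytes; the old expression (!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) is true only for a length of 0, which is legal, and never catches an illegal length such as 5. A standalone sketch of the corrected predicate, for illustration only:

	#include <stdbool.h>

	/* A valid FCP_RSP_LEN field is 0, 4, or 8 bytes; anything else
	 * means the response itself is malformed and the command must
	 * be failed. */
	static bool fcp_rsp_len_invalid(unsigned int fc_rsp_len)
	{
		return fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8;
	}

Under the old test, a successful response with FCP_RSP_LEN_VALID set and a zero-length field was flagged and completed with DID_ERROR; with the fix, only genuinely out-of-spec lengths are.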
Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index ea4abee7a2a9..879c51133c95 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -275,7 +275,7 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd) int fc_rsp_len = rsp->fcp_rsp_len; if ((rsp->flags & FCP_RSP_LEN_VALID) && - ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || + ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || rsp->data.info.rsp_code)) return DID_ERROR << 16; -- cgit v1.2.3 From 7270b9bde5f382e730e1ef69d6c1b34d388df2b0 Mon Sep 17 00:00:00 2001 From: Brian King Date: Thu, 28 May 2009 16:17:24 -0500 Subject: [SCSI] ibmvfc: Fixup GFP flags for target allocations Since target allocations can occur while resetting the virtual adapter, we shouldn't be using GFP_KERNEL for them as it could hang. Switch to use GFP_NOIO. Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 879c51133c95..c450a346590e 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -3420,7 +3420,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id) } spin_unlock_irqrestore(vhost->host->host_lock, flags); - tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL); + tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO); if (!tgt) { dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n", scsi_id); -- cgit v1.2.3 From 85e2399e925e0afa04dd6e185a910bdd3dc4626b Mon Sep 17 00:00:00 2001 From: Brian King Date: Thu, 28 May 2009 16:17:25 -0500 Subject: [SCSI] ibmvfc: Use DEVICE_ATTR macro Use DEVICE_ATTR macro for defining device sysfs attributes. 
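For reference, DEVICE_ATTR(_name, _mode, _show, _store) declares a struct device_attribute named dev_attr_<_name>, which is why the attribute table in the diff below can switch to the generated dev_attr_* symbols. A minimal example with a hypothetical foo attribute, not part of this driver:

	#include <linux/device.h>

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 42);
	}

	/* Expands to roughly:
	 *	struct device_attribute dev_attr_foo = {
	 *		.attr	= { .name = "foo", .mode = S_IRUGO },
	 *		.show	= foo_show,
	 *		.store	= NULL,
	 *	};
	 */
	static DEVICE_ATTR(foo, S_IRUGO, foo_show, NULL);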
Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 67 ++++++++---------------------------------- 1 file changed, 13 insertions(+), 54 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index c450a346590e..26abed2c3a41 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -2434,14 +2434,6 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev, vhost->login_buf->resp.partition_name); } -static struct device_attribute ibmvfc_host_partition_name = { - .attr = { - .name = "partition_name", - .mode = S_IRUGO, - }, - .show = ibmvfc_show_host_partition_name, -}; - static ssize_t ibmvfc_show_host_device_name(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2452,14 +2444,6 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev, vhost->login_buf->resp.device_name); } -static struct device_attribute ibmvfc_host_device_name = { - .attr = { - .name = "device_name", - .mode = S_IRUGO, - }, - .show = ibmvfc_show_host_device_name, -}; - static ssize_t ibmvfc_show_host_loc_code(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2470,14 +2454,6 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev, vhost->login_buf->resp.port_loc_code); } -static struct device_attribute ibmvfc_host_loc_code = { - .attr = { - .name = "port_loc_code", - .mode = S_IRUGO, - }, - .show = ibmvfc_show_host_loc_code, -}; - static ssize_t ibmvfc_show_host_drc_name(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2488,14 +2464,6 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev, vhost->login_buf->resp.drc_name); } -static struct device_attribute ibmvfc_host_drc_name = { - .attr = { - .name = "drc_name", - .mode = S_IRUGO, - }, - .show = ibmvfc_show_host_drc_name, -}; - static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2504,14 +2472,6 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version); } -static struct device_attribute ibmvfc_host_npiv_version = { - .attr = { - .name = "npiv_version", - .mode = S_IRUGO, - }, - .show = ibmvfc_show_host_npiv_version, -}; - /** * ibmvfc_show_log_level - Show the adapter's error logging level * @dev: class device struct @@ -2556,14 +2516,13 @@ static ssize_t ibmvfc_store_log_level(struct device *dev, return strlen(buf); } -static struct device_attribute ibmvfc_log_level_attr = { - .attr = { - .name = "log_level", - .mode = S_IRUGO | S_IWUSR, - }, - .show = ibmvfc_show_log_level, - .store = ibmvfc_store_log_level -}; +static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL); +static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL); +static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL); +static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL); +static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL); +static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR, + ibmvfc_show_log_level, ibmvfc_store_log_level); #ifdef CONFIG_SCSI_IBMVFC_TRACE /** @@ -2612,12 +2571,12 @@ static struct bin_attribute ibmvfc_trace_attr = { #endif static struct device_attribute *ibmvfc_attrs[] = { - &ibmvfc_host_partition_name, - &ibmvfc_host_device_name, - &ibmvfc_host_loc_code, - &ibmvfc_host_drc_name, - &ibmvfc_host_npiv_version, - 
&ibmvfc_log_level_attr, + &dev_attr_partition_name, + &dev_attr_device_name, + &dev_attr_port_loc_code, + &dev_attr_drc_name, + &dev_attr_npiv_version, + &dev_attr_log_level, NULL }; -- cgit v1.2.3 From 7d0e462247241b8ec2d377306203b58c7f423553 Mon Sep 17 00:00:00 2001 From: Brian King Date: Thu, 28 May 2009 16:17:26 -0500 Subject: [SCSI] ibmvfc: Reduce error logging noise The ibmvfc driver currently logs errors during discovery for several transient fabric errors, which generally get retried. If retries do not work, we see multiple errors in the log. If retries do work, we see errors in the log which may be confusing since the retry worked. This patch enhances the discovery time error logging to only log errors for command failures during discovery if all allowed retries have been used up. The existing behavior of logging all failures can be restored by setting the hosts log_level to a value of 3 or greater. Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 68 ++++++++++++++++++++++++++---------------- drivers/scsi/ibmvscsi/ibmvfc.h | 6 ++++ 2 files changed, 48 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 26abed2c3a41..182c8e75aaf0 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -842,9 +842,13 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost) * ibmvfc_retry_host_init - Retry host initialization if allowed * @vhost: ibmvfc host struct * + * Returns: 1 if init will be retried / 0 if not + * **/ -static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost) +static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost) { + int retry = 0; + if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { vhost->delay_init = 1; if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) { @@ -853,11 +857,14 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost) ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES) __ibmvfc_reset_host(vhost); - else + else { ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); + retry = 1; + } } wake_up(&vhost->work_wait_q); + return retry; } /** @@ -2733,15 +2740,19 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt, * @tgt: ibmvfc target struct * @job_step: initialization job step * + * Returns: 1 if step will be retried / 0 if not + * **/ -static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, +static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, void (*job_step) (struct ibmvfc_target *)) { if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) { ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); wake_up(&tgt->vhost->work_wait_q); + return 0; } else ibmvfc_init_tgt(tgt, job_step); + return 1; } /* Defined in FC-LS */ @@ -2790,7 +2801,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; struct ibmvfc_prli_svc_parms *parms = &rsp->parms; u32 status = rsp->common.status; - int index; + int index, level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); @@ -2826,13 +2837,14 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) break; case IBMVFC_MAD_FAILED: default: - tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), - rsp->status, rsp->error, status); if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - 
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + + tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), + rsp->status, rsp->error, status); break; }; @@ -2891,6 +2903,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; u32 status = rsp->common.status; + int level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); @@ -2919,15 +2932,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) break; case IBMVFC_MAD_FAILED: default: - tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, - ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, - ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status); - if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + + tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, + ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, + ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status); break; }; @@ -3281,6 +3294,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; u32 status = rsp->common.status; + int level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); @@ -3300,19 +3314,19 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) break; case IBMVFC_MAD_FAILED: default: - tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, - ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, - ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status); - if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ && rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + + tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, + ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, + ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status); break; }; @@ -3431,6 +3445,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; u32 mad_status = rsp->common.status; + int level = IBMVFC_DEFAULT_LOG_LEVEL; switch (mad_status) { case IBMVFC_MAD_SUCCESS: @@ -3439,9 +3454,9 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); break; case 
IBMVFC_MAD_FAILED: - dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); - ibmvfc_retry_host_init(vhost); + level += ibmvfc_retry_host_init(vhost); + ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); break; case IBMVFC_MAD_DRIVER_FAILED: break; @@ -3493,18 +3508,19 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) u32 mad_status = evt->xfer_iu->npiv_login.common.status; struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; unsigned int npiv_max_sectors; + int level = IBMVFC_DEFAULT_LOG_LEVEL; switch (mad_status) { case IBMVFC_MAD_SUCCESS: ibmvfc_free_event(evt); break; case IBMVFC_MAD_FAILED: - dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - ibmvfc_retry_host_init(vhost); + level += ibmvfc_retry_host_init(vhost); else ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); ibmvfc_free_event(evt); return; case IBMVFC_MAD_CRQ_ERROR: diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index ca1dcf7a7568..4dac3560c1a4 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -707,6 +707,12 @@ struct ibmvfc_host { #define tgt_err(t, fmt, ...) \ dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__) +#define tgt_log(t, level, fmt, ...) \ + do { \ + if ((t)->vhost->log_level >= level) \ + tgt_err(t, fmt, ##__VA_ARGS__); \ + } while (0) + #define ibmvfc_dbg(vhost, ...) \ DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) -- cgit v1.2.3 From 43c8da907ccc656935d1085701f4db83385d8a59 Mon Sep 17 00:00:00 2001 From: Brian King Date: Thu, 28 May 2009 16:17:28 -0500 Subject: [SCSI] ibmvfc: Fix deadlock in EH Fixes the following deadlock scenario shown below. We currently allow queuecommand to send commands when the ibmvfc workqueue is scanning for new rports, so we should also allow EH to function at this time as well. 
scsi_eh_3 D 0000000000000000 12304 1279 2 Call Trace: [c0000002f7257730] [c0000002f72577e0] 0xc0000002f72577e0 (unreliable) [c0000002f7257900] [c0000000000118f4] .__switch_to+0x158/0x1a0 [c0000002f72579a0] [c0000000004f8b40] .schedule+0x8d4/0x9dc [c0000002f7257b60] [c0000000004f8f08] .schedule_timeout+0xa8/0xe8 [c0000002f7257c50] [d0000000001d23e0] .ibmvfc_wait_while_resetting+0xe4/0x140 [ibmvfc] [c0000002f7257d20] [d0000000001d3984] .ibmvfc_eh_abort_handler+0x60/0xe4 [ibmvfc] [c0000002f7257dc0] [d000000000366714] .scsi_error_handler+0x38c/0x674 [scsi_mod] [c0000002f7257f00] [c0000000000a7470] .kthread+0x78/0xc4 [c0000002f7257f90] [c000000000029b8c] .kernel_thread+0x4c/0x68 ibmvfc_3 D 0000000000000000 12432 1280 2 Call Trace: [c0000002f7253540] [c0000002f72535f0] 0xc0000002f72535f0 (unreliable) [c0000002f7253710] [c0000000000118f4] .__switch_to+0x158/0x1a0 [c0000002f72537b0] [c0000000004f8b40] .schedule+0x8d4/0x9dc [c0000002f7253970] [c0000000004f8e98] .schedule_timeout+0x38/0xe8 [c0000002f7253a60] [c0000000004f80cc] .wait_for_common+0x138/0x220 [c0000002f7253b40] [c0000000000a2784] .flush_cpu_workqueue+0xac/0xcc [c0000002f7253c10] [c0000000000a2960] .flush_workqueue+0x58/0xa0 [c0000002f7253ca0] [d0000000000827fc] .fc_flush_work+0x4c/0x64 [scsi_transport_fc] [c0000002f7253d20] [d000000000082db4] .fc_remote_port_add+0x48/0x6c4 [scsi_transport_fc] [c0000002f7253dd0] [d0000000001d7d04] .ibmvfc_work+0x820/0xa7c [ibmvfc] [c0000002f7253f00] [c0000000000a7470] .kthread+0x78/0xc4 [c0000002f7253f90] [c000000000029b8c] .kernel_thread+0x4c/0x68 fc_wq_3 D 0000000000000000 10720 1283 2 Call Trace: [c0000002f559ac30] [c0000002f559ace0] 0xc0000002f559ace0 (unreliable) [c0000002f559ae00] [c0000000000118f4] .__switch_to+0x158/0x1a0 [c0000002f559aea0] [c0000000004f8b40] .schedule+0x8d4/0x9dc [c0000002f559b060] [c0000000004f8e98] .schedule_timeout+0x38/0xe8 [c0000002f559b150] [c0000000004f80cc] .wait_for_common+0x138/0x220 [c0000002f559b230] [c0000000002721c4] .blk_execute_rq+0xb4/0x100 [c0000002f559b360] [d00000000036a1f8] .scsi_execute+0x118/0x194 [scsi_mod] [c0000002f559b420] [d00000000036a32c] .scsi_execute_req+0xb8/0x124 [scsi_mod] [c0000002f559b500] [d0000000000c1330] .sd_sync_cache+0x8c/0x108 [sd_mod] [c0000002f559b5e0] [d0000000000c15b4] .sd_shutdown+0x9c/0x158 [sd_mod] [c0000002f559b660] [d0000000000c16d0] .sd_remove+0x60/0xb4 [sd_mod] [c0000002f559b700] [c000000000392ecc] .__device_release_driver+0xd0/0x118 [c0000002f559b7a0] [c000000000393080] .device_release_driver+0x30/0x54 [c0000002f559b830] [c000000000392108] .bus_remove_device+0x128/0x16c [c0000002f559b8d0] [c00000000038f94c] .device_del+0x158/0x234 [c0000002f559b960] [d00000000036f078] .__scsi_remove_device+0x5c/0xd4 [scsi_mod] [c0000002f559b9f0] [d00000000036f124] .scsi_remove_device+0x34/0x58 [scsi_mod] [c0000002f559ba80] [d00000000036f204] .__scsi_remove_target+0xb4/0x120 [scsi_mod] [c0000002f559bb10] [d00000000036f338] .__remove_child+0x2c/0x44 [scsi_mod] [c0000002f559bb90] [c00000000038f11c] .device_for_each_child+0x54/0xb4 [c0000002f559bc50] [d00000000036f2e0] .scsi_remove_target+0x70/0x9c [scsi_mod] [c0000002f559bce0] [d000000000083454] .fc_starget_delete+0x24/0x3c [scsi_transport_fc] [c0000002f559bd70] [c0000000000a2368] .run_workqueue+0x118/0x208 [c0000002f559be30] [c0000000000a2580] .worker_thread+0x128/0x154 [c0000002f559bf00] [c0000000000a7470] .kthread+0x78/0xc4 [c0000002f559bf90] [c000000000029b8c] .kernel_thread+0x4c/0x68 Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 122 
++++++++++++++++++++++++++++------------- drivers/scsi/ibmvscsi/ibmvfc.h | 5 +- 2 files changed, 86 insertions(+), 41 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 182c8e75aaf0..76ae266b07c4 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -431,6 +431,8 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt, case IBMVFC_TGT_ACTION_DEL_RPORT: break; default: + if (action == IBMVFC_TGT_ACTION_DEL_RPORT) + tgt->add_rport = 0; tgt->action = action; break; } @@ -483,7 +485,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, switch (vhost->action) { case IBMVFC_HOST_ACTION_INIT_WAIT: case IBMVFC_HOST_ACTION_NONE: - case IBMVFC_HOST_ACTION_TGT_ADD: + case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: vhost->action = action; break; default: @@ -498,7 +500,6 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, case IBMVFC_HOST_ACTION_TGT_DEL: case IBMVFC_HOST_ACTION_QUERY_TGTS: case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: - case IBMVFC_HOST_ACTION_TGT_ADD: case IBMVFC_HOST_ACTION_NONE: default: vhost->action = action; @@ -2306,7 +2307,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time) done = 1; } - if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE) + if (vhost->scan_complete) done = 1; spin_unlock_irqrestore(shost->host_lock, flags); return done; @@ -2820,7 +2821,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET; if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC) tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT); + tgt->add_rport = 1; } else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); } else if (prli_rsp[index].retry) @@ -3660,7 +3661,6 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost) return 1; case IBMVFC_HOST_ACTION_INIT: case IBMVFC_HOST_ACTION_ALLOC_TGTS: - case IBMVFC_HOST_ACTION_TGT_ADD: case IBMVFC_HOST_ACTION_TGT_DEL: case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: case IBMVFC_HOST_ACTION_QUERY: @@ -3715,25 +3715,26 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events) static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) { struct ibmvfc_host *vhost = tgt->vhost; - struct fc_rport *rport = tgt->rport; + struct fc_rport *rport; unsigned long flags; - if (rport) { - tgt_dbg(tgt, "Setting rport roles\n"); - fc_remote_port_rolechg(rport, tgt->ids.roles); - spin_lock_irqsave(vhost->host->host_lock, flags); - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + tgt_dbg(tgt, "Adding rport\n"); + rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); + spin_lock_irqsave(vhost->host->host_lock, flags); + + if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) { + tgt_dbg(tgt, "Deleting rport\n"); + list_del(&tgt->queue); spin_unlock_irqrestore(vhost->host->host_lock, flags); + fc_remote_port_delete(rport); + del_timer_sync(&tgt->timer); + kref_put(&tgt->kref, ibmvfc_release_tgt); return; } - tgt_dbg(tgt, "Adding rport\n"); - rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); - spin_lock_irqsave(vhost->host->host_lock, flags); - tgt->rport = rport; - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); if (rport) { tgt_dbg(tgt, "rport add succeeded\n"); + tgt->rport = rport; rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; rport->supported_classes = 0; tgt->target_id = rport->scsi_target_id; @@ -3811,11 +3812,21 @@ static void 
ibmvfc_do_work(struct ibmvfc_host *vhost) if (vhost->state == IBMVFC_INITIALIZING) { if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) { - ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD); - vhost->init_retries = 0; - spin_unlock_irqrestore(vhost->host->host_lock, flags); - scsi_unblock_requests(vhost->host); + if (vhost->reinit) { + vhost->reinit = 0; + scsi_block_requests(vhost->host); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + } else { + ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); + wake_up(&vhost->init_wait_q); + schedule_work(&vhost->rport_add_work_q); + vhost->init_retries = 0; + spin_unlock_irqrestore(vhost->host->host_lock, flags); + scsi_unblock_requests(vhost->host); + } + return; } else { ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); @@ -3846,24 +3857,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) if (!ibmvfc_dev_init_to_do(vhost)) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED); break; - case IBMVFC_HOST_ACTION_TGT_ADD: - list_for_each_entry(tgt, &vhost->targets, queue) { - if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) { - spin_unlock_irqrestore(vhost->host->host_lock, flags); - ibmvfc_tgt_add_rport(tgt); - return; - } - } - - if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { - vhost->reinit = 0; - scsi_block_requests(vhost->host); - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); - } else { - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); - wake_up(&vhost->init_wait_q); - } - break; default: break; }; @@ -4092,6 +4085,56 @@ nomem: return -ENOMEM; } +/** + * ibmvfc_rport_add_thread - Worker thread for rport adds + * @work: work struct + * + **/ +static void ibmvfc_rport_add_thread(struct work_struct *work) +{ + struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host, + rport_add_work_q); + struct ibmvfc_target *tgt; + struct fc_rport *rport; + unsigned long flags; + int did_work; + + ENTER; + spin_lock_irqsave(vhost->host->host_lock, flags); + do { + did_work = 0; + if (vhost->state != IBMVFC_ACTIVE) + break; + + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->add_rport) { + did_work = 1; + tgt->add_rport = 0; + kref_get(&tgt->kref); + rport = tgt->rport; + if (!rport) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_tgt_add_rport(tgt); + } else if (get_device(&rport->dev)) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + tgt_dbg(tgt, "Setting rport roles\n"); + fc_remote_port_rolechg(rport, tgt->ids.roles); + put_device(&rport->dev); + } + + kref_put(&tgt->kref, ibmvfc_release_tgt); + spin_lock_irqsave(vhost->host->host_lock, flags); + break; + } + } + } while(did_work); + + if (vhost->state == IBMVFC_ACTIVE) + vhost->scan_complete = 1; + spin_unlock_irqrestore(vhost->host->host_lock, flags); + LEAVE; +} + /** * ibmvfc_probe - Adapter hot plug add entry point * @vdev: vio device struct @@ -4135,6 +4178,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) strcpy(vhost->partition_name, "UNKNOWN"); init_waitqueue_head(&vhost->work_wait_q); init_waitqueue_head(&vhost->init_wait_q); + INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread); if ((rc = ibmvfc_alloc_mem(vhost))) goto free_scsi_host; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 4dac3560c1a4..3a6a725fd396 100644 --- 
a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -575,7 +575,6 @@ enum ibmvfc_target_action { IBMVFC_TGT_ACTION_NONE = 0, IBMVFC_TGT_ACTION_INIT, IBMVFC_TGT_ACTION_INIT_WAIT, - IBMVFC_TGT_ACTION_ADD_RPORT, IBMVFC_TGT_ACTION_DEL_RPORT, }; @@ -588,6 +587,7 @@ struct ibmvfc_target { int target_id; enum ibmvfc_target_action action; int need_login; + int add_rport; int init_retries; u32 cancel_key; struct ibmvfc_service_parms service_parms; @@ -635,7 +635,6 @@ enum ibmvfc_host_action { IBMVFC_HOST_ACTION_ALLOC_TGTS, IBMVFC_HOST_ACTION_TGT_INIT, IBMVFC_HOST_ACTION_TGT_DEL_FAILED, - IBMVFC_HOST_ACTION_TGT_ADD, }; enum ibmvfc_host_state { @@ -682,6 +681,7 @@ struct ibmvfc_host { int client_migrated; int reinit; int delay_init; + int scan_complete; int events_to_log; #define IBMVFC_AE_LINKUP 0x0001 #define IBMVFC_AE_LINKDOWN 0x0002 @@ -692,6 +692,7 @@ struct ibmvfc_host { void (*job_step) (struct ibmvfc_host *); struct task_struct *work_thread; struct tasklet_struct tasklet; + struct work_struct rport_add_work_q; wait_queue_head_t init_wait_q; wait_queue_head_t work_wait_q; }; -- cgit v1.2.3 From 79111d0899a122fa3cf0a2921292c3e6a25452d0 Mon Sep 17 00:00:00 2001 From: Brian King Date: Thu, 28 May 2009 16:17:29 -0500 Subject: [SCSI] ibmvfc: Add support for NPIV Logout This patch adds support for a new command supported by the Virtual I/O Server, NPIV Logout. The command will abort all outstanding commands and log out of the fabric. Currently, the only way to do this is by breaking the CRQ, which can take a fairly long time when lots of commands are outstanding. The NPIV Logout command provides a mechanism to accomplish virtually the same function, but is much faster. Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 97 ++++++++++++++++++++++++++++++++++++++++-- drivers/scsi/ibmvscsi/ibmvfc.h | 12 +++++- 2 files changed, 105 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 76ae266b07c4..6eb3b75eec93 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -143,6 +143,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *); static void ibmvfc_tgt_send_prli(struct ibmvfc_target *); static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *); static void ibmvfc_tgt_query_target(struct ibmvfc_target *); +static void ibmvfc_npiv_logout(struct ibmvfc_host *); static const char *unknown_error = "unknown error"; @@ -477,6 +478,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) vhost->action = action; break; + case IBMVFC_HOST_ACTION_LOGO_WAIT: + if (vhost->action == IBMVFC_HOST_ACTION_LOGO) + vhost->action = action; + break; case IBMVFC_HOST_ACTION_INIT_WAIT: if (vhost->action == IBMVFC_HOST_ACTION_INIT) vhost->action = action; @@ -496,6 +501,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) vhost->action = action; break; + case IBMVFC_HOST_ACTION_LOGO: case IBMVFC_HOST_ACTION_INIT: case IBMVFC_HOST_ACTION_TGT_DEL: case IBMVFC_HOST_ACTION_QUERY_TGTS: @@ -647,6 +653,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost) } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); vhost->state = IBMVFC_NO_CRQ; + vhost->logged_in = 0; dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); free_page((unsigned long)crq->msgs); } @@ -693,6 +700,7 @@ static int 
ibmvfc_reset_crq(struct ibmvfc_host *vhost) } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); vhost->state = IBMVFC_NO_CRQ; + vhost->logged_in = 0; ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); /* Clean out the queue */ @@ -808,10 +816,10 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code) } /** - * __ibmvfc_reset_host - Reset the connection to the server (no locking) + * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ * @vhost: struct ibmvfc host to reset **/ -static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) +static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost) { int rc; @@ -827,9 +835,25 @@ static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) } /** - * ibmvfc_reset_host - Reset the connection to the server + * __ibmvfc_reset_host - Reset the connection to the server (no locking) * @vhost: struct ibmvfc host to reset **/ +static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) +{ + if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT && + !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { + scsi_block_requests(vhost->host); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO); + vhost->job_step = ibmvfc_npiv_logout; + wake_up(&vhost->work_wait_q); + } else + ibmvfc_hard_reset_host(vhost); +} + +/** + * ibmvfc_reset_host - Reset the connection to the server + * @vhost: ibmvfc host struct + **/ static void ibmvfc_reset_host(struct ibmvfc_host *vhost) { unsigned long flags; @@ -2230,6 +2254,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) return; case IBMVFC_CRQ_XPORT_EVENT: vhost->state = IBMVFC_NO_CRQ; + vhost->logged_in = 0; ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); if (crq->format == IBMVFC_PARTITION_MIGRATED) { /* We need to re-setup the interpartition connection */ @@ -3554,6 +3579,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) return; } + vhost->logged_in = 1; npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS); dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", rsp->partition_name, rsp->device_name, rsp->port_loc_code, @@ -3611,6 +3637,65 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); }; +/** + * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + u32 mad_status = evt->xfer_iu->npiv_logout.common.status; + + ibmvfc_free_event(evt); + + switch (mad_status) { + case IBMVFC_MAD_SUCCESS: + if (list_empty(&vhost->sent) && + vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) { + ibmvfc_init_host(vhost, 0); + return; + } + break; + case IBMVFC_MAD_FAILED: + case IBMVFC_MAD_NOT_SUPPORTED: + case IBMVFC_MAD_CRQ_ERROR: + case IBMVFC_MAD_DRIVER_FAILED: + default: + ibmvfc_dbg(vhost, "NPIV Logout failed. 
0x%X\n", mad_status); + break; + } + + ibmvfc_hard_reset_host(vhost); +} + +/** + * ibmvfc_npiv_logout - Issue an NPIV Logout + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost) +{ + struct ibmvfc_npiv_logout_mad *mad; + struct ibmvfc_event *evt; + + evt = ibmvfc_get_event(vhost); + ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT); + + mad = &evt->iu.npiv_logout; + memset(mad, 0, sizeof(*mad)); + mad->common.version = 1; + mad->common.opcode = IBMVFC_NPIV_LOGOUT; + mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad); + + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT); + + if (!ibmvfc_send_event(evt, vhost, default_timeout)) + ibmvfc_dbg(vhost, "Sent NPIV logout\n"); + else + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); +} + /** * ibmvfc_dev_init_to_do - Is there target initialization work to do? * @vhost: ibmvfc host struct @@ -3647,6 +3732,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost) switch (vhost->action) { case IBMVFC_HOST_ACTION_NONE: case IBMVFC_HOST_ACTION_INIT_WAIT: + case IBMVFC_HOST_ACTION_LOGO_WAIT: return 0; case IBMVFC_HOST_ACTION_TGT_INIT: case IBMVFC_HOST_ACTION_QUERY_TGTS: @@ -3659,6 +3745,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost) if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) return 0; return 1; + case IBMVFC_HOST_ACTION_LOGO: case IBMVFC_HOST_ACTION_INIT: case IBMVFC_HOST_ACTION_ALLOC_TGTS: case IBMVFC_HOST_ACTION_TGT_DEL: @@ -3765,8 +3852,12 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) vhost->events_to_log = 0; switch (vhost->action) { case IBMVFC_HOST_ACTION_NONE: + case IBMVFC_HOST_ACTION_LOGO_WAIT: case IBMVFC_HOST_ACTION_INIT_WAIT: break; + case IBMVFC_HOST_ACTION_LOGO: + vhost->job_step(vhost); + break; case IBMVFC_HOST_ACTION_INIT: BUG_ON(vhost->state != IBMVFC_INITIALIZING); if (vhost->delay_init) { diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 3a6a725fd396..6adaad80565d 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -57,9 +57,10 @@ * Ensure we have resources for ERP and initialization: * 1 for ERP * 1 for initialization + * 1 for NPIV Logout * 2 for each discovery thread */ -#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + (disc_threads * 2)) +#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + (disc_threads * 2)) #define IBMVFC_MAD_SUCCESS 0x00 #define IBMVFC_MAD_NOT_SUPPORTED 0xF1 @@ -127,6 +128,7 @@ enum ibmvfc_mad_types { IBMVFC_IMPLICIT_LOGOUT = 0x0040, IBMVFC_PASSTHRU = 0x0200, IBMVFC_TMF_MAD = 0x0100, + IBMVFC_NPIV_LOGOUT = 0x0800, }; struct ibmvfc_mad_common { @@ -143,6 +145,10 @@ struct ibmvfc_npiv_login_mad { struct srp_direct_buf buffer; }__attribute__((packed, aligned (8))); +struct ibmvfc_npiv_logout_mad { + struct ibmvfc_mad_common common; +}__attribute__((packed, aligned (8))); + #define IBMVFC_MAX_NAME 256 struct ibmvfc_npiv_login { @@ -561,6 +567,7 @@ struct ibmvfc_async_crq_queue { union ibmvfc_iu { struct ibmvfc_mad_common mad_common; struct ibmvfc_npiv_login_mad npiv_login; + struct ibmvfc_npiv_logout_mad npiv_logout; struct ibmvfc_discover_targets discover_targets; struct ibmvfc_port_login plogi; struct ibmvfc_process_login prli; @@ -627,6 +634,8 @@ struct ibmvfc_event_pool { enum ibmvfc_host_action { IBMVFC_HOST_ACTION_NONE = 0, + IBMVFC_HOST_ACTION_LOGO, + IBMVFC_HOST_ACTION_LOGO_WAIT, IBMVFC_HOST_ACTION_INIT, IBMVFC_HOST_ACTION_INIT_WAIT, IBMVFC_HOST_ACTION_QUERY, @@ -682,6 +691,7 @@ struct ibmvfc_host { int reinit; int delay_init; int 
scan_complete; + int logged_in; int events_to_log; #define IBMVFC_AE_LINKUP 0x0001 #define IBMVFC_AE_LINKDOWN 0x0002 -- cgit v1.2.3 From 497f9c504f76e7a751cd370604e1c8521743746d Mon Sep 17 00:00:00 2001 From: Brian King Date: Thu, 28 May 2009 16:17:30 -0500 Subject: [SCSI] ibmvfc: Add flush on halt support The virtual I/O server controlling the NPIV adapter associated with a virtual fibre channel adapter can send a HALT event to the client. When this occurs, the client can no longer send commands until a RESUME is received. By adding support for flush on halt, we will get all of our outstanding commands flushed back before the Virtual I/O server enters the halt state, eliminating potential command timeouts for outstanding commands which might occur if we did not support this feature. Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 34 +++++++++++++++++++++++++++++++--- drivers/scsi/ibmvscsi/ibmvfc.h | 13 +++++++++++-- 2 files changed, 42 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 6eb3b75eec93..04d97d9e3cea 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -110,7 +110,7 @@ static const struct { { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" }, - { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" }, { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" }, @@ -1169,8 +1169,9 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost) login_info->partition_num = vhost->partition_number; login_info->vfc_frame_version = 1; login_info->fcp_version = 3; + login_info->flags = IBMVFC_FLUSH_ON_HALT; if (vhost->client_migrated) - login_info->flags = IBMVFC_CLIENT_MIGRATED; + login_info->flags |= IBMVFC_CLIENT_MIGRATED; login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; login_info->capabilities = IBMVFC_CAN_MIGRATE; @@ -2185,8 +2186,25 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); switch (crq->event) { - case IBMVFC_AE_LINK_UP: case IBMVFC_AE_RESUME: + switch (crq->link_state) { + case IBMVFC_AE_LS_LINK_DOWN: + ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); + break; + case IBMVFC_AE_LS_LINK_DEAD: + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + break; + case IBMVFC_AE_LS_LINK_UP: + case IBMVFC_AE_LS_LINK_BOUNCED: + default: + vhost->events_to_log |= IBMVFC_AE_LINKUP; + vhost->delay_init = 1; + __ibmvfc_reset_host(vhost); + break; + }; + + break; + case IBMVFC_AE_LINK_UP: vhost->events_to_log |= IBMVFC_AE_LINKUP; vhost->delay_init = 1; __ibmvfc_reset_host(vhost); @@ -2505,6 +2523,14 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version); } +static ssize_t ibmvfc_show_host_capabilities(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities); +} + /** 
* ibmvfc_show_log_level - Show the adapter's error logging level * @dev: class device struct @@ -2554,6 +2580,7 @@ static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL); static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL); static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL); static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL); +static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL); static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR, ibmvfc_show_log_level, ibmvfc_store_log_level); @@ -2609,6 +2636,7 @@ static struct device_attribute *ibmvfc_attrs[] = { &dev_attr_port_loc_code, &dev_attr_drc_name, &dev_attr_npiv_version, + &dev_attr_capabilities, &dev_attr_log_level, NULL }; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 6adaad80565d..cf26380820fb 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -207,7 +207,8 @@ struct ibmvfc_npiv_login_resp { #define IBMVFC_NATIVE_FC 0x01 #define IBMVFC_CAN_FLUSH_ON_HALT 0x08 u32 reserved; - u64 capabilites; + u64 capabilities; +#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 u32 max_cmds; u32 scsi_id_sz; u64 max_dma_len; @@ -547,9 +548,17 @@ struct ibmvfc_crq_queue { dma_addr_t msg_token; }; +enum ibmvfc_ae_link_state { + IBMVFC_AE_LS_LINK_UP = 0x01, + IBMVFC_AE_LS_LINK_BOUNCED = 0x02, + IBMVFC_AE_LS_LINK_DOWN = 0x04, + IBMVFC_AE_LS_LINK_DEAD = 0x08, +}; + struct ibmvfc_async_crq { volatile u8 valid; - u8 pad[3]; + u8 link_state; + u8 pad[2]; u32 pad2; volatile u64 event; volatile u64 scsi_id; -- cgit v1.2.3 From 5e47167b6be0ca24cbb04fb71ea611ab7c089aff Mon Sep 17 00:00:00 2001 From: Brian King Date: Thu, 28 May 2009 16:17:32 -0500 Subject: [SCSI] ibmvfc: Improve device rediscovery For certain scenarios during device rediscovery, we detect we need to log back into a target. Currently we do just that - PLOGI/PRLI back into the target. Change the code to delete and add the target from the FC transport layer as well, to ensure we handle any cases where the target may have changed. 
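The FC transport idiom behind this change, deleting the rport outright and adding it again rather than merely logging back in, looks roughly like the sketch below; foo_readd_rport is a hypothetical helper, not the driver's actual code path:

	#include <scsi/scsi_host.h>
	#include <scsi/scsi_transport_fc.h>

	/* Sketch: make the FC transport forget a target and rediscover it
	 * from scratch, so stale identifiers or roles cannot survive the
	 * re-login. */
	static struct fc_rport *foo_readd_rport(struct Scsi_Host *shost,
						struct fc_rport *rport,
						struct fc_rport_identifiers *ids)
	{
		fc_remote_port_delete(rport);		  /* blocks I/O, removes rport */
		return fc_remote_port_add(shost, 0, ids); /* fresh discovery */
	}

In the driver this is expressed indirectly: marking the target IBMVFC_TGT_ACTION_DEL_RPORT lets the existing work thread do the delete, and normal discovery then re-adds it.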
Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 04d97d9e3cea..da233256e9aa 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -583,7 +583,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin) } list_for_each_entry(tgt, &vhost->targets, queue) - tgt->need_login = 1; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); scsi_block_requests(vhost->host); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); vhost->job_step = ibmvfc_npiv_login; @@ -3155,13 +3155,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt) case IBMVFC_MAD_SUCCESS: tgt_dbg(tgt, "ADISC succeeded\n"); if (ibmvfc_adisc_needs_plogi(mad, tgt)) - tgt->need_login = 1; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); break; case IBMVFC_MAD_DRIVER_FAILED: break; case IBMVFC_MAD_FAILED: default: - tgt->need_login = 1; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16; fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8; tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", -- cgit v1.2.3 From 6d29cc56bead73b6f386cf43333708579deb5eed Mon Sep 17 00:00:00 2001 From: Brian King Date: Thu, 28 May 2009 16:17:33 -0500 Subject: [SCSI] ibmvfc: Improve LOGO/PRLO ELS handling There are several scenarios where the ibmvfc driver needs to try to log back into a target on the fabric. Today when these events occur, we simply go through re-discovery for all attached targets, assuming that either the query of the name server or an ADISC will indicate we might need to log back into the target, which doesn't work for all scenarios. Fix this by taking note of the affected target(s) in these conditions and ensuring we try to PLOGI back into the target. 
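The matching rule in the hunk below can be read as follows: an event carrying no identifiers names no target; otherwise a zero field acts as a wildcard and a nonzero field must match exactly. Factored out as a predicate, purely for illustration (the structs are the driver's own types from ibmvfc.h):

	#include <linux/types.h>

	/* Zero fields in the async event act as wildcards; nonzero fields
	 * must match the target exactly. */
	static bool foo_event_matches_tgt(struct ibmvfc_async_crq *crq,
					  struct ibmvfc_target *tgt)
	{
		if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
			return false;	/* event carries no identifiers */
		if (crq->scsi_id && tgt->scsi_id != crq->scsi_id)
			return false;
		if (crq->wwpn && tgt->ids.port_name != crq->wwpn)
			return false;
		if (crq->node_name && tgt->ids.node_name != crq->node_name)
			return false;
		return true;
	}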
Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 38 +++++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index da233256e9aa..b4b805e8d7db 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -1484,6 +1484,27 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt) rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status); } +/** + * ibmvfc_relogin - Log back into the specified device + * @sdev: scsi device struct + * + **/ +static void ibmvfc_relogin(struct scsi_device *sdev) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct ibmvfc_target *tgt; + + list_for_each_entry(tgt, &vhost->targets, queue) { + if (rport == tgt->rport) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + break; + } + } + + ibmvfc_reinit_host(vhost); +} + /** * ibmvfc_scsi_done - Handle responses from commands * @evt: ibmvfc event to be handled @@ -1516,7 +1537,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt) if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED)) - ibmvfc_reinit_host(evt->vhost); + ibmvfc_relogin(cmnd->device); if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) cmnd->result = (DID_ERROR << 16); @@ -2181,6 +2202,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, struct ibmvfc_host *vhost) { const char *desc = ibmvfc_get_ae_desc(crq->event); + struct ibmvfc_target *tgt; ibmvfc_log(vhost, 3, "%s event received. 
scsi_id: %llx, wwpn: %llx," " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); @@ -2218,9 +2240,23 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, case IBMVFC_AE_SCN_NPORT: case IBMVFC_AE_SCN_GROUP: vhost->events_to_log |= IBMVFC_AE_RSCN; + ibmvfc_reinit_host(vhost); + break; case IBMVFC_AE_ELS_LOGO: case IBMVFC_AE_ELS_PRLO: case IBMVFC_AE_ELS_PLOGI: + list_for_each_entry(tgt, &vhost->targets, queue) { + if (!crq->scsi_id && !crq->wwpn && !crq->node_name) + break; + if (crq->scsi_id && tgt->scsi_id != crq->scsi_id) + continue; + if (crq->wwpn && tgt->ids.port_name != crq->wwpn) + continue; + if (crq->node_name && tgt->ids.node_name != crq->node_name) + continue; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + } + ibmvfc_reinit_host(vhost); break; case IBMVFC_AE_LINK_DOWN: -- cgit v1.2.3 From cbbf58f2e2cb73dbed660fdc7b741a010d6bdbef Mon Sep 17 00:00:00 2001 From: Brian King Date: Thu, 28 May 2009 16:17:34 -0500 Subject: [SCSI] ibmvfc: Driver version 1.0.6 Bump driver version Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index cf26380820fb..c2668d7d67f5 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -29,8 +29,8 @@ #include "viosrp.h" #define IBMVFC_NAME "ibmvfc" -#define IBMVFC_DRIVER_VERSION "1.0.5" -#define IBMVFC_DRIVER_DATE "(March 19, 2009)" +#define IBMVFC_DRIVER_VERSION "1.0.6" +#define IBMVFC_DRIVER_DATE "(May 28, 2009)" #define IBMVFC_DEFAULT_TIMEOUT 60 #define IBMVFC_ADISC_CANCEL_TIMEOUT 45 -- cgit v1.2.3 From f00a3328bf9ecff46abd68a421693ba71cd16fc8 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 30 May 2009 13:40:04 +1000 Subject: [SCSI] cxgb3i: Include net/dst.h for struct dst_cache This driver needs dst_cache->dev so it should include net/dst.h to ensure that it builds. While net/tcp.h probably includes it already, we shouldn't rely on that since there is no guarantee that this won't change in future. Signed-off-by: Herbert Xu Acked-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c index 04a43744aedf..74369a3f963b 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c +++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include -- cgit v1.2.3 From 5f48f70ecef25df93e122985272ff647f5653836 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Wed, 6 May 2009 10:52:12 -0700 Subject: [SCSI] libfcoe: fip: fix non-FIP-mode FLOGI state after reset. When a reset is sent using fcoeadm on a non-FIP mode NIC, there's no link flap, so the fcoe_ctlr stays in non-FIP mode. In that case, FIP wasn't setting the flogi_oxid or map_dest flag, causing the FLOGI to be sent with both the wrong source MAC and the wrong destination MAC address, causing it to fail. This leads to a non-functioning HBA until a link flap or instance delete/create.
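As background for the map_dest flag mentioned above, here is a small self-contained sketch of how an FCoE destination MAC can be derived from the FC-MAP prefix plus the 24-bit FC destination ID; the helper name and the 0x0EFC00 default value are stated as assumptions here, not code taken from this patch:

#include <stdint.h>

#define FC_MAP_DEFAULT 0x0efc00u /* assumed conventional default FC-MAP OUI */

/*
 * Hedged sketch: with map_dest set, the destination MAC for FCoE
 * frames is mapped from the FC ID instead of being taken from a
 * FIP-discovered FCF address. Function name is illustrative.
 */
static void fc_id_to_fcoe_mac(uint8_t mac[6], uint32_t fc_id)
{
	mac[0] = (FC_MAP_DEFAULT >> 16) & 0xff;
	mac[1] = (FC_MAP_DEFAULT >> 8) & 0xff;
	mac[2] = FC_MAP_DEFAULT & 0xff;
	mac[3] = (fc_id >> 16) & 0xff;	/* domain */
	mac[4] = (fc_id >> 8) & 0xff;	/* area */
	mac[5] = fc_id & 0xff;		/* al_pa */
}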
Signed-off-by: Joe Eykholt Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 62ba0f39c6bd..a7ecafb9a507 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -447,14 +447,10 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb) u16 old_xid; u8 op; - if (fip->state == FIP_ST_NON_FIP) - return 0; - fh = (struct fc_frame_header *)skb->data; op = *(u8 *)(fh + 1); - switch (op) { - case ELS_FLOGI: + if (op == ELS_FLOGI) { old_xid = fip->flogi_oxid; fip->flogi_oxid = ntohs(fh->fh_ox_id); if (fip->state == FIP_ST_AUTO) { @@ -466,6 +462,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb) fip->map_dest = 1; return 0; } + if (fip->state == FIP_ST_NON_FIP) + fip->map_dest = 1; + } + + if (fip->state == FIP_ST_NON_FIP) + return 0; + + switch (op) { + case ELS_FLOGI: op = FIP_DT_FLOGI; break; case ELS_FDISC: -- cgit v1.2.3 From 0f4915398a4233cdbfc4e9bf4436323546945b3f Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Wed, 6 May 2009 10:52:18 -0700 Subject: [SCSI] fcoe: use ETH_P_FIP for skb->protocol of FIP frames FIP frames should leave the fcoe layer with skb->protocol set to ETH_P_FIP, not ETH_P_802_3. Signed-off-by: Chris Leech Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index a7ecafb9a507..929411880e4b 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -213,7 +213,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf) sol->desc.size.fd_size = htons(fcoe_size); skb_put(skb, sizeof(*sol)); - skb->protocol = htons(ETH_P_802_3); + skb->protocol = htons(ETH_P_FIP); skb_reset_mac_header(skb); skb_reset_network_header(skb); fip->send(fip, skb); @@ -365,7 +365,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa) } skb_put(skb, len); - skb->protocol = htons(ETH_P_802_3); + skb->protocol = htons(ETH_P_FIP); skb_reset_mac_header(skb); skb_reset_network_header(skb); fip->send(fip, skb); @@ -424,7 +424,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, if (dtype != ELS_FLOGI) memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN); - skb->protocol = htons(ETH_P_802_3); + skb->protocol = htons(ETH_P_FIP); skb_reset_mac_header(skb); skb_reset_network_header(skb); return 0; -- cgit v1.2.3 From d5e6054a0a097527b3920a8a0aefe7f830c014fd Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 6 May 2009 10:52:23 -0700 Subject: [SCSI] libfc: use DID_ERROR when we have an internally aborted command If we aborted a command because it timed out, we should not use DID_ABORT, which fails the command right away back to the upper layer. We want to use something that indicates the command did not complete normally but was not a fatal problem.
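For context on the DID_* choice above, the SCSI midlayer packs the host byte into bits 16-23 of the result word, which is why drivers write constructs like (DID_ERROR << 16) | status. A hedged, self-contained sketch of that composition:

#include <stdint.h>

/*
 * Hedged sketch of SCSI result-word composition; 0x07 is the DID_ERROR
 * host-byte value in this era's <scsi/scsi.h>. The helper name is
 * illustrative, not a kernel API.
 */
static inline uint32_t make_scsi_result(uint8_t host_byte, uint8_t status_byte)
{
	return ((uint32_t)host_byte << 16) | status_byte;
}

/* Example: make_scsi_result(0x07, 0) yields 0x00070000, i.e. DID_ERROR. */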
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 521f996f9b13..ad8b747837b0 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1896,7 +1896,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; break; case FC_CMD_ABORTED: - sc_cmd->result = (DID_ABORT << 16) | fsp->io_status; + sc_cmd->result = (DID_ERROR << 16) | fsp->io_status; break; case FC_CMD_TIME_OUT: sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; -- cgit v1.2.3 From 30121d14f503dac056ee7f68d99eb5d548899b59 Mon Sep 17 00:00:00 2001 From: Steve Ma Date: Wed, 6 May 2009 10:52:29 -0700 Subject: [SCSI] libfc: Check if exchange is completed when receiving a sequence When a sequence is received in response to an exchange we issued previously, we should check to see if the exchange has completed. If yes, the sequence should be discarded. Since the exchange might still be in the completion process, it should be left untouched. Signed-off-by: Steve Ma Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 992af05aacf1..7af9bceb8aa9 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1159,6 +1159,10 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) atomic_inc(&mp->stats.xid_not_found); goto out; } + if (ep->esb_stat & ESB_ST_COMPLETE) { + atomic_inc(&mp->stats.xid_not_found); + goto out; + } if (ep->rxid == FC_XID_UNKNOWN) ep->rxid = ntohs(fh->fh_rx_id); if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { -- cgit v1.2.3 From 4bb6b5153313269b4b328f4f5ddc558c45c50713 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Wed, 6 May 2009 10:52:34 -0700 Subject: [SCSI] fcoe: reduce lock cost when adding a new skb to fcoe_pending_queue Currently fcoe_pending_queue.lock is held twice for every new skb added to this queue whenever at least one packet is already pending there, which is not uncommon once skbs start getting queued after a fcoe_start_io => dev_queue_xmit failure. This patch moves most of the fcoe_pending_queue logic into the fcoe_check_wait_queue function; the new logic grabs fcoe_pending_queue.lock only once to add a new skb instead of twice as before. After this patch, the call flow around fcoe_check_wait_queue in fcoe_xmit is a bit simpler, with the modified fcoe_check_wait_queue taking care of both adding and removing pending skbs in one function.
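For illustration, a minimal standalone sketch of the single-acquisition pattern this patch adopts: enqueue the new skb and apply flow control inside one critical section instead of locking once to check and again to queue. The container struct and names below are hypothetical, not the fcoe driver's:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct tx_backlog {			/* hypothetical container */
	spinlock_t lock;
	struct sk_buff_head queue;
	int qfull;
};

static void backlog_add_and_check(struct tx_backlog *b, struct sk_buff *skb,
				  unsigned int max_depth)
{
	spin_lock_bh(&b->lock);
	if (skb)
		__skb_queue_tail(&b->queue, skb); /* enqueue under the same lock */
	if (skb_queue_len(&b->queue) > max_depth)
		b->qfull = 1;			  /* flow control without relocking */
	spin_unlock_bh(&b->lock);
}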
Signed-off-by: Vasu Dev Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 37 +++++++++++++++---------------------- 1 file changed, 15 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 6acb7778f557..30eba75a5cdd 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -71,7 +71,7 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); static int fcoe_hostlist_add(const struct fc_lport *); static int fcoe_hostlist_remove(const struct fc_lport *); -static int fcoe_check_wait_queue(struct fc_lport *); +static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *); static int fcoe_device_notification(struct notifier_block *, ulong, void *); static void fcoe_dev_setup(void); static void fcoe_dev_cleanup(void); @@ -989,7 +989,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp) */ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) { - int wlen, rc = 0; + int wlen; u32 crc; struct ethhdr *eh; struct fcoe_crc_eof *cp; @@ -1108,18 +1108,9 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) /* send down to lld */ fr_dev(fp) = lp; if (fc->fcoe_pending_queue.qlen) - rc = fcoe_check_wait_queue(lp); - - if (rc == 0) - rc = fcoe_start_io(skb); - - if (rc) { - spin_lock_bh(&fc->fcoe_pending_queue.lock); - __skb_queue_tail(&fc->fcoe_pending_queue, skb); - spin_unlock_bh(&fc->fcoe_pending_queue.lock); - if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) - lp->qfull = 1; - } + fcoe_check_wait_queue(lp, skb); + else if (fcoe_start_io(skb)) + fcoe_check_wait_queue(lp, skb); return 0; } @@ -1285,7 +1276,7 @@ void fcoe_watchdog(ulong vp) read_lock(&fcoe_hostlist_lock); list_for_each_entry(fc, &fcoe_hostlist, list) { if (fc->ctlr.lp) - fcoe_check_wait_queue(fc->ctlr.lp); + fcoe_check_wait_queue(fc->ctlr.lp, NULL); } read_unlock(&fcoe_hostlist_lock); @@ -1306,16 +1297,17 @@ void fcoe_watchdog(ulong vp) * The wait_queue is used when the skb transmit fails. skb will go * in the wait_queue which will be emptied by the timer function or * by the next skb transmit. - * - * Returns: 0 for success */ -static int fcoe_check_wait_queue(struct fc_lport *lp) +static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb) { struct fcoe_softc *fc = lport_priv(lp); - struct sk_buff *skb; - int rc = -1; + int rc; spin_lock_bh(&fc->fcoe_pending_queue.lock); + + if (skb) + __skb_queue_tail(&fc->fcoe_pending_queue, skb); + if (fc->fcoe_pending_queue_active) goto out; fc->fcoe_pending_queue_active = 1; @@ -1342,10 +1334,11 @@ static int fcoe_check_wait_queue(struct fc_lport *lp) if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) lp->qfull = 0; fc->fcoe_pending_queue_active = 0; - rc = fc->fcoe_pending_queue.qlen; out: + if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) + lp->qfull = 1; spin_unlock_bh(&fc->fcoe_pending_queue.lock); - return rc; + return; } /** -- cgit v1.2.3 From 1047f22108bd9bfedefd3ff014cb56691dfbaa3f Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Wed, 6 May 2009 10:52:40 -0700 Subject: [SCSI] fcoe: remove fcoe_watchdog Removes the periodic fcoe_watchdog timer used across all fcoe interfaces maintained in fcoe_hostlist and instead adds a new per-interface fcoe_queue_timer. The added timer is armed only when some pending skb needs to be flushed, as opposed to the periodic 1-second fcoe_watchdog; since fcoe_queue_timer is now used on demand, it is set to 2 jiffies.
Now fcoe_queue_timer is much simpler than fcoe_watchdog, which used a lock to process all fcoe interfaces from fcoe_hostlist. I noticed a positive performance result with the 2-jiffies timer, as it helps flush fcoe_pending_queue quickly. Signed-off-by: Vasu Dev Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 52 +++++++++++++++++------------------------------- drivers/scsi/fcoe/fcoe.h | 1 + 2 files changed, 19 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 30eba75a5cdd..6e7a700f5d54 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -54,7 +54,6 @@ MODULE_LICENSE("GPL v2"); /* fcoe host list */ LIST_HEAD(fcoe_hostlist); DEFINE_RWLOCK(fcoe_hostlist_lock); -DEFINE_TIMER(fcoe_timer, NULL, 0, 0); DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); /* Function Prototypes */ @@ -167,6 +166,18 @@ static int fcoe_lport_config(struct fc_lport *lp) return 0; } +/** + * fcoe_queue_timer() - fcoe queue timer + * @lp: the fc_lport pointer + * + * Calls fcoe_check_wait_queue on timeout + * + */ +static void fcoe_queue_timer(ulong lp) +{ + fcoe_check_wait_queue((struct fc_lport *)lp, NULL); +} + /** * fcoe_netdev_config() - Set up netdev for SW FCoE * @lp : ptr to the fc_lport @@ -237,6 +248,7 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) } skb_queue_head_init(&fc->fcoe_pending_queue); fc->fcoe_pending_queue_active = 0; + setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp); /* setup Source Mac Address */ memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr, @@ -387,6 +399,9 @@ static int fcoe_if_destroy(struct net_device *netdev) /* Free existing skbs */ fcoe_clean_pending_queue(lp); + /* Stop the timer */ + del_timer_sync(&fc->timer); + /* Free memory used by statistical counters */ fc_lport_free_stats(lp); @@ -1259,32 +1274,6 @@ int fcoe_percpu_receive_thread(void *arg) return 0; } -/** - * fcoe_watchdog() - fcoe timer callback - * @vp: - * - * This checks the pending queue length for fcoe and set lport qfull - * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the - * fcoe_hostlist.
- * - * Returns: 0 for success - */ -void fcoe_watchdog(ulong vp) -{ - struct fcoe_softc *fc; - - read_lock(&fcoe_hostlist_lock); - list_for_each_entry(fc, &fcoe_hostlist, list) { - if (fc->ctlr.lp) - fcoe_check_wait_queue(fc->ctlr.lp, NULL); - } - read_unlock(&fcoe_hostlist_lock); - - fcoe_timer.expires = jiffies + (1 * HZ); - add_timer(&fcoe_timer); -} - - /** * fcoe_check_wait_queue() - attempt to clear the transmit backlog * @lp: the fc_lport @@ -1333,6 +1322,8 @@ static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb) if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) lp->qfull = 0; + if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer)) + mod_timer(&fc->timer, jiffies + 2); fc->fcoe_pending_queue_active = 0; out: if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) lp->qfull = 1; @@ -1809,10 +1800,6 @@ static int __init fcoe_init(void) /* Setup link change notification */ fcoe_dev_setup(); - setup_timer(&fcoe_timer, fcoe_watchdog, 0); - - mod_timer(&fcoe_timer, jiffies + (10 * HZ)); - fcoe_if_init(); return 0; @@ -1838,9 +1825,6 @@ static void __exit fcoe_exit(void) fcoe_dev_cleanup(); - /* Stop the timer */ - del_timer_sync(&fcoe_timer); - /* releases the associated fcoe hosts */ list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) fcoe_if_destroy(fc->real_dev); diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 917aae886897..a1eb8c1988b0 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -61,6 +61,7 @@ struct fcoe_softc { struct packet_type fip_packet_type; struct sk_buff_head fcoe_pending_queue; u8 fcoe_pending_queue_active; + struct timer_list timer; /* queue timer */ struct fcoe_ctlr ctlr; }; -- cgit v1.2.3 From 4e57e1cbbd1435b523b9cedb949728e9fdcfb5d4 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Wed, 6 May 2009 10:52:46 -0700 Subject: [SCSI] fcoe: stop reserving memory for vlan_ethhdr on tx path This is not required as the VLAN header is added by the device interface driver; reserving it was causing bad FC_CRC values in FCoE packets when using a VLAN interface. Signed-off-by: Vasu Dev Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 6e7a700f5d54..e606b4829d44 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1037,8 +1037,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) sof = fr_sof(fp); eof = fr_eof(fp); - elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ? - sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr); + elen = sizeof(struct ethhdr); hlen = sizeof(struct fcoe_hdr); tlen = sizeof(struct fcoe_crc_eof); wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; -- cgit v1.2.3 From 7f774025171f626fc1a6a97781967c84a869d277 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:12 -0700 Subject: [SCSI] qla2xxx: Export negotiated fabric-parameters for application support.
Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index fc30f8e2f467..524ceb9b9288 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1115,6 +1115,15 @@ qla2x00_vn_port_mac_address_show(struct device *dev, vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]); } +static ssize_t +qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap); +} + static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); @@ -1146,6 +1155,7 @@ static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show, static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL); static DEVICE_ATTR(vn_port_mac_address, S_IRUGO, qla2x00_vn_port_mac_address_show, NULL); +static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL); struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_driver_version, @@ -1170,6 +1180,7 @@ struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_flash_block_size, &dev_attr_vlan_id, &dev_attr_vn_port_mac_address, + &dev_attr_fabric_param, NULL, }; -- cgit v1.2.3 From ce0423f4a23317d0166addd7d6fcc4a0fa95e751 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:13 -0700 Subject: [SCSI] qla2xxx: Export XGMAC statistics on supported ISPs. Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 57 +++++++++++++++++++++++++++++++++++++++++ drivers/scsi/qla2xxx/qla_def.h | 4 +++ drivers/scsi/qla2xxx/qla_fw.h | 1 + drivers/scsi/qla2xxx/qla_gbl.h | 3 +++ drivers/scsi/qla2xxx/qla_mbx.c | 38 +++++++++++++++++++++++++++ drivers/scsi/qla2xxx/qla_os.c | 4 +++ 6 files changed, 107 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 524ceb9b9288..e8c1c9e01a7b 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -692,6 +692,58 @@ static struct bin_attribute sysfs_edc_status_attr = { .read = qla2x00_sysfs_read_edc_status, }; +static ssize_t +qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + int rval; + uint16_t actual_size; + + if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE) + return 0; + + if (ha->xgmac_data) + goto do_read; + + ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, + &ha->xgmac_data_dma, GFP_KERNEL); + if (!ha->xgmac_data) { + qla_printk(KERN_WARNING, ha, + "Unable to allocate memory for XGMAC read-data.\n"); + return 0; + } + +do_read: + actual_size = 0; + memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE); + + rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma, + XGMAC_DATA_SIZE, &actual_size); + if (rval != QLA_SUCCESS) { + qla_printk(KERN_WARNING, ha, + "Unable to read XGMAC data (%x).\n", rval); + count = 0; + } + + count = actual_size > count ? 
count: actual_size; + memcpy(buf, ha->xgmac_data, count); + + return count; +} + +static struct bin_attribute sysfs_xgmac_stats_attr = { + .attr = { + .name = "xgmac_stats", + .mode = S_IRUSR, + }, + .size = 0, + .read = qla2x00_sysfs_read_xgmac_stats, +}; + static struct sysfs_entry { char *name; struct bin_attribute *attr; @@ -706,6 +758,7 @@ static struct sysfs_entry { { "reset", &sysfs_reset_attr, }, { "edc", &sysfs_edc_attr, 2 }, { "edc_status", &sysfs_edc_status_attr, 2 }, + { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 }, { NULL }, }; @@ -721,6 +774,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha) continue; if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw)) continue; + if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw)) + continue; ret = sysfs_create_bin_file(&host->shost_gendev.kobj, iter->attr); @@ -743,6 +798,8 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha) continue; if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha)) continue; + if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha)) + continue; sysfs_remove_bin_file(&host->shost_gendev.kobj, iter->attr); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 721bae94e437..da941be9b182 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2397,6 +2397,10 @@ struct qla_hw_data { dma_addr_t edc_data_dma; uint16_t edc_data_len; +#define XGMAC_DATA_SIZE PAGE_SIZE + void *xgmac_data; + dma_addr_t xgmac_data_dma; + struct task_struct *dpc_thread; uint8_t dpc_active; /* DPC routine is active */ diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index f389f3da0bab..80ab46cfaca6 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1405,6 +1405,7 @@ struct access_chip_rsp_84xx { #define MBC_IDC_ACK 0x101 #define MBC_RESTART_MPI_FW 0x3d #define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. */ +#define MBC_GET_XGMAC_STATS 0x7a /* Flash access control option field bit definitions */ #define FAC_OPT_FORCE_SEMAPHORE BIT_15 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f17d525897a0..66ba3997c91c 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -293,6 +293,9 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *, int); extern int qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t); +extern int +qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *); + /* * Global Function Prototypes in qla_isr.c source file. 
*/ diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 7d0eeec9ba57..2497fe4ce5aa 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3462,3 +3462,41 @@ qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr, return rval; } + +int +qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, + uint16_t size_in_bytes, uint16_t *actual_size) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA81XX(vha->hw)) + return QLA_FUNCTION_FAILED; + + DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + + mcp->mb[0] = MBC_GET_XGMAC_STATS; + mcp->mb[2] = MSW(stats_dma); + mcp->mb[3] = LSW(stats_dma); + mcp->mb[6] = MSW(MSD(stats_dma)); + mcp->mb[7] = LSW(MSD(stats_dma)); + mcp->mb[8] = size_in_bytes >> 2; + mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " + "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, + mcp->mb[0], mcp->mb[1], mcp->mb[2])); + } else { + DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + + *actual_size = mcp->mb[2] << 2; + } + + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index f4f535536952..642e976083e8 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2434,6 +2434,10 @@ qla2x00_mem_free(struct qla_hw_data *ha) vfree(ha->fw_dump); } + if (ha->xgmac_data) + dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, + ha->xgmac_data, ha->xgmac_data_dma); + if (ha->sns_cmd) dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), ha->sns_cmd, ha->sns_cmd_dma); -- cgit v1.2.3 From 11bbc1d896637c1d83b11cc3b97ed3d6d2076c63 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:14 -0700 Subject: [SCSI] qla2xxx: Export TLV data on supported ISPs. Firmware currently provides PB and PGF TLVs. 
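Since the XGMAC statistics and DCBX TLV data in these patches are exported as binary sysfs attributes, they are read from userspace like ordinary files. A hedged sketch follows; the exact sysfs path varies per host and is an assumption here, not taken from the patches:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[4096];
	ssize_t n;
	/* assumed path; adjust hostN for the adapter in question */
	int fd = open("/sys/class/scsi_host/host0/device/xgmac_stats",
	    O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		fwrite(buf, 1, (size_t)n, stdout); /* dump raw statistics */
	close(fd);
	return 0;
}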
Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 52 +++++++++++++++++++++++++++++++++++++++++ drivers/scsi/qla2xxx/qla_def.h | 4 ++++ drivers/scsi/qla2xxx/qla_fw.h | 1 + drivers/scsi/qla2xxx/qla_gbl.h | 3 +++ drivers/scsi/qla2xxx/qla_mbx.c | 37 +++++++++++++++++++++++++++++ drivers/scsi/qla2xxx/qla_os.c | 4 ++++ 6 files changed, 101 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index e8c1c9e01a7b..9aa00f25aa6a 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -744,6 +744,57 @@ static struct bin_attribute sysfs_xgmac_stats_attr = { .read = qla2x00_sysfs_read_xgmac_stats, }; +static ssize_t +qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + int rval; + uint16_t actual_size; + + if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE) + return 0; + + if (ha->dcbx_tlv) + goto do_read; + + ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, + &ha->dcbx_tlv_dma, GFP_KERNEL); + if (!ha->dcbx_tlv) { + qla_printk(KERN_WARNING, ha, + "Unable to allocate memory for DCBX TLV read-data.\n"); + return 0; + } + +do_read: + actual_size = 0; + memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE); + + rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, + DCBX_TLV_DATA_SIZE); + if (rval != QLA_SUCCESS) { + qla_printk(KERN_WARNING, ha, + "Unable to read DCBX TLV data (%x).\n", rval); + count = 0; + } + + memcpy(buf, ha->dcbx_tlv, count); + + return count; +} + +static struct bin_attribute sysfs_dcbx_tlv_attr = { + .attr = { + .name = "dcbx_tlv", + .mode = S_IRUSR, + }, + .size = 0, + .read = qla2x00_sysfs_read_dcbx_tlv, +}; + static struct sysfs_entry { char *name; struct bin_attribute *attr; @@ -759,6 +810,7 @@ static struct sysfs_entry { { "edc", &sysfs_edc_attr, 2 }, { "edc_status", &sysfs_edc_status_attr, 2 }, { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 }, + { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 }, { NULL }, }; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index da941be9b182..bb6bfd7b35f3 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2401,6 +2401,10 @@ struct qla_hw_data { void *xgmac_data; dma_addr_t xgmac_data_dma; +#define DCBX_TLV_DATA_SIZE PAGE_SIZE + void *dcbx_tlv; + dma_addr_t dcbx_tlv_dma; + struct task_struct *dpc_thread; uint8_t dpc_active; /* DPC routine is active */ diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 80ab46cfaca6..152d16c77f3e 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1406,6 +1406,7 @@ struct access_chip_rsp_84xx { #define MBC_RESTART_MPI_FW 0x3d #define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. 
*/ #define MBC_GET_XGMAC_STATS 0x7a +#define MBC_GET_DCBX_PARAMS 0x51 /* Flash access control option field bit definitions */ #define FAC_OPT_FORCE_SEMAPHORE BIT_15 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 66ba3997c91c..fbf99726e551 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -296,6 +296,9 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t); extern int qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *); +extern int +qla2x00_get_dcbx_params(scsi_qla_host_t *, dma_addr_t, uint16_t); + /* * Global Function Prototypes in qla_isr.c source file. */ diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 2497fe4ce5aa..e0fee484f79c 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3500,3 +3500,40 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, return rval; } + +int +qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, + uint16_t size) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA81XX(vha->hw)) + return QLA_FUNCTION_FAILED; + + DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + + mcp->mb[0] = MBC_GET_DCBX_PARAMS; + mcp->mb[1] = 0; + mcp->mb[2] = MSW(tlv_dma); + mcp->mb[3] = LSW(tlv_dma); + mcp->mb[6] = MSW(MSD(tlv_dma)); + mcp->mb[7] = LSW(MSD(tlv_dma)); + mcp->mb[8] = size; + mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " + "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, + mcp->mb[0], mcp->mb[1], mcp->mb[2])); + } else { + DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + } + + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 642e976083e8..7415ead92154 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2434,6 +2434,10 @@ qla2x00_mem_free(struct qla_hw_data *ha) vfree(ha->fw_dump); } + if (ha->dcbx_tlv) + dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, + ha->dcbx_tlv, ha->dcbx_tlv_dma); + if (ha->xgmac_data) dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, ha->xgmac_data, ha->xgmac_data_dma); -- cgit v1.2.3 From 1b91a2e6712393cffc33eeff5cf857f7d5b62fed Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:15 -0700 Subject: [SCSI] qla2xxx: Correct logic-bug in set-model-info(). 
Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 33e924810666..a7abc1035481 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1538,7 +1538,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, char *st, *en; uint16_t index; struct qla_hw_data *ha = vha->hw; - int use_tbl = !IS_QLA25XX(ha) && IS_QLA81XX(ha); + int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha); if (memcmp(model, BINZERO, len) != 0) { strncpy(ha->model_number, model, len); -- cgit v1.2.3 From 40859ae5f13534624cc35a05179b4f93ecbf531a Mon Sep 17 00:00:00 2001 From: Anirban Chakraborty Date: Wed, 3 Jun 2009 09:55:16 -0700 Subject: [SCSI] qla2xxx: Correct queue-creation bug when driver loaded in QoS mode. Signed-off-by: Anirban Chakraborty Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 14 +++++++------- drivers/scsi/qla2xxx/qla_def.h | 1 - drivers/scsi/qla2xxx/qla_os.c | 1 + drivers/scsi/qla2xxx/qla_sup.c | 8 +++----- 4 files changed, 11 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 9aa00f25aa6a..2bd017ffb084 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1651,13 +1651,13 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) qla24xx_vport_disable(fc_vport, disable); ret = 0; - if (ha->cur_vport_count <= ha->flex_port_count || ql2xmultique_tag - || ha->max_req_queues == 1 || !ha->npiv_info) + if (ql2xmaxqueues == 1 || ql2xmultique_tag || !ha->npiv_info) goto vport_queue; /* Create a request queue in QoS mode for the vport */ - for (cnt = ha->flex_port_count; cnt < ha->nvram_npiv_size; cnt++) { - if (ha->npiv_info[cnt].port_name == vha->port_name && - ha->npiv_info[cnt].node_name == vha->node_name) { + for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) { + if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0 + && memcmp(ha->npiv_info[cnt].node_name, vha->node_name, + 8) == 0) { qos = ha->npiv_info[cnt].q_qos; break; } @@ -1671,8 +1671,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) vha->vp_idx); else DEBUG2(qla_printk(KERN_INFO, ha, - "Request Que:%d created for vp_idx:%d\n", - ret, vha->vp_idx)); + "Request Que:%d (QoS: %d) created for vp_idx:%d\n", + ret, qos, vha->vp_idx)); } vport_queue: diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index bb6bfd7b35f3..4e846ae928aa 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2545,7 +2545,6 @@ struct qla_hw_data { uint16_t num_vsans; /* number of vsan created */ uint16_t max_npiv_vports; /* 63 or 125 per topoloty */ int cur_vport_count; - uint16_t flex_port_count; struct qla_chip_state_84xx *cs84xx; struct qla_statistics qla_stats; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 7415ead92154..181ed971a2ff 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1853,6 +1853,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->init_cb_size = sizeof(struct mid_init_cb_81xx); ha->gid_list_info_size = 8; ha->optrom_size = OPTROM_SIZE_81XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; ha->isp_ops = &qla81xx_isp_ops; ha->flash_conf_off = 
FARX_ACCESS_FLASH_CONF_81XX; ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 22f97eb50cf9..e239203f19f7 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -920,12 +920,13 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) entry = data + sizeof(struct qla_npiv_header); cnt = le16_to_cpu(hdr.entries); - ha->flex_port_count = cnt; for (i = 0; cnt; cnt--, entry++, i++) { uint16_t flags; struct fc_vport_identifiers vid; struct fc_vport *vport; + memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry)); + flags = le16_to_cpu(entry->flags); if (flags == 0xffff) continue; @@ -939,9 +940,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) vid.port_name = wwn_to_u64(entry->port_name); vid.node_name = wwn_to_u64(entry->node_name); - memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry)); - - DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx " + DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx " "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id), entry->q_qos, entry->f_qos)); @@ -957,7 +956,6 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) } done: kfree(data); - ha->npiv_info = NULL; } static int -- cgit v1.2.3 From cbc8eb67da11a4972834f61fe4729f4c037a17c9 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:17 -0700 Subject: [SCSI] qla2xxx: Fall back to 'golden-firmware' operation on supported ISPs. In case the onboard firmware cannot be read or loaded for operation, attempt to fall back to a limited-operation firmware image stored in a different flash region. This will allow a user to reflash and correct a board with proper operational firmware.
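A hedged, generic sketch of the ordered-fallback scheme described above: try the normal flash image, then the request-firmware blob, and only then the golden image, flagging the limited mode. All names below are hypothetical stand-ins, not qla2xxx code:

struct adapter {			/* hypothetical stand-in type */
	int has_gold_region;
	int running_gold_fw;
};

typedef int (*fw_loader)(struct adapter *);

static int load_fw_with_fallback(struct adapter *a, fw_loader flash,
				 fw_loader blob, fw_loader golden)
{
	if (flash(a) == 0)
		return 0;		/* normal operational image */
	if (blob(a) == 0)
		return 0;		/* request-firmware .bin file */
	if (a->has_gold_region && golden(a) == 0) {
		a->running_gold_fw = 1;	/* limited operation only */
		return 0;
	}
	return -1;			/* nothing could be loaded */
}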
Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_def.h | 2 ++ drivers/scsi/qla2xxx/qla_fw.h | 1 + drivers/scsi/qla2xxx/qla_init.c | 33 +++++++++++++++++++++++++-------- drivers/scsi/qla2xxx/qla_os.c | 7 +++++++ drivers/scsi/qla2xxx/qla_sup.c | 3 +++ 5 files changed, 38 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 4e846ae928aa..88ddae0e2b88 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2223,6 +2223,7 @@ struct qla_hw_data { uint32_t fac_supported :1; uint32_t chip_reset_done :1; uint32_t port0 :1; + uint32_t running_gold_fw :1; } flags; /* This spinlock is used to protect "io transactions", you must @@ -2523,6 +2524,7 @@ struct qla_hw_data { uint32_t flt_region_vpd; uint32_t flt_region_nvram; uint32_t flt_region_npiv_conf; + uint32_t flt_region_gold_fw; /* Needed for BEACON */ uint16_t beacon_blink_led; diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 152d16c77f3e..9e56d4a4cb75 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1241,6 +1241,7 @@ struct qla_flt_header { #define FLT_REG_HW_EVENT_1 0x1f #define FLT_REG_NPIV_CONF_0 0x29 #define FLT_REG_NPIV_CONF_1 0x2a +#define FLT_REG_GOLD_FW 0x2f struct qla_flt_region { uint32_t code; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index a7abc1035481..34e6508bbab0 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3806,11 +3806,11 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) } static int -qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) +qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, + uint32_t faddr) { int rval = QLA_SUCCESS; int segments, fragment; - uint32_t faddr; uint32_t *dcode, dlen; uint32_t risc_addr; uint32_t risc_size; @@ -3819,12 +3819,11 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) struct req_que *req = ha->req_q_map[0]; qla_printk(KERN_INFO, ha, - "FW: Loading from flash (%x)...\n", ha->flt_region_fw); + "FW: Loading from flash (%x)...\n", faddr); rval = QLA_SUCCESS; segments = FA_RISC_CODE_SEGMENTS; - faddr = ha->flt_region_fw; dcode = (uint32_t *)req->ring; *srisc_addr = 0; @@ -4124,27 +4123,45 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) if (rval == QLA_SUCCESS) return rval; - return qla24xx_load_risc_flash(vha, srisc_addr); + return qla24xx_load_risc_flash(vha, srisc_addr, + vha->hw->flt_region_fw); } int qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval; + struct qla_hw_data *ha = vha->hw; if (ql2xfwloadbin == 2) - return qla24xx_load_risc(vha, srisc_addr); + goto try_blob_fw; /* * FW Load priority: * 1) Firmware residing in flash. * 2) Firmware via request-firmware interface (.bin file). + * 3) Golden-Firmware residing in flash -- limited operation. 
*/ rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); if (rval == QLA_SUCCESS) return rval; - return qla24xx_load_risc_blob(vha, srisc_addr); +try_blob_fw: + rval = qla24xx_load_risc_blob(vha, srisc_addr); + if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw) + return rval; + + qla_printk(KERN_ERR, ha, + "FW: Attempting to fallback to golden firmware...\n"); + rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); + if (rval != QLA_SUCCESS) + return rval; + + qla_printk(KERN_ERR, ha, + "FW: Please update operational firmware...\n"); + ha->flags.running_gold_fw = 1; + + return rval; } void diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 181ed971a2ff..128b3d5c9663 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1690,6 +1690,9 @@ qla2xxx_scan_start(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); + if (vha->hw->flags.running_gold_fw) + return; + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(RSCN_UPDATE, &vha->dpc_flags); @@ -1962,6 +1965,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) "Can't create queues, falling back to single" " queue mode\n"); + if (ha->flags.running_gold_fw) + goto skip_dpc; + /* * Startup the kernel thread for this host adapter */ @@ -1974,6 +1980,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) goto probe_failed; } +skip_dpc: list_add_tail(&base_vha->list, &ha->vp_list); base_vha->host->irq = ha->pdev->irq; diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index e239203f19f7..6260505dceb5 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -728,6 +728,9 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) if (!ha->flags.port0) ha->flt_region_npiv_conf = start; break; + case FLT_REG_GOLD_FW: + ha->flt_region_gold_fw = start; + break; } } goto done; -- cgit v1.2.3 From 94b3aa47ac1ea0aa31b3f59ad121cdf55e038594 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:18 -0700 Subject: [SCSI] qla2xxx: Use 'proper' DID_* status code for dropped-frame scenarios. The SCSI midlayer's fast-fail codes consider a DID_ERROR status a driver error, so the failed I/O would then be retried in the midlayer without being fast-failed to dm-multipath. DID_BUS_BUSY status returns would induce unneeded path-failure events being propagated to the DM/MD. Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_isr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index eb35d2050f7a..c8d0a176fea4 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1211,7 +1211,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) /* * If RISC reports underrun and target does not report * it then we must have a lost frame, so tell upper - * layer to retry it by reporting a bus busy. + * layer to retry it by reporting an error.
*/ if (!(scsi_status & SS_RESIDUAL_UNDER)) { DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " @@ -1221,7 +1221,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) cp->device->id, cp->device->lun, resid, scsi_bufflen(cp))); - cp->result = DID_BUS_BUSY << 16; + cp->result = DID_ERROR << 16; break; } -- cgit v1.2.3 From 59e0b8b088031b3b751f0608f797f2581f49a827 Mon Sep 17 00:00:00 2001 From: Anirban Chakraborty Date: Wed, 3 Jun 2009 09:55:19 -0700 Subject: [SCSI] qla2xxx: Correct NULL pointer bug in cpu affinity mode. This patch fixes a NULL pointer bug that occurs when IO is being carried out on a vport for which the cpu affinity mode is turned on. Signed-off-by: Anirban Chakraborty Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 15 ++++++++++----- drivers/scsi/qla2xxx/qla_iocb.c | 16 ++++++---------- 2 files changed, 16 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 2bd017ffb084..74e69703ef98 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1595,6 +1595,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) struct qla_hw_data *ha = base_vha->hw; uint16_t options = 0; int cnt; + struct req_que *req = ha->req_q_map[0]; ret = qla24xx_vport_create_req_sanity_check(fc_vport); if (ret) { @@ -1650,14 +1651,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) qla24xx_vport_disable(fc_vport, disable); - ret = 0; - if (ql2xmaxqueues == 1 || ql2xmultique_tag || !ha->npiv_info) + if (ql2xmultique_tag) { + req = ha->req_q_map[1]; + goto vport_queue; + } else if (ql2xmaxqueues == 1 || !ha->npiv_info) goto vport_queue; /* Create a request queue in QoS mode for the vport */ for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) { if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name, - 8) == 0) { + 8) == 0) { qos = ha->npiv_info[cnt].q_qos; break; } @@ -1669,14 +1672,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) qla_printk(KERN_WARNING, ha, "Can't create request queue for vp_idx:%d\n", vha->vp_idx); - else + else { DEBUG2(qla_printk(KERN_INFO, ha, "Request Que:%d (QoS: %d) created for vp_idx:%d\n", ret, qos, vha->vp_idx)); + req = ha->req_q_map[ret]; + } } vport_queue: - vha->req = ha->req_q_map[ret]; + vha->req = req; return 0; vport_create_failed_2: diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index b4c6010ee5fa..13396beae2ce 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -15,7 +15,7 @@ static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *, struct rsp_que *rsp); static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *); -static void qla25xx_set_que(srb_t *, struct req_que **, struct rsp_que **); +static void qla25xx_set_que(srb_t *, struct rsp_que **); /** * qla2x00_get_cmd_direction() - Determine control_flag data direction. * @cmd: SCSI command @@ -722,7 +722,8 @@ qla24xx_start_scsi(srb_t *sp) /* Setup device pointers. 
*/ ret = 0; - qla25xx_set_que(sp, &req, &rsp); + qla25xx_set_que(sp, &rsp); + req = vha->req; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; @@ -845,20 +846,15 @@ queuing_error: return QLA_FUNCTION_FAILED; } -static void qla25xx_set_que(srb_t *sp, struct req_que **req, - struct rsp_que **rsp) +static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) { struct scsi_cmnd *cmd = sp->cmd; - struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = sp->fcport->vha->hw; int affinity = cmd->request->cpu; if (ql2xmultique_tag && affinity >= 0 && - affinity < ha->max_rsp_queues - 1) { + affinity < ha->max_rsp_queues - 1) *rsp = ha->rsp_q_map[affinity + 1]; - *req = ha->req_q_map[1]; - } else { - *req = vha->req; + else *rsp = ha->rsp_q_map[0]; - } } -- cgit v1.2.3 From ca9e9c3eb118d0cb9dc2e5232f6f2dcaa4b7a5e0 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:20 -0700 Subject: [SCSI] qla2xxx: Check status of qla2x00_get_fw_version() call. Unlike earlier ISPs, recent ISPs (ISP81xx) can in fact fail this mailbox command. Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_gbl.h | 2 +- drivers/scsi/qla2xxx/qla_init.c | 7 +++++-- drivers/scsi/qla2xxx/qla_mbx.c | 7 +++++-- 3 files changed, 11 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index fbf99726e551..1ef18cce0c55 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -148,7 +148,7 @@ qla2x00_dump_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t); extern int qla2x00_execute_fw(scsi_qla_host_t *, uint32_t); -extern void +extern int qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *, uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 34e6508bbab0..415fbf60de11 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -929,13 +929,16 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) /* Retrieve firmware information. */ if (rval == QLA_SUCCESS) { fw_major_version = ha->fw_major_version; - qla2x00_get_fw_version(vha, + rval = qla2x00_get_fw_version(vha, &ha->fw_major_version, &ha->fw_minor_version, &ha->fw_subminor_version, &ha->fw_attributes, &ha->fw_memory_size, ha->mpi_version, &ha->mpi_capabilities, ha->phy_version); + if (rval != QLA_SUCCESS) + goto failed; + ha->flags.npiv_supported = 0; if (IS_QLA2XXX_MIDTYPE(ha) && (ha->fw_attributes & BIT_2)) { @@ -987,7 +990,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) ha->fw_subminor_version); } } - +failed: if (rval) { DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", vha->host_no)); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index e0fee484f79c..b32eb69974a3 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -408,7 +408,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) * Context: * Kernel context. */ -void +int qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi, uint32_t *mpi_caps, uint8_t *phy) @@ -427,6 +427,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, mcp->flags = 0; mcp->tov = MBX_TOV_SECONDS; rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) + goto failed; /* Return mailbox data. 
*/ *major = mcp->mb[1]; @@ -446,7 +448,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, phy[1] = mcp->mb[9] >> 8; phy[2] = mcp->mb[9] & 0xff; } - +failed: if (rval != QLA_SUCCESS) { /*EMPTY*/ DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, @@ -455,6 +457,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, /*EMPTY*/ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } + return rval; } /* -- cgit v1.2.3 From f4658b6ccc9d54b28b89004accc989db185b6a2e Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:21 -0700 Subject: [SCSI] qla2xxx: Mark a port's state as needing-rediscovery during link disruptions. With RSCN states not being kept across qla2x00_configure_loop() invocations, loop-resync disruptions during fabric-discovery may cause ports to remain in a lost state. Force state renegotiation during a follow-on configure-loop iteration. Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_init.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 415fbf60de11..d145de0d2c2a 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2064,8 +2064,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); - if (test_bit(RSCN_UPDATE, &save_flags)) + if (test_bit(RSCN_UPDATE, &save_flags)) { set_bit(RSCN_UPDATE, &vha->dpc_flags); + vha->flags.rscn_queue_overflow = 1; + } } return (rval); -- cgit v1.2.3 From 9f8fddeef2264a0315032b0aa2ee0052dad90076 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:22 -0700 Subject: [SCSI] qla2xxx: Add 10Gb iiDMA support.
Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_gs.c | 3 +++ drivers/scsi/qla2xxx/qla_init.c | 10 ++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 3dbb9e73e80a..917534b9f221 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -1879,6 +1879,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) case BIT_13: list[i].fp_speed = PORT_SPEED_4GB; break; + case BIT_12: + list[i].fp_speed = PORT_SPEED_10GB; + break; case BIT_11: list[i].fp_speed = PORT_SPEED_8GB; break; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index d145de0d2c2a..cb4d95263631 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2248,7 +2248,8 @@ static void qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) { #define LS_UNKNOWN 2 - static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; + static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; + char *link_speed; int rval; uint16_t mb[6]; struct qla_hw_data *ha = vha->hw; @@ -2271,10 +2272,15 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) fcport->port_name[6], fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1])); } else { + link_speed = link_speeds[LS_UNKNOWN]; + if (fcport->fp_speed < 5) + link_speed = link_speeds[fcport->fp_speed]; + else if (fcport->fp_speed == 0x13) + link_speed = link_speeds[5]; DEBUG2(qla_printk(KERN_INFO, ha, "iIDMA adjusted to %s GB/s on " "%02x%02x%02x%02x%02x%02x%02x%02x.\n", - link_speeds[fcport->fp_speed], fcport->port_name[0], + link_speed, fcport->port_name[0], fcport->port_name[1], fcport->port_name[2], fcport->port_name[3], fcport->port_name[4], fcport->port_name[5], fcport->port_name[6], -- cgit v1.2.3 From 81eb9b4985b9cf965f9c05f4679a77fae2a85fe5 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:23 -0700 Subject: [SCSI] qla2xxx: Add notification message when an NPIV fails to acquire a port-id. Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_mbx.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index b32eb69974a3..df919c072cae 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2753,8 +2753,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, if (vp_idx == 0) return; - if (MSB(stat) == 1) + if (MSB(stat) == 1) { + DEBUG2(printk("scsi(%ld): Could not acquire ID for " + "VP[%d].\n", vha->host_no, vp_idx)); return; + } list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) if (vp_idx == vp->vp_idx) -- cgit v1.2.3 From eeebcc922326a2ea0302937b425a0d1471cbd6a7 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:24 -0700 Subject: [SCSI] qla2xxx: Fallback enode-mac should not be a multicast address. 
Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index cb4d95263631..4649b2ae1948 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -4406,7 +4406,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) nv->max_luns_per_target = __constant_cpu_to_le16(128); nv->port_down_retry_count = __constant_cpu_to_le16(30); nv->link_down_timeout = __constant_cpu_to_le16(30); - nv->enode_mac[0] = 0x01; + nv->enode_mac[0] = 0x00; nv->enode_mac[1] = 0x02; nv->enode_mac[2] = 0x03; nv->enode_mac[3] = 0x04; -- cgit v1.2.3 From e8233ca40bfe7b9dade6cefc984e305516c4eceb Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:25 -0700 Subject: [SCSI] qla2xxx: Avoid redundant RISC reset during (re)-initialization. ISP24xx and above ISPs perform a RISC reset in qla24xx_reset_chip(), which is called prior to qla24xx_chip_diag(). Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_init.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 4649b2ae1948..46bd08525964 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -730,9 +730,6 @@ qla24xx_chip_diag(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; - /* Perform RISC reset. */ - qla24xx_reset_risc(vha); - ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; rval = qla2x00_mbx_reg_test(vha); -- cgit v1.2.3 From aed10881129c52f0e5dc1c96ac706b5ce7708a13 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:26 -0700 Subject: [SCSI] qla2xxx: Query supported RISC register bits in determining a paused-state. ISP24xx and above must query the host-status register, not HCCR. Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_dbg.c | 6 ++---- drivers/scsi/qla2xxx/qla_fw.h | 1 - 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 68671a2b8b7f..4a990f4da4ea 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -149,11 +149,9 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg) int rval = QLA_SUCCESS; uint32_t cnt; - if (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) - return rval; - WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE); - for (cnt = 30000; (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0 && + for (cnt = 30000; + ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 9e56d4a4cb75..dfde2dd865cb 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -878,7 +878,6 @@ struct device_reg_24xx { /* HCCR statuses. */ #define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */ #define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */ -#define HCCRX_RISC_PAUSE BIT_4 /* RISC Pause mode bit. */ /* HCCR commands. */ /* NOOP. */ #define HCCRX_NOOP 0x00000000 -- cgit v1.2.3 From 6805c1504eb4cfd4a31c05ed88fdeb56228eb3ba Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:27 -0700 Subject: [SCSI] qla2xxx: Avoid explicit LOGO during driver host tear-down.
As the firmware will ultimately be terminated (stopped) and all port states cleared during host tear-down, the explicit fabric logout is redundant; skip it while the driver is unloading. Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 74e69703ef98..cbdafb0aaf4c 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1465,7 +1465,8 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) * At this point all fcport's software-states are cleared. Perform any * final cleanup of firmware resources (PCBs and XCBs). */ - if (fcport->loop_id != FC_NO_LOOP_ID) + if (fcport->loop_id != FC_NO_LOOP_ID && + !test_bit(UNLOADING, &fcport->vha->dpc_flags)) fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); -- cgit v1.2.3 From f999f4c1961fe5399fd66c95860cc2d5d67e591e Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:28 -0700 Subject: [SCSI] qla2xxx: Reduce lock-contention during do-work processing. Queued work processing will now be serialized with its own lower-priority spinlock. This also simplifies the work-queue interface for future work-queue consumers. Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_def.h | 2 ++ drivers/scsi/qla2xxx/qla_os.c | 45 ++++++++++++++++++++++----------------------- 2 files changed, 24 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 88ddae0e2b88..00aa48d975a6 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2561,6 +2561,8 @@ typedef struct scsi_qla_host { struct list_head list; struct list_head vp_fcports; /* list of fcports */ struct list_head work_list; + spinlock_t work_lock; + /* Commonly used flags and state information. */ struct Scsi_Host *host; unsigned long host_no; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 128b3d5c9663..dcf011679c8b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2533,6 +2533,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, INIT_LIST_HEAD(&vha->work_list); INIT_LIST_HEAD(&vha->list); + spin_lock_init(&vha->work_lock); + sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); return vha; @@ -2541,13 +2543,11 @@ fail: } static struct qla_work_evt * -qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type, - int locked) +qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) { struct qla_work_evt *e; - e = kzalloc(sizeof(struct qla_work_evt), locked ?
GFP_ATOMIC: - GFP_KERNEL); + e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); if (!e) return NULL; @@ -2558,17 +2558,15 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type, } static int -qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked) +qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) { - unsigned long uninitialized_var(flags); - struct qla_hw_data *ha = vha->hw; + unsigned long flags; - if (!locked) - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&vha->work_lock, flags); list_add_tail(&e->list, &vha->work_list); + spin_unlock_irqrestore(&vha->work_lock, flags); qla2xxx_wake_dpc(vha); - if (!locked) - spin_unlock_irqrestore(&ha->hardware_lock, flags); + return QLA_SUCCESS; } @@ -2578,13 +2576,13 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, { struct qla_work_evt *e; - e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1); + e = qla2x00_alloc_work(vha, QLA_EVT_AEN); if (!e) return QLA_FUNCTION_FAILED; e->u.aen.code = code; e->u.aen.data = data; - return qla2x00_post_work(vha, e, 1); + return qla2x00_post_work(vha, e); } int @@ -2592,25 +2590,27 @@ qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) { struct qla_work_evt *e; - e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK, 1); + e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK); if (!e) return QLA_FUNCTION_FAILED; memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); - return qla2x00_post_work(vha, e, 1); + return qla2x00_post_work(vha, e); } static void qla2x00_do_work(struct scsi_qla_host *vha) { - struct qla_work_evt *e; - struct qla_hw_data *ha = vha->hw; + struct qla_work_evt *e, *tmp; + unsigned long flags; + LIST_HEAD(work); - spin_lock_irq(&ha->hardware_lock); - while (!list_empty(&vha->work_list)) { - e = list_entry(vha->work_list.next, struct qla_work_evt, list); + spin_lock_irqsave(&vha->work_lock, flags); + list_splice_init(&vha->work_list, &work); + spin_unlock_irqrestore(&vha->work_lock, flags); + + list_for_each_entry_safe(e, tmp, &work, list) { list_del_init(&e->list); - spin_unlock_irq(&ha->hardware_lock); switch (e->type) { case QLA_EVT_AEN: @@ -2623,10 +2623,9 @@ qla2x00_do_work(struct scsi_qla_host *vha) } if (e->flags & QLA_EVT_FLAG_FREE) kfree(e); - spin_lock_irq(&ha->hardware_lock); } - spin_unlock_irq(&ha->hardware_lock); } + /* Relogins all the fcports of a vport * Context: dpc thread */ -- cgit v1.2.3 From 656e89122a737b60cebc7b8fcb669faf0e7bc905 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:29 -0700 Subject: [SCSI] qla2xxx: Export additional firmware-states for application support. 
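The five firmware-state words (mailbox registers 1 through 5 returned by the Get Firmware State mailbox command) are exposed through a new read-only fw_state attribute; reading /sys/class/scsi_host/host<N>/fw_state (host number is installation-specific) returns them as space-separated hex values, or all-0xffff words if the mailbox command fails.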
Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 18 ++++++++++++++++++ drivers/scsi/qla2xxx/qla_init.c | 7 ++++--- drivers/scsi/qla2xxx/qla_mbx.c | 4 +++- 3 files changed, 25 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index cbdafb0aaf4c..0f8796201504 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1233,6 +1233,22 @@ qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr, return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap); } +static ssize_t +qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int rval; + uint16_t state[5]; + + rval = qla2x00_get_firmware_state(vha, state); + if (rval != QLA_SUCCESS) + memset(state, -1, sizeof(state)); + + return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0], + state[1], state[2], state[3], state[4]); +} + static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); @@ -1265,6 +1281,7 @@ static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL); static DEVICE_ATTR(vn_port_mac_address, S_IRUGO, qla2x00_vn_port_mac_address_show, NULL); static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL); +static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL); struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_driver_version, @@ -1290,6 +1307,7 @@ struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_vlan_id, &dev_attr_vn_port_mac_address, &dev_attr_fabric_param, + &dev_attr_fw_state, NULL, }; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 46bd08525964..36cea2224b3c 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1304,7 +1304,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) unsigned long wtime, mtime, cs84xx_time; uint16_t min_wait; /* Minimum wait time if loop is down */ uint16_t wait_time; /* Wait time if loop is coming ready */ - uint16_t state[3]; + uint16_t state[5]; struct qla_hw_data *ha = vha->hw; rval = QLA_SUCCESS; @@ -1403,8 +1403,9 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) vha->host_no, state[0], jiffies)); } while (1); - DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", - vha->host_no, state[0], jiffies)); + DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n", + vha->host_no, state[0], state[1], state[2], state[3], state[4], + jiffies)); if (rval) { DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index df919c072cae..f21557845d6f 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1267,7 +1267,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) mcp->mb[0] = MBC_GET_FIRMWARE_STATE; mcp->out_mb = MBX_0; - mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -1276,6 +1276,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) states[0] = mcp->mb[1]; states[1] = mcp->mb[2]; states[2] = mcp->mb[3]; + states[3] = mcp->mb[4]; + states[4] = mcp->mb[5]; if (rval 
!= QLA_SUCCESS) { /*EMPTY*/ -- cgit v1.2.3 From 18e7555a38eaadb757353c4b76667e07166ad26c Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:30 -0700 Subject: [SCSI] qla2xxx: Synchronize MPI settings after a PE Reset. Ensure MPS remains in synchronization across all NIC/FCoE functions after a reset. Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_gbl.h | 6 ++++ drivers/scsi/qla2xxx/qla_init.c | 52 ++++++++++++++++++++++++++++++++++ drivers/scsi/qla2xxx/qla_mbx.c | 63 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 121 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 1ef18cce0c55..65b12d82867c 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -299,6 +299,12 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *); extern int qla2x00_get_dcbx_params(scsi_qla_host_t *, dma_addr_t, uint16_t); +extern int +qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *); + +extern int +qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t); + /* * Global Function Prototypes in qla_isr.c source file. */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 36cea2224b3c..262026129325 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -886,6 +886,56 @@ cont_alloc: htonl(offsetof(struct qla2xxx_fw_dump, isp)); } +static int +qla81xx_mpi_sync(scsi_qla_host_t *vha) +{ +#define MPS_MASK 0xe0 + int rval; + uint16_t dc; + uint32_t dw; + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA81XX(vha->hw)) + return QLA_SUCCESS; + + rval = qla2x00_write_ram_word(vha, 0x7c00, 1); + if (rval != QLA_SUCCESS) { + DEBUG2(qla_printk(KERN_WARNING, ha, + "Sync-MPI: Unable to acquire semaphore.\n")); + goto done; + } + + pci_read_config_word(vha->hw->pdev, 0x54, &dc); + rval = qla2x00_read_ram_word(vha, 0x7a15, &dw); + if (rval != QLA_SUCCESS) { + DEBUG2(qla_printk(KERN_WARNING, ha, + "Sync-MPI: Unable to read sync.\n")); + goto done_release; + } + + dc &= MPS_MASK; + if (dc == (dw & MPS_MASK)) + goto done_release; + + dw &= ~MPS_MASK; + dw |= dc; + rval = qla2x00_write_ram_word(vha, 0x7a15, dw); + if (rval != QLA_SUCCESS) { + DEBUG2(qla_printk(KERN_WARNING, ha, + "Sync-MPI: Unable to gain sync.\n")); + } + +done_release: + rval = qla2x00_write_ram_word(vha, 0x7c00, 0); + if (rval != QLA_SUCCESS) { + DEBUG2(qla_printk(KERN_WARNING, ha, + "Sync-MPI: Unable to release semaphore.\n")); + } + +done: + return rval; +} + /** * qla2x00_setup_chip() - Load and start RISC firmware. 
* @ha: HA context @@ -910,6 +960,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) spin_unlock_irqrestore(&ha->hardware_lock, flags); } + qla81xx_mpi_sync(vha); + /* Load firmware sequences */ rval = ha->isp_ops->load_risc(vha, &srisc_address); if (rval == QLA_SUCCESS) { diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index f21557845d6f..451ece0760b0 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3545,3 +3545,66 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, return rval; } + +int +qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_FWI2_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + + mcp->mb[0] = MBC_READ_RAM_EXTENDED; + mcp->mb[1] = LSW(risc_addr); + mcp->mb[8] = MSW(risc_addr); + mcp->out_mb = MBX_8|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_0; + mcp->tov = 30; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, + vha->host_no, rval, mcp->mb[0])); + } else { + DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + *data = mcp->mb[3] << 16 | mcp->mb[2]; + } + + return rval; +} + +int +qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_FWI2_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + + mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; + mcp->mb[1] = LSW(risc_addr); + mcp->mb[2] = LSW(data); + mcp->mb[3] = MSW(data); + mcp->mb[8] = MSW(risc_addr); + mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = 30; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, + vha->host_no, rval, mcp->mb[0])); + } else { + DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + } + + return rval; +} -- cgit v1.2.3 From 714df9399b3d2c0a7484e8cfb7c9cb100b0b7f19 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 3 Jun 2009 09:55:31 -0700 Subject: [SCSI] qla2xxx: Update version number to 8.03.01-k3. 
Signed-off-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index a1094e7d2b44..b63feaf43126 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.03.01-k2" +#define QLA2XXX_VERSION "8.03.01-k3" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 3 -- cgit v1.2.3 From 9d01e4cd7eb4a70b04cf5a5b4f79c99e8e3e3edc Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 8 Jun 2009 22:03:03 +0200 Subject: ide-tape: fix proc warning ide_tape_chrdev_get() was missing an ide_device_get() refcount increment which led to the following warning: [ 278.147906] ------------[ cut here ]------------ [ 278.152685] WARNING: at fs/proc/generic.c:847 remove_proc_entry+0x199/0x1b8() [ 278.160070] Hardware name: P4I45PE 1.00 [ 278.160076] remove_proc_entry: removing non-empty directory 'ide0/hdb', leaking at least 'name' [ 278.160080] Modules linked in: rtc intel_agp pcspkr thermal processor thermal_sys parport_pc parport agpgart button [ 278.160100] Pid: 2312, comm: mt Not tainted 2.6.30-rc2 #3 [ 278.160105] Call Trace: [ 278.160117] [] warn_slowpath+0x71/0xa0 [ 278.160126] [] ? _spin_unlock_irqrestore+0x29/0x2c [ 278.160132] [] ? try_to_wake_up+0x1b6/0x1c0 [ 278.160141] [] ? default_wake_function+0xb/0xd [ 278.160149] [] ? pollwake+0x4a/0x55 [ 278.160156] [] ? _spin_unlock+0x24/0x26 [ 278.160163] [] ? add_partial+0x44/0x49 [ 278.160169] [] ? __slab_free+0xba/0x29c [ 278.160177] [] ? sysfs_delete_inode+0x0/0x3c [ 278.160184] [] remove_proc_entry+0x199/0x1b8 [ 278.160191] [] ? remove_dir+0x27/0x2e [ 278.160199] [] ide_proc_unregister_device+0x40/0x4c [ 278.160207] [] drive_release_dev+0x14/0x47 [ 278.160214] [] device_release+0x35/0x5a [ 278.160221] [] kobject_release+0x40/0x50 [ 278.160226] [] ? kobject_release+0x0/0x50 [ 278.160232] [] kref_put+0x3c/0x4a [ 278.160238] [] kobject_put+0x37/0x3c [ 278.160243] [] put_device+0xf/0x11 [ 278.160249] [] ide_device_put+0x2d/0x30 [ 278.160255] [] ide_tape_put+0x24/0x32 [ 278.160261] [] idetape_chrdev_release+0x17f/0x18e [ 278.160269] [] __fput+0xca/0x175 [ 278.160275] [] fput+0x19/0x1b [ 278.160280] [] filp_close+0x51/0x5b [ 278.160286] [] sys_close+0x73/0xad [ 278.160293] [] syscall_call+0x7/0xb [ 278.160298] ---[ end trace f16d907ea1f89336 ]--- Instead of trivially fixing it by adding the missing call, ide_tape_chrdev_get() and ide_tape_get() were merged into one function since both were almost identical. The only difference was that ide_tape_chrdev_get() was accessing the ide-tape reference through the idetape_devs[] array of minors instead of through the gendisk. Accommodate that by adding two additional parameters to ide_tape_get() to annotate the call site and invoke the proper behavior. As a result, remove ide_tape_chrdev_get().
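Both interfaces now go through the single getter; the two call patterns, as in the diff below, are:

	/* character-device path: locate the tape object by minor index */
	tape = ide_tape_get(NULL, true, i);

	/* block-device path: locate the tape object via the gendisk */
	tape = ide_tape_get(bdev->bd_disk, false, 0);

In both cases the ide_device_get() and get_device() references are taken under idetape_ref_mutex, closing the refcount leak that triggered the warning.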
Signed-off-by: Borislav Petkov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-tape.c | 35 +++++++++++++---------------------- 1 file changed, 13 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 055f52e1ea0e..51ea59e3f6ad 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -240,18 +240,27 @@ static struct class *idetape_sysfs_class; static void ide_tape_release(struct device *); -static struct ide_tape_obj *ide_tape_get(struct gendisk *disk) +static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES]; + +static struct ide_tape_obj *ide_tape_get(struct gendisk *disk, bool cdev, + unsigned int i) { struct ide_tape_obj *tape = NULL; mutex_lock(&idetape_ref_mutex); - tape = ide_drv_g(disk, ide_tape_obj); + + if (cdev) + tape = idetape_devs[i]; + else + tape = ide_drv_g(disk, ide_tape_obj); + if (tape) { if (ide_device_get(tape->drive)) tape = NULL; else get_device(&tape->dev); } + mutex_unlock(&idetape_ref_mutex); return tape; } @@ -266,24 +275,6 @@ static void ide_tape_put(struct ide_tape_obj *tape) mutex_unlock(&idetape_ref_mutex); } -/* - * The variables below are used for the character device interface. Additional - * state variables are defined in our ide_drive_t structure. - */ -static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES]; - -static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i) -{ - struct ide_tape_obj *tape = NULL; - - mutex_lock(&idetape_ref_mutex); - tape = idetape_devs[i]; - if (tape) - get_device(&tape->dev); - mutex_unlock(&idetape_ref_mutex); - return tape; -} - /* * called on each failed packet command retry to analyze the request sense. We * currently do not utilize this information. @@ -1495,7 +1486,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp) return -ENXIO; lock_kernel(); - tape = ide_tape_chrdev_get(i); + tape = ide_tape_get(NULL, true, i); if (!tape) { unlock_kernel(); return -ENXIO; @@ -1916,7 +1907,7 @@ static const struct file_operations idetape_fops = { static int idetape_open(struct block_device *bdev, fmode_t mode) { - struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk); + struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk, false, 0); if (!tape) return -ENXIO; -- cgit v1.2.3 From 75c2d7d71a85d02594da07d5d2ad587451b64b02 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Mon, 8 Jun 2009 22:03:03 +0200 Subject: sl82c105: add printk() log levels Add the missing printk() log levels in sl82c105_dma_lost_irq(). Signed-off-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/sl82c105.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c index b0a460625335..0924abff52ff 100644 --- a/drivers/ide/sl82c105.c +++ b/drivers/ide/sl82c105.c @@ -10,7 +10,7 @@ * with the timing registers setup. * -- Benjamin Herrenschmidt (01/11/03) benh@kernel.crashing.org * - * Copyright (C) 2006-2007 MontaVista Software, Inc. + * Copyright (C) 2006-2007,2009 MontaVista Software, Inc. * Copyright (C) 2007 Bartlomiej Zolnierkiewicz */ @@ -146,14 +146,15 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive) u32 val, mask = hwif->channel ? CTRL_IDE_IRQB : CTRL_IDE_IRQA; u8 dma_cmd; - printk("sl82c105: lost IRQ, resetting host\n"); + printk(KERN_WARNING "sl82c105: lost IRQ, resetting host\n"); /* * Check the raw interrupt from the drive.
*/ pci_read_config_dword(dev, 0x40, &val); if (val & mask) - printk("sl82c105: drive was requesting IRQ, but host lost it\n"); + printk(KERN_INFO "sl82c105: drive was requesting IRQ, " + "but host lost it\n"); /* * Was DMA enabled? If so, disable it - we're resetting the @@ -162,7 +163,7 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive) dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); if (dma_cmd & 1) { outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD); - printk("sl82c105: DMA was enabled\n"); + printk(KERN_INFO "sl82c105: DMA was enabled\n"); } sl82c105_reset_host(dev); -- cgit v1.2.3 From a20b2a44eca52818ef52a94959480b7e6ea2f528 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Mon, 8 Jun 2009 22:07:28 +0200 Subject: ide: skip probe if there are no devices on the port (v2) In ide_probe_port() skip probe if ide_port_wait_ready() returns -ENODEV and print error message instead of debug one if it returns -EBUSY. v2: Fix the default 'rc' value. Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-probe.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 28f95cb41c29..f9c2fb7d0005 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -702,8 +702,14 @@ static int ide_probe_port(ide_hwif_t *hwif) if (irqd) disable_irq(hwif->irq); - if (ide_port_wait_ready(hwif) == -EBUSY) - printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name); + rc = ide_port_wait_ready(hwif); + if (rc == -ENODEV) { + printk(KERN_INFO "%s: no devices on the port\n", hwif->name); + goto out; + } else if (rc == -EBUSY) + printk(KERN_ERR "%s: not ready before the probe\n", hwif->name); + else + rc = -ENODEV; /* * Second drive should only exist if first drive was found, @@ -714,7 +720,7 @@ static int ide_probe_port(ide_hwif_t *hwif) if (drive->dev_flags & IDE_DFLAG_PRESENT) rc = 0; } - +out: /* * Use cached IRQ number. It might be (and is...) changed by probe * code above -- cgit v1.2.3 From fbc56f0801f58041a4372a030933bac076b46aad Mon Sep 17 00:00:00 2001 From: Brian King Date: Mon, 8 Jun 2009 16:19:01 -0500 Subject: [SCSI] ibmvscsi: Add 16 byte CDB support Adds support for 16 byte CDBs to the ibmvscsi driver. Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvscsi.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 8d3925f6b5a1..9804b42dcd94 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -1684,6 +1684,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) host->max_lun = 8; host->max_id = max_id; host->max_channel = max_channel; + host->max_cmd_len = 16; if (scsi_add_host(hostdata->host, hostdata->dev)) goto add_host_failed; -- cgit v1.2.3 From e1a5ce5b88d06344caec0c71b4ee33e7296358dd Mon Sep 17 00:00:00 2001 From: Robert Jennings Date: Mon, 8 Jun 2009 16:19:03 -0500 Subject: [SCSI] ibmvscsi: Add specific timeouts for operations Previously we had one timeout that was used for all types of operations. This adds specific timeout values for different operations (init, login, adapter info MAD, abort task, and LUN reset). 
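Of these, only init_timeout remains tunable from userspace via the existing module parameter (registered with S_IRUGO | S_IWUSR); the login, info, abort and reset timeouts are fixed constants in the driver. The initialization timeout can still be raised at load time, e.g. with modprobe ibmvscsi init_timeout=600 (value illustrative).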
Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvscsi.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 9804b42dcd94..6038c0491f8a 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -87,7 +87,11 @@ */ static int max_id = 64; static int max_channel = 3; -static int init_timeout = 5; +static int init_timeout = 300; +static int login_timeout = 60; +static int info_timeout = 30; +static int abort_timeout = 60; +static int reset_timeout = 60; static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; @@ -849,7 +853,7 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) init_event_struct(evt_struct, adapter_info_rsp, VIOSRP_MAD_FORMAT, - init_timeout); + info_timeout); req = &evt_struct->iu.mad.adapter_info; memset(req, 0x00, sizeof(*req)); @@ -871,7 +875,7 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) } spin_lock_irqsave(hostdata->host->host_lock, flags); - if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) { + if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) { dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n"); dma_unmap_single(hostdata->dev, addr, @@ -944,7 +948,7 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) init_event_struct(evt_struct, login_rsp, VIOSRP_SRP_FORMAT, - init_timeout); + login_timeout); login = &evt_struct->iu.srp.login_req; memset(login, 0x00, sizeof(struct srp_login_req)); @@ -959,7 +963,7 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) */ atomic_set(&hostdata->request_limit, 0); - rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); + rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2); spin_unlock_irqrestore(hostdata->host->host_lock, flags); dev_info(hostdata->dev, "sent SRP login\n"); return rc; @@ -1026,7 +1030,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) init_event_struct(evt, sync_completion, VIOSRP_SRP_FORMAT, - init_timeout); + abort_timeout); tsk_mgmt = &evt->iu.srp.tsk_mgmt; @@ -1040,7 +1044,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) evt->sync_srp = &srp_rsp; init_completion(&evt->comp); - rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); + rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2); if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) break; @@ -1149,7 +1153,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) init_event_struct(evt, sync_completion, VIOSRP_SRP_FORMAT, - init_timeout); + reset_timeout); tsk_mgmt = &evt->iu.srp.tsk_mgmt; @@ -1162,7 +1166,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) evt->sync_srp = &srp_rsp; init_completion(&evt->comp); - rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); + rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2); if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) break; @@ -1394,7 +1398,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, init_event_struct(evt_struct, sync_completion, VIOSRP_MAD_FORMAT, - init_timeout); + info_timeout); host_config = &evt_struct->iu.mad.host_config; @@ -1416,7 +1420,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, init_completion(&evt_struct->comp); 
spin_lock_irqsave(hostdata->host->host_lock, flags); - rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); + rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); spin_unlock_irqrestore(hostdata->host->host_lock, flags); if (rc == 0) wait_for_completion(&evt_struct->comp); @@ -1441,7 +1445,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) spin_lock_irqsave(shost->host_lock, lock_flags); if (sdev->type == TYPE_DISK) { sdev->allow_restart = 1; - blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); + blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); } scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); spin_unlock_irqrestore(shost->host_lock, lock_flags); -- cgit v1.2.3 From 3507e13fcba6b97501891a410ec8ef9f1f188620 Mon Sep 17 00:00:00 2001 From: Brian King Date: Mon, 8 Jun 2009 16:19:04 -0500 Subject: [SCSI] ibmvscsi: Send adapter info before login The ibmvscsi driver currently sends the SRP Login before sending the Adapter Info MAD, which can result in commands getting sent to the virtual adapter before we are ready for them. This results in a slight window where the target devices may not behave as expected. Change the order and close the window. Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvscsi.c | 170 +++++++++++++++++++-------------------- 1 file changed, 85 insertions(+), 85 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 6038c0491f8a..2ed46b8efedf 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -785,6 +785,83 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, /* ------------------------------------------------------------ * Routines for driver initialization */ + +/** + * login_rsp: - Handle response to SRP login request + * @evt_struct: srp_event_struct with the response + * + * Used as a "done" callback by when sending srp_login. Gets called + * by ibmvscsi_handle_crq() +*/ +static void login_rsp(struct srp_event_struct *evt_struct) +{ + struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; + switch (evt_struct->xfer_iu->srp.login_rsp.opcode) { + case SRP_LOGIN_RSP: /* it worked! */ + break; + case SRP_LOGIN_REJ: /* refused! */ + dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n", + evt_struct->xfer_iu->srp.login_rej.reason); + /* Login failed. */ + atomic_set(&hostdata->request_limit, -1); + return; + default: + dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n", + evt_struct->xfer_iu->srp.login_rsp.opcode); + /* Login failed. */ + atomic_set(&hostdata->request_limit, -1); + return; + } + + dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); + + /* Now we know what the real request-limit is. + * This value is set rather than added to request_limit because + * request_limit could have been set to -1 by this client. + */ + atomic_set(&hostdata->request_limit, + evt_struct->xfer_iu->srp.login_rsp.req_lim_delta); + + /* If we had any pending I/Os, kick them */ + scsi_unblock_requests(hostdata->host); +} + +/** + * send_srp_login: - Sends the srp login + * @hostdata: ibmvscsi_host_data of host + * + * Returns zero if successful. 
+*/ +static int send_srp_login(struct ibmvscsi_host_data *hostdata) +{ + int rc; + unsigned long flags; + struct srp_login_req *login; + struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); + + BUG_ON(!evt_struct); + init_event_struct(evt_struct, login_rsp, + VIOSRP_SRP_FORMAT, login_timeout); + + login = &evt_struct->iu.srp.login_req; + memset(login, 0, sizeof(*login)); + login->opcode = SRP_LOGIN_REQ; + login->req_it_iu_len = sizeof(union srp_iu); + login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; + + spin_lock_irqsave(hostdata->host->host_lock, flags); + /* Start out with a request limit of 0, since this is negotiated in + * the login request we are just sending and login requests always + * get sent by the driver regardless of request_limit. + */ + atomic_set(&hostdata->request_limit, 0); + + rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + dev_info(hostdata->dev, "sent SRP login\n"); + return rc; +}; + /** * adapter_info_rsp: - Handle response to MAD adapter info request * @evt_struct: srp_event_struct with the response @@ -825,6 +902,8 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct) hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; } } + + send_srp_login(hostdata); } /** @@ -844,11 +923,7 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) dma_addr_t addr; evt_struct = get_event_struct(&hostdata->pool); - if (!evt_struct) { - dev_err(hostdata->dev, - "couldn't allocate an event for ADAPTER_INFO_REQ!\n"); - return; - } + BUG_ON(!evt_struct); init_event_struct(evt_struct, adapter_info_rsp, @@ -886,89 +961,14 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) }; /** - * login_rsp: - Handle response to SRP login request - * @evt_struct: srp_event_struct with the response + * init_adapter: Start virtual adapter initialization sequence * - * Used as a "done" callback by when sending srp_login. Gets called - * by ibmvscsi_handle_crq() -*/ -static void login_rsp(struct srp_event_struct *evt_struct) + */ +static void init_adapter(struct ibmvscsi_host_data *hostdata) { - struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; - switch (evt_struct->xfer_iu->srp.login_rsp.opcode) { - case SRP_LOGIN_RSP: /* it worked! */ - break; - case SRP_LOGIN_REJ: /* refused! */ - dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n", - evt_struct->xfer_iu->srp.login_rej.reason); - /* Login failed. */ - atomic_set(&hostdata->request_limit, -1); - return; - default: - dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n", - evt_struct->xfer_iu->srp.login_rsp.opcode); - /* Login failed. */ - atomic_set(&hostdata->request_limit, -1); - return; - } - - dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); - - /* Now we know what the real request-limit is. - * This value is set rather than added to request_limit because - * request_limit could have been set to -1 by this client. - */ - atomic_set(&hostdata->request_limit, - evt_struct->xfer_iu->srp.login_rsp.req_lim_delta); - - /* If we had any pending I/Os, kick them */ - scsi_unblock_requests(hostdata->host); - send_mad_adapter_info(hostdata); - return; } -/** - * send_srp_login: - Sends the srp login - * @hostdata: ibmvscsi_host_data of host - * - * Returns zero if successful. 
-*/ -static int send_srp_login(struct ibmvscsi_host_data *hostdata) -{ - int rc; - unsigned long flags; - struct srp_login_req *login; - struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); - if (!evt_struct) { - dev_err(hostdata->dev, "couldn't allocate an event for login req!\n"); - return FAILED; - } - - init_event_struct(evt_struct, - login_rsp, - VIOSRP_SRP_FORMAT, - login_timeout); - - login = &evt_struct->iu.srp.login_req; - memset(login, 0x00, sizeof(struct srp_login_req)); - login->opcode = SRP_LOGIN_REQ; - login->req_it_iu_len = sizeof(union srp_iu); - login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; - - spin_lock_irqsave(hostdata->host->host_lock, flags); - /* Start out with a request limit of 0, since this is negotiated in - * the login request we are just sending and login requests always - * get sent by the driver regardless of request_limit. - */ - atomic_set(&hostdata->request_limit, 0); - - rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2); - spin_unlock_irqrestore(hostdata->host->host_lock, flags); - dev_info(hostdata->dev, "sent SRP login\n"); - return rc; -}; - /** * sync_completion: Signal that a synchronous command has completed * Note that after returning from this call, the evt_struct is freed. @@ -1282,7 +1282,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, if ((rc = ibmvscsi_ops->send_crq(hostdata, 0xC002000000000000LL, 0)) == 0) { /* Now login */ - send_srp_login(hostdata); + init_adapter(hostdata); } else { dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc); } @@ -1292,7 +1292,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, dev_info(hostdata->dev, "partner initialization complete\n"); /* Now login */ - send_srp_login(hostdata); + init_adapter(hostdata); break; default: dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format); -- cgit v1.2.3 From c1988e3123751fd425fbae99d5c1776608e965a9 Mon Sep 17 00:00:00 2001 From: Robert Jennings Date: Mon, 8 Jun 2009 16:19:07 -0500 Subject: [SCSI] ibmvscsi: Enable fast fail feature A new mode of error reporting, fast fail, has been added to the VIOS which allows failover to happen more quickly. If this new fast fail mode is enabled on the VIOS and the vSCSI client supports the mode, the VIOS will not return MEDIUM error on path failures, but rather return VIOSRP_ADAPTER_FAIL in the crq response, which ibmvscsi will translate to DID_ERROR. This new mode can be enabled for single path configurations as well, so it is the new default error reporting mode. A module parameter is provided to disable this new behavior on the off chance it causes a problem on some old VIOS version. 
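The previous behavior can be restored by loading the driver with fast_fail=0; because the parameter is registered with S_IRUGO | S_IWUSR it can also be flipped afterwards through /sys/module/ibmvscsi/parameters/fast_fail, although the setting only takes effect on the next adapter (re-)initialization, when the enable-fast-fail MAD is sent.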
Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvscsi.c | 59 +++++++++++++++++++++++++++++++++++++++- drivers/scsi/ibmvscsi/viosrp.h | 14 +++++++++- 2 files changed, 71 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 2ed46b8efedf..822fbc32a2ae 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -94,6 +94,7 @@ static int abort_timeout = 60; static int reset_timeout = 60; static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; +static int fast_fail = 1; static struct scsi_transport_template *ibmvscsi_transport_template; @@ -114,6 +115,8 @@ module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); module_param_named(max_requests, max_requests, int, S_IRUGO); MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); +module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]"); /* ------------------------------------------------------------ * Routines for the event pool and event structs @@ -862,6 +865,60 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) return rc; }; +/** + * fast_fail_rsp: - Handle response to MAD enable fast fail + * @evt_struct: srp_event_struct with the response + * + * Used as a "done" callback by when sending enable fast fail. Gets called + * by ibmvscsi_handle_crq() + */ +static void fast_fail_rsp(struct srp_event_struct *evt_struct) +{ + struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; + u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status; + + if (status == VIOSRP_MAD_NOT_SUPPORTED) + dev_err(hostdata->dev, "fast_fail not supported in server\n"); + else if (status == VIOSRP_MAD_FAILED) + dev_err(hostdata->dev, "fast_fail request failed\n"); + else if (status != VIOSRP_MAD_SUCCESS) + dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status); + + send_srp_login(hostdata); +} + +/** + * init_host - Start host initialization + * @hostdata: ibmvscsi_host_data of host + * + * Returns zero if successful. 
+ */ +static int enable_fast_fail(struct ibmvscsi_host_data *hostdata) +{ + int rc; + unsigned long flags; + struct viosrp_fast_fail *fast_fail_mad; + struct srp_event_struct *evt_struct; + + if (!fast_fail) + return send_srp_login(hostdata); + + evt_struct = get_event_struct(&hostdata->pool); + BUG_ON(!evt_struct); + + init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout); + + fast_fail_mad = &evt_struct->iu.mad.fast_fail; + memset(fast_fail_mad, 0, sizeof(*fast_fail_mad)); + fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL; + fast_fail_mad->common.length = sizeof(*fast_fail_mad); + + spin_lock_irqsave(hostdata->host->host_lock, flags); + rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + return rc; +} + /** * adapter_info_rsp: - Handle response to MAD adapter info request * @evt_struct: srp_event_struct with the response @@ -903,7 +960,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct) } } - send_srp_login(hostdata); + enable_fast_fail(hostdata); } /** diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h index 204604501ad8..f5a9c26d1da8 100644 --- a/drivers/scsi/ibmvscsi/viosrp.h +++ b/drivers/scsi/ibmvscsi/viosrp.h @@ -86,7 +86,14 @@ enum viosrp_mad_types { VIOSRP_EMPTY_IU_TYPE = 0x01, VIOSRP_ERROR_LOG_TYPE = 0x02, VIOSRP_ADAPTER_INFO_TYPE = 0x03, - VIOSRP_HOST_CONFIG_TYPE = 0x04 + VIOSRP_HOST_CONFIG_TYPE = 0x04, + VIOSRP_ENABLE_FAST_FAIL = 0x08, +}; + +enum viosrp_mad_status { + VIOSRP_MAD_SUCCESS = 0x00, + VIOSRP_MAD_NOT_SUPPORTED = 0xF1, + VIOSRP_MAD_FAILED = 0xF7, }; /* @@ -127,11 +134,16 @@ struct viosrp_host_config { u64 buffer; }; +struct viosrp_fast_fail { + struct mad_common common; +}; + union mad_iu { struct viosrp_empty_iu empty_iu; struct viosrp_error_log error_log; struct viosrp_adapter_info adapter_info; struct viosrp_host_config host_config; + struct viosrp_fast_fail fast_fail; }; union viosrp_iu { -- cgit v1.2.3 From 126c5cc37e682e7c5ae96754994b1cb50c2d0cb5 Mon Sep 17 00:00:00 2001 From: Brian King Date: Mon, 8 Jun 2009 16:19:08 -0500 Subject: [SCSI] ibmvscsi: Add support for capabilities MAD Add support to ibmvscsi for the capabilities MAD. This command gets sent to the Virtual I/O server prior to login in order to communicate client capabilities. Additionally it returns information regarding capabilities that the server supports. The two main capabilities communicated in this MAD are related to partition migration and client reserve. Client reserve allows for SCSI-2 reservations to be sent to virtual disks which are backed by physical LUNs and will result in the reservation being sent to the physical LUN. 
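The reservation half of the negotiation can be disabled at load time with the new client_reserve module parameter (registered read-only, S_IRUGO); when it is off, the capabilities request length is shortened so the reservation capability is simply not advertised to the server. The adapter name and location code exchanged here are also exported through the new vhost_name and vhost_loc sysfs attributes added by this patch.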
Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvscsi.c | 224 ++++++++++++++++++++++++++++++++++----- drivers/scsi/ibmvscsi/ibmvscsi.h | 4 + drivers/scsi/ibmvscsi/viosrp.h | 54 ++++++++++ 3 files changed, 255 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 822fbc32a2ae..11d2602ae88e 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -70,6 +70,7 @@ #include #include #include +#include #include #include #include @@ -95,6 +96,7 @@ static int reset_timeout = 60; static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; static int fast_fail = 1; +static int client_reserve = 1; static struct scsi_transport_template *ibmvscsi_transport_template; @@ -117,6 +119,8 @@ module_param_named(max_requests, max_requests, int, S_IRUGO); MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]"); +module_param_named(client_reserve, client_reserve, int, S_IRUGO ); +MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release"); /* ------------------------------------------------------------ * Routines for the event pool and event structs @@ -789,6 +793,53 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, * Routines for driver initialization */ +/** + * map_persist_bufs: - Pre-map persistent data for adapter logins + * @hostdata: ibmvscsi_host_data of host + * + * Map the capabilities and adapter info DMA buffers to avoid runtime failures. + * Return 1 on error, 0 on success. + */ +static int map_persist_bufs(struct ibmvscsi_host_data *hostdata) +{ + + hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps, + sizeof(hostdata->caps), DMA_BIDIRECTIONAL); + + if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) { + dev_err(hostdata->dev, "Unable to map capabilities buffer!\n"); + return 1; + } + + hostdata->adapter_info_addr = dma_map_single(hostdata->dev, + &hostdata->madapter_info, + sizeof(hostdata->madapter_info), + DMA_BIDIRECTIONAL); + if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) { + dev_err(hostdata->dev, "Unable to map adapter info buffer!\n"); + dma_unmap_single(hostdata->dev, hostdata->caps_addr, + sizeof(hostdata->caps), DMA_BIDIRECTIONAL); + return 1; + } + + return 0; +} + +/** + * unmap_persist_bufs: - Unmap persistent data needed for adapter logins + * @hostdata: ibmvscsi_host_data of host + * + * Unmap the capabilities and adapter info DMA buffers + */ +static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata) +{ + dma_unmap_single(hostdata->dev, hostdata->caps_addr, + sizeof(hostdata->caps), DMA_BIDIRECTIONAL); + + dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr, + sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL); +} + /** * login_rsp: - Handle response to SRP login request * @evt_struct: srp_event_struct with the response @@ -817,6 +868,7 @@ static void login_rsp(struct srp_event_struct *evt_struct) } dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); + hostdata->client_migrated = 0; /* Now we know what the real request-limit is. 
* This value is set rather than added to request_limit because @@ -865,6 +917,93 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) return rc; }; +/** + * capabilities_rsp: - Handle response to MAD adapter capabilities request + * @evt_struct: srp_event_struct with the response + * + * Used as a "done" callback by when sending adapter_info. + */ +static void capabilities_rsp(struct srp_event_struct *evt_struct) +{ + struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; + + if (evt_struct->xfer_iu->mad.capabilities.common.status) { + dev_err(hostdata->dev, "error 0x%X getting capabilities info\n", + evt_struct->xfer_iu->mad.capabilities.common.status); + } else { + if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP) + dev_info(hostdata->dev, "Partition migration not supported\n"); + + if (client_reserve) { + if (hostdata->caps.reserve.common.server_support == + SERVER_SUPPORTS_CAP) + dev_info(hostdata->dev, "Client reserve enabled\n"); + else + dev_info(hostdata->dev, "Client reserve not supported\n"); + } + } + + send_srp_login(hostdata); +} + +/** + * send_mad_capabilities: - Sends the mad capabilities request + * and stores the result so it can be retrieved with + * @hostdata: ibmvscsi_host_data of host + */ +static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) +{ + struct viosrp_capabilities *req; + struct srp_event_struct *evt_struct; + unsigned long flags; + struct device_node *of_node = hostdata->dev->archdata.of_node; + const char *location; + + evt_struct = get_event_struct(&hostdata->pool); + BUG_ON(!evt_struct); + + init_event_struct(evt_struct, capabilities_rsp, + VIOSRP_MAD_FORMAT, info_timeout); + + req = &evt_struct->iu.mad.capabilities; + memset(req, 0, sizeof(*req)); + + hostdata->caps.flags = CAP_LIST_SUPPORTED; + if (hostdata->client_migrated) + hostdata->caps.flags |= CLIENT_MIGRATED; + + strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), + sizeof(hostdata->caps.name)); + hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0'; + + location = of_get_property(of_node, "ibm,loc-code", NULL); + location = location ? 
location : dev_name(hostdata->dev); + strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); + hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0'; + + req->common.type = VIOSRP_CAPABILITIES_TYPE; + req->buffer = hostdata->caps_addr; + + hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES; + hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration); + hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP; + hostdata->caps.migration.ecl = 1; + + if (client_reserve) { + hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES; + hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve); + hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP; + hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2; + req->common.length = sizeof(hostdata->caps); + } else + req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve); + + spin_lock_irqsave(hostdata->host->host_lock, flags); + if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) + dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n"); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); +}; + /** * fast_fail_rsp: - Handle response to MAD enable fast fail * @evt_struct: srp_event_struct with the response @@ -884,7 +1023,7 @@ static void fast_fail_rsp(struct srp_event_struct *evt_struct) else if (status != VIOSRP_MAD_SUCCESS) dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status); - send_srp_login(hostdata); + send_mad_capabilities(hostdata); } /** @@ -900,8 +1039,10 @@ static int enable_fast_fail(struct ibmvscsi_host_data *hostdata) struct viosrp_fast_fail *fast_fail_mad; struct srp_event_struct *evt_struct; - if (!fast_fail) - return send_srp_login(hostdata); + if (!fast_fail) { + send_mad_capabilities(hostdata); + return 0; + } evt_struct = get_event_struct(&hostdata->pool); BUG_ON(!evt_struct); @@ -929,10 +1070,6 @@ static int enable_fast_fail(struct ibmvscsi_host_data *hostdata) static void adapter_info_rsp(struct srp_event_struct *evt_struct) { struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; - dma_unmap_single(hostdata->dev, - evt_struct->iu.mad.adapter_info.buffer, - evt_struct->iu.mad.adapter_info.common.length, - DMA_BIDIRECTIONAL); if (evt_struct->xfer_iu->mad.adapter_info.common.status) { dev_err(hostdata->dev, "error %d getting adapter info\n", @@ -977,7 +1114,6 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) struct viosrp_adapter_info *req; struct srp_event_struct *evt_struct; unsigned long flags; - dma_addr_t addr; evt_struct = get_event_struct(&hostdata->pool); BUG_ON(!evt_struct); @@ -992,28 +1128,11 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) req->common.type = VIOSRP_ADAPTER_INFO_TYPE; req->common.length = sizeof(hostdata->madapter_info); - req->buffer = addr = dma_map_single(hostdata->dev, - &hostdata->madapter_info, - sizeof(hostdata->madapter_info), - DMA_BIDIRECTIONAL); + req->buffer = hostdata->adapter_info_addr; - if (dma_mapping_error(hostdata->dev, req->buffer)) { - if (!firmware_has_feature(FW_FEATURE_CMO)) - dev_err(hostdata->dev, - "Unable to map request_buffer for " - "adapter_info!\n"); - free_event_struct(&hostdata->pool, evt_struct); - return; - } - spin_lock_irqsave(hostdata->host->host_lock, flags); - if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) { + if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) dev_err(hostdata->dev, "couldn't send 
ADAPTER_INFO_REQ!\n"); - dma_unmap_single(hostdata->dev, - addr, - sizeof(hostdata->madapter_info), - DMA_BIDIRECTIONAL); - } spin_unlock_irqrestore(hostdata->host->host_lock, flags); }; @@ -1361,6 +1480,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, if (crq->format == 0x06) { /* We need to re-setup the interpartition connection */ dev_info(hostdata->dev, "Re-enabling adapter!\n"); + hostdata->client_migrated = 1; purge_requests(hostdata, DID_REQUEUE); if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, hostdata)) || @@ -1529,6 +1649,46 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) /* ------------------------------------------------------------ * sysfs attributes */ +static ssize_t show_host_vhost_loc(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvscsi_host_data *hostdata = shost_priv(shost); + int len; + + len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n", + hostdata->caps.loc); + return len; +} + +static struct device_attribute ibmvscsi_host_vhost_loc = { + .attr = { + .name = "vhost_loc", + .mode = S_IRUGO, + }, + .show = show_host_vhost_loc, +}; + +static ssize_t show_host_vhost_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvscsi_host_data *hostdata = shost_priv(shost); + int len; + + len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n", + hostdata->caps.name); + return len; +} + +static struct device_attribute ibmvscsi_host_vhost_name = { + .attr = { + .name = "vhost_name", + .mode = S_IRUGO, + }, + .show = show_host_vhost_name, +}; + static ssize_t show_host_srp_version(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1652,6 +1812,8 @@ static struct device_attribute ibmvscsi_host_config = { }; static struct device_attribute *ibmvscsi_attrs[] = { + &ibmvscsi_host_vhost_loc, + &ibmvscsi_host_vhost_name, &ibmvscsi_host_srp_version, &ibmvscsi_host_partition_name, &ibmvscsi_host_partition_number, @@ -1732,6 +1894,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) atomic_set(&hostdata->request_limit, -1); hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT; + if (map_persist_bufs(hostdata)) { + dev_err(&vdev->dev, "couldn't map persistent buffers\n"); + goto persist_bufs_failed; + } + rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events); if (rc != 0 && rc != H_RESOURCE) { dev_err(&vdev->dev, "couldn't initialize crq. 
rc=%d\n", rc); @@ -1792,6 +1959,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) init_pool_failed: ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); init_crq_failed: + unmap_persist_bufs(hostdata); + persist_bufs_failed: scsi_host_put(host); scsi_host_alloc_failed: return -1; @@ -1800,6 +1969,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) static int ibmvscsi_remove(struct vio_dev *vdev) { struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data; + unmap_persist_bufs(hostdata); release_event_pool(&hostdata->pool, hostdata); ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h index 2d4339d5e16e..76425303def0 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.h +++ b/drivers/scsi/ibmvscsi/ibmvscsi.h @@ -90,6 +90,7 @@ struct event_pool { /* all driver data associated with a host adapter */ struct ibmvscsi_host_data { atomic_t request_limit; + int client_migrated; struct device *dev; struct event_pool pool; struct crq_queue queue; @@ -97,6 +98,9 @@ struct ibmvscsi_host_data { struct list_head sent; struct Scsi_Host *host; struct mad_adapter_info_data madapter_info; + struct capabilities caps; + dma_addr_t caps_addr; + dma_addr_t adapter_info_addr; }; /* routines for managing a command/response queue */ diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h index f5a9c26d1da8..2cd735d1d196 100644 --- a/drivers/scsi/ibmvscsi/viosrp.h +++ b/drivers/scsi/ibmvscsi/viosrp.h @@ -37,6 +37,7 @@ #define SRP_VERSION "16.a" #define SRP_MAX_IU_LEN 256 +#define SRP_MAX_LOC_LEN 32 union srp_iu { struct srp_login_req login_req; @@ -87,6 +88,7 @@ enum viosrp_mad_types { VIOSRP_ERROR_LOG_TYPE = 0x02, VIOSRP_ADAPTER_INFO_TYPE = 0x03, VIOSRP_HOST_CONFIG_TYPE = 0x04, + VIOSRP_CAPABILITIES_TYPE = 0x05, VIOSRP_ENABLE_FAST_FAIL = 0x08, }; @@ -96,6 +98,28 @@ enum viosrp_mad_status { VIOSRP_MAD_FAILED = 0xF7, }; +enum viosrp_capability_type { + MIGRATION_CAPABILITIES = 0x01, + RESERVATION_CAPABILITIES = 0x02, +}; + +enum viosrp_capability_support { + SERVER_DOES_NOT_SUPPORTS_CAP = 0x0, + SERVER_SUPPORTS_CAP = 0x01, + SERVER_CAP_DATA = 0x02, +}; + +enum viosrp_reserve_type { + CLIENT_RESERVE_SCSI_2 = 0x01, +}; + +enum viosrp_capability_flag { + CLIENT_MIGRATED = 0x01, + CLIENT_RECONNECT = 0x02, + CAP_LIST_SUPPORTED = 0x04, + CAP_LIST_DATA = 0x08, +}; + /* * Common MAD header */ @@ -138,12 +162,42 @@ struct viosrp_fast_fail { struct mad_common common; }; +struct viosrp_capabilities { + struct mad_common common; + u64 buffer; +}; + +struct mad_capability_common { + u32 cap_type; + u16 length; + u16 server_support; +}; + +struct mad_reserve_cap { + struct mad_capability_common common; + u32 type; +}; + +struct mad_migration_cap { + struct mad_capability_common common; + u32 ecl; +}; + +struct capabilities{ + u32 flags; + char name[SRP_MAX_LOC_LEN]; + char loc[SRP_MAX_LOC_LEN]; + struct mad_migration_cap migration; + struct mad_reserve_cap reserve; +}; + union mad_iu { struct viosrp_empty_iu empty_iu; struct viosrp_error_log error_log; struct viosrp_adapter_info adapter_info; struct viosrp_host_config host_config; struct viosrp_fast_fail fast_fail; + struct viosrp_capabilities capabilities; }; union viosrp_iu { -- cgit v1.2.3 From 43514774ff40c4fbe0cbbd3d8293a359f1a9fe71 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 8 Jun 2009 18:14:41 -0700 Subject: [SCSI] iscsi class: Add new NETLINK_ISCSI messages 
for cnic/bnx2i driver. Add ISCSI_NETLINK messages for iSCSI NICs to get information such as path from userspace. Original iscsid messages are now always sent as multicast to group 1. The new messages are sent to group 2. The multicast changes were made by Mike Christie. Signed-off-by: Michael Chan Signed-off-by: Benjamin Li Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_iscsi.c | 122 +++++++++++++++++++++++++----------- 1 file changed, 86 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index d69a53aa406f..f3e664628d7a 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -37,7 +37,6 @@ #define ISCSI_TRANSPORT_VERSION "2.0-870" struct iscsi_internal { - int daemon_pid; struct scsi_transport_template t; struct iscsi_transport *iscsi_transport; struct list_head list; @@ -938,23 +937,9 @@ iscsi_if_transport_lookup(struct iscsi_transport *tt) } static int -iscsi_broadcast_skb(struct sk_buff *skb, gfp_t gfp) +iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp) { - return netlink_broadcast(nls, skb, 0, 1, gfp); -} - -static int -iscsi_unicast_skb(struct sk_buff *skb, int pid) -{ - int rc; - - rc = netlink_unicast(nls, skb, pid, MSG_DONTWAIT); - if (rc < 0) { - printk(KERN_ERR "iscsi: can not unicast skb (%d)\n", rc); - return rc; - } - - return 0; + return nlmsg_multicast(nls, skb, 0, group, gfp); } int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, @@ -980,7 +965,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, return -ENOMEM; } - nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); + nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = NLMSG_DATA(nlh); memset(ev, 0, sizeof(*ev)); ev->transport_handle = iscsi_handle(conn->transport); @@ -991,10 +976,45 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); - return iscsi_unicast_skb(skb, priv->daemon_pid); + return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); } EXPORT_SYMBOL_GPL(iscsi_recv_pdu); +int iscsi_offload_mesg(struct Scsi_Host *shost, + struct iscsi_transport *transport, uint32_t type, + char *data, uint16_t data_size) +{ + struct nlmsghdr *nlh; + struct sk_buff *skb; + struct iscsi_uevent *ev; + int len = NLMSG_SPACE(sizeof(*ev) + data_size); + + skb = alloc_skb(len, GFP_NOIO); + if (!skb) { + printk(KERN_ERR "can not deliver iscsi offload message:OOM\n"); + return -ENOMEM; + } + + nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); + ev = NLMSG_DATA(nlh); + memset(ev, 0, sizeof(*ev)); + ev->type = type; + ev->transport_handle = iscsi_handle(transport); + switch (type) { + case ISCSI_KEVENT_PATH_REQ: + ev->r.req_path.host_no = shost->host_no; + break; + case ISCSI_KEVENT_IF_DOWN: + ev->r.notify_if_down.host_no = shost->host_no; + break; + } + + memcpy((char *)ev + sizeof(*ev), data, data_size); + + return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO); +} +EXPORT_SYMBOL_GPL(iscsi_offload_mesg); + void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) { struct nlmsghdr *nlh; @@ -1014,7 +1034,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) return; } - nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); + nlh = __nlmsg_put(skb, 0, 0, 0, (len - 
sizeof(*nlh)), 0); ev = NLMSG_DATA(nlh); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_CONN_ERROR; @@ -1022,7 +1042,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) ev->r.connerror.cid = conn->cid; ev->r.connerror.sid = iscsi_conn_get_sid(conn); - iscsi_broadcast_skb(skb, GFP_ATOMIC); + iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n", error); @@ -1030,8 +1050,8 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) EXPORT_SYMBOL_GPL(iscsi_conn_error_event); static int -iscsi_if_send_reply(int pid, int seq, int type, int done, int multi, - void *payload, int size) +iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi, + void *payload, int size) { struct sk_buff *skb; struct nlmsghdr *nlh; @@ -1045,10 +1065,10 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi, return -ENOMEM; } - nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0); + nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0); nlh->nlmsg_flags = flags; memcpy(NLMSG_DATA(nlh), payload, size); - return iscsi_unicast_skb(skb, pid); + return iscsi_multicast_skb(skb, group, GFP_ATOMIC); } static int @@ -1085,7 +1105,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) return -ENOMEM; } - nlhstat = __nlmsg_put(skbstat, priv->daemon_pid, 0, 0, + nlhstat = __nlmsg_put(skbstat, 0, 0, 0, (len - sizeof(*nlhstat)), 0); evstat = NLMSG_DATA(nlhstat); memset(evstat, 0, sizeof(*evstat)); @@ -1109,7 +1129,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) skb_trim(skbstat, NLMSG_ALIGN(actual_size)); nlhstat->nlmsg_len = actual_size; - err = iscsi_unicast_skb(skbstat, priv->daemon_pid); + err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID, + GFP_ATOMIC); } while (err < 0 && err != -ECONNREFUSED); return err; @@ -1143,7 +1164,7 @@ int iscsi_session_event(struct iscsi_cls_session *session, return -ENOMEM; } - nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); + nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = NLMSG_DATA(nlh); ev->transport_handle = iscsi_handle(session->transport); @@ -1172,7 +1193,7 @@ int iscsi_session_event(struct iscsi_cls_session *session, * this will occur if the daemon is not up, so we just warn * the user and when the daemon is restarted it will handle it */ - rc = iscsi_broadcast_skb(skb, GFP_KERNEL); + rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); if (rc == -ESRCH) iscsi_cls_session_printk(KERN_ERR, session, "Cannot notify userspace of session " @@ -1393,7 +1414,31 @@ iscsi_set_host_param(struct iscsi_transport *transport, } static int -iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) +iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev) +{ + struct Scsi_Host *shost; + struct iscsi_path *params; + int err; + + if (!transport->set_path) + return -ENOSYS; + + shost = scsi_host_lookup(ev->u.set_path.host_no); + if (!shost) { + printk(KERN_ERR "set path could not find host no %u\n", + ev->u.set_path.host_no); + return -ENODEV; + } + + params = (struct iscsi_path *)((char *)ev + sizeof(*ev)); + err = transport->set_path(shost, params); + + scsi_host_put(shost); + return err; +} + +static int +iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; struct iscsi_uevent *ev = NLMSG_DATA(nlh); @@ -1403,6 +1448,11 @@ 
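To make the new fan-out concrete: ISCSI_UEVENT_PATH_UPDATE traffic is routed to multicast group 2 (ISCSI_NL_GRP_UIP), the same group that iscsi_offload_mesg() above uses for ISCSI_KEVENT_PATH_REQ and ISCSI_KEVENT_IF_DOWN events, while everything bound for iscsid stays on group 1 (ISCSI_NL_GRP_ISCSID). A minimal userspace listener for group 2 might look like the sketch below; it is not part of the patch, assumes only NETLINK_ISCSI from linux/netlink.h and the group numbering described in this commit, and elides payload parsing:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#define ISCSI_NL_GRP_UIP	2	/* group 2, per this patch */

int main(void)
{
	struct sockaddr_nl addr;
	char buf[8192];
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ISCSI);
	if (fd < 0)
		return 1;

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	addr.nl_groups = 1 << (ISCSI_NL_GRP_UIP - 1);	/* bitmask, not group number */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* each datagram is a struct nlmsghdr followed by a struct iscsi_uevent,
	 * e.g. an ISCSI_KEVENT_PATH_REQ built by iscsi_offload_mesg() */
	while (recv(fd, buf, sizeof(buf), 0) > 0)
		printf("iscsi offload event received\n");

	close(fd);
	return 0;
}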
iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) struct iscsi_cls_conn *conn; struct iscsi_endpoint *ep = NULL; + if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE) + *group = ISCSI_NL_GRP_UIP; + else + *group = ISCSI_NL_GRP_ISCSID; + priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); if (!priv) return -EINVAL; @@ -1411,8 +1461,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (!try_module_get(transport->owner)) return -EINVAL; - priv->daemon_pid = NETLINK_CREDS(skb)->pid; - switch (nlh->nlmsg_type) { case ISCSI_UEVENT_CREATE_SESSION: err = iscsi_if_create_session(priv, ep, ev, @@ -1506,6 +1554,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) case ISCSI_UEVENT_SET_HOST_PARAM: err = iscsi_set_host_param(transport, ev); break; + case ISCSI_UEVENT_PATH_UPDATE: + err = iscsi_set_path(transport, ev); + break; default: err = -ENOSYS; break; @@ -1528,6 +1579,7 @@ iscsi_if_rx(struct sk_buff *skb) uint32_t rlen; struct nlmsghdr *nlh; struct iscsi_uevent *ev; + uint32_t group; nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(*nlh) || @@ -1540,7 +1592,7 @@ iscsi_if_rx(struct sk_buff *skb) if (rlen > skb->len) rlen = skb->len; - err = iscsi_if_recv_msg(skb, nlh); + err = iscsi_if_recv_msg(skb, nlh, &group); if (err) { ev->type = ISCSI_KEVENT_IF_ERROR; ev->iferror = err; @@ -1554,8 +1606,7 @@ iscsi_if_rx(struct sk_buff *skb) */ if (ev->type == ISCSI_UEVENT_GET_STATS && !err) break; - err = iscsi_if_send_reply( - NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq, + err = iscsi_if_send_reply(group, nlh->nlmsg_seq, nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); } while (err < 0 && err != -ECONNREFUSED); skb_pull(skb, rlen); @@ -1803,7 +1854,6 @@ iscsi_register_transport(struct iscsi_transport *tt) if (!priv) return NULL; INIT_LIST_HEAD(&priv->list); - priv->daemon_pid = -1; priv->iscsi_transport = tt; priv->t.user_scan = iscsi_user_scan; priv->t.create_work_queue = 1; -- cgit v1.2.3 From 4edd473f208cff77ce1f7ef26d5a41f31fa198e0 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 8 Jun 2009 18:14:42 -0700 Subject: [SCSI] bnx2: Add support for CNIC driver. Add interface and functions to support a new CNIC driver to drive the Broadcom bnx2 hardware for iSCSI offload. Signed-off-by: Michael Chan Acked-by: David S. 
Miller Signed-off-by: James Bottomley --- drivers/net/bnx2.c | 193 ++++++++++++++++++++++++++++++++++++++++++++++++++++- drivers/net/bnx2.h | 18 +++++ 2 files changed, 208 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index b0cb29d4cc01..3f5fcb0156a1 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -49,6 +49,10 @@ #include #include +#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) +#define BCM_CNIC 1 +#include "cnic_if.h" +#endif #include "bnx2.h" #include "bnx2_fw.h" @@ -315,6 +319,158 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) spin_unlock_bh(&bp->indirect_lock); } +#ifdef BCM_CNIC +static int +bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info) +{ + struct bnx2 *bp = netdev_priv(dev); + struct drv_ctl_io *io = &info->data.io; + + switch (info->cmd) { + case DRV_CTL_IO_WR_CMD: + bnx2_reg_wr_ind(bp, io->offset, io->data); + break; + case DRV_CTL_IO_RD_CMD: + io->data = bnx2_reg_rd_ind(bp, io->offset); + break; + case DRV_CTL_CTX_WR_CMD: + bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data); + break; + default: + return -EINVAL; + } + return 0; +} + +static void bnx2_setup_cnic_irq_info(struct bnx2 *bp) +{ + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + int sb_id; + + if (bp->flags & BNX2_FLAG_USING_MSIX) { + cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; + bnapi->cnic_present = 0; + sb_id = bp->irq_nvecs; + cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; + } else { + cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; + bnapi->cnic_tag = bnapi->last_status_idx; + bnapi->cnic_present = 1; + sb_id = 0; + cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; + } + + cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector; + cp->irq_arr[0].status_blk = (void *) + ((unsigned long) bnapi->status_blk.msi + + (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id)); + cp->irq_arr[0].status_blk_num = sb_id; + cp->num_irq = 1; +} + +static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops, + void *data) +{ + struct bnx2 *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + if (ops == NULL) + return -EINVAL; + + if (cp->drv_state & CNIC_DRV_STATE_REGD) + return -EBUSY; + + bp->cnic_data = data; + rcu_assign_pointer(bp->cnic_ops, ops); + + cp->num_irq = 0; + cp->drv_state = CNIC_DRV_STATE_REGD; + + bnx2_setup_cnic_irq_info(bp); + + return 0; +} + +static int bnx2_unregister_cnic(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + cp->drv_state = 0; + bnapi->cnic_present = 0; + rcu_assign_pointer(bp->cnic_ops, NULL); + synchronize_rcu(); + return 0; +} + +struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + cp->drv_owner = THIS_MODULE; + cp->chip_id = bp->chip_id; + cp->pdev = bp->pdev; + cp->io_base = bp->regview; + cp->drv_ctl = bnx2_drv_ctl; + cp->drv_register_cnic = bnx2_register_cnic; + cp->drv_unregister_cnic = bnx2_unregister_cnic; + + return cp; +} +EXPORT_SYMBOL(bnx2_cnic_probe); + +static void +bnx2_cnic_stop(struct bnx2 *bp) +{ + struct cnic_ops *c_ops; + struct cnic_ctl_info info; + + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) { + info.cmd = CNIC_CTL_STOP_CMD; + c_ops->cnic_ctl(bp->cnic_data, &info); + } + rcu_read_unlock(); +} + +static void +bnx2_cnic_start(struct bnx2 *bp) +{ + struct cnic_ops 
*c_ops; + struct cnic_ctl_info info; + + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) { + if (!(bp->flags & BNX2_FLAG_USING_MSIX)) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + + bnapi->cnic_tag = bnapi->last_status_idx; + } + info.cmd = CNIC_CTL_START_CMD; + c_ops->cnic_ctl(bp->cnic_data, &info); + } + rcu_read_unlock(); +} + +#else + +static void +bnx2_cnic_stop(struct bnx2 *bp) +{ +} + +static void +bnx2_cnic_start(struct bnx2 *bp) +{ +} + +#endif + static int bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val) { @@ -488,6 +644,7 @@ bnx2_napi_enable(struct bnx2 *bp) static void bnx2_netif_stop(struct bnx2 *bp) { + bnx2_cnic_stop(bp); bnx2_disable_int_sync(bp); if (netif_running(bp->dev)) { bnx2_napi_disable(bp); @@ -504,6 +661,7 @@ bnx2_netif_start(struct bnx2 *bp) netif_tx_wake_all_queues(bp->dev); bnx2_napi_enable(bp); bnx2_enable_int(bp); + bnx2_cnic_start(bp); } } } @@ -3164,6 +3322,11 @@ bnx2_has_work(struct bnx2_napi *bnapi) if (bnx2_has_fast_work(bnapi)) return 1; +#ifdef BCM_CNIC + if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx)) + return 1; +#endif + if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) != (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS)) return 1; @@ -3193,6 +3356,23 @@ bnx2_chk_missed_msi(struct bnx2 *bp) bp->idle_chk_status_idx = bnapi->last_status_idx; } +#ifdef BCM_CNIC +static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi) +{ + struct cnic_ops *c_ops; + + if (!bnapi->cnic_present) + return; + + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) + bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data, + bnapi->status_blk.msi); + rcu_read_unlock(); +} +#endif + static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi) { struct status_block *sblk = bnapi->status_blk.msi; @@ -3267,6 +3447,10 @@ static int bnx2_poll(struct napi_struct *napi, int budget) work_done = bnx2_poll_work(bp, bnapi, work_done, budget); +#ifdef BCM_CNIC + bnx2_poll_cnic(bp, bnapi); +#endif + /* bnapi->last_status_idx is used below to tell the hw how * much work has been processed, so we must read it before * checking for more work. @@ -4632,8 +4816,11 @@ bnx2_init_chip(struct bnx2 *bp) val = REG_RD(bp, BNX2_MQ_CONFIG); val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; - if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1) - val |= BNX2_MQ_CONFIG_HALT_DIS; + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + val |= BNX2_MQ_CONFIG_BIN_MQ_MODE; + if (CHIP_REV(bp) == CHIP_REV_Ax) + val |= BNX2_MQ_CONFIG_HALT_DIS; + } REG_WR(bp, BNX2_MQ_CONFIG, val); @@ -7471,7 +7658,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) INIT_WORK(&bp->reset_task, bnx2_reset_task); dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); - mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS); + mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1); dev->mem_end = dev->mem_start + mem_len; dev->irq = pdev->irq; diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index 5b570e17c839..a1ff739bc9b5 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -361,6 +361,9 @@ struct l2_fhdr { #define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28) #define BNX2_L2CTX_HOST_BDIDX 0x00000004 +#define BNX2_L2CTX_STATUSB_NUM_SHIFT 16 +#define BNX2_L2CTX_STATUSB_NUM(sb_id) \ + (((sb_id) > 0) ? 
(((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0) #define BNX2_L2CTX_HOST_BSEQ 0x00000008 #define BNX2_L2CTX_NX_BSEQ 0x0000000c #define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010 @@ -5900,6 +5903,7 @@ struct l2_fhdr { #define BNX2_RXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) #define BNX2_RXP_SCRATCH 0x000e0000 +#define BNX2_RXP_SCRATCH_RXP_FLOOD 0x000e0024 #define BNX2_RXP_SCRATCH_RSS_TBL_SZ 0x000e0038 #define BNX2_RXP_SCRATCH_RSS_TBL 0x000e003c #define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES 128 @@ -6678,6 +6682,11 @@ struct bnx2_napi { u32 last_status_idx; u32 int_num; +#ifdef BCM_CNIC + u32 cnic_tag; + int cnic_present; +#endif + struct bnx2_rx_ring_info rx_ring; struct bnx2_tx_ring_info tx_ring; }; @@ -6727,6 +6736,11 @@ struct bnx2 { int tx_ring_size; u32 tx_wake_thresh; +#ifdef BCM_CNIC + struct cnic_ops *cnic_ops; + void *cnic_data; +#endif + /* End of fields used in the performance code paths. */ unsigned int current_interval; @@ -6885,6 +6899,10 @@ struct bnx2 { u32 idle_chk_status_idx; +#ifdef BCM_CNIC + struct cnic_eth_dev cnic_eth_dev; +#endif + const struct firmware *mips_firmware; const struct firmware *rv2p_firmware; }; -- cgit v1.2.3 From a463696039f7097ce87c21db3cf5c16cdcb3850d Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 8 Jun 2009 18:14:43 -0700 Subject: [SCSI] cnic: Add new Broadcom CNIC driver. The CNIC driver controls BNX2 hardware rings and resources used by iSCSI. Most hardware resources for iSCSI are separate from those used for ethernet networking. iSCSI uses a separate MAC address and IP address. The CNIC driver creates a UIO interface to handle the non-offloaded packets such as ARP, etc in userspace. Signed-off-by: Michael Chan Acked-by: David S. Miller Signed-off-by: James Bottomley --- drivers/net/Kconfig | 11 + drivers/net/Makefile | 1 + drivers/net/cnic.c | 2711 +++++++++++++++++++++++++++++++++++++++++++++++ drivers/net/cnic.h | 299 ++++++ drivers/net/cnic_defs.h | 580 ++++++++++ drivers/net/cnic_if.h | 299 ++++++ 6 files changed, 3901 insertions(+) create mode 100644 drivers/net/cnic.c create mode 100644 drivers/net/cnic.h create mode 100644 drivers/net/cnic_defs.h create mode 100644 drivers/net/cnic_if.h (limited to 'drivers') diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 214a92d1ef75..f3c4a3b910bb 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2264,6 +2264,17 @@ config BNX2 To compile this driver as a module, choose M here: the module will be called bnx2. This is recommended. +config CNIC + tristate "Broadcom CNIC support" + depends on BNX2 + depends on UIO + help + This driver supports offload features of Broadcom NetXtremeII + gigabit Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called cnic. This is recommended. + config SPIDER_NET tristate "Spider Gigabit Ethernet driver" depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB) diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 1fc4602a6ff2..e6f1f8c3f8d4 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -73,6 +73,7 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o obj-$(CONFIG_FEALNX) += fealnx.o obj-$(CONFIG_TIGON3) += tg3.o obj-$(CONFIG_BNX2) += bnx2.o +obj-$(CONFIG_CNIC) += cnic.o obj-$(CONFIG_BNX2X) += bnx2x.o bnx2x-objs := bnx2x_main.o bnx2x_link.o spidernet-y += spider_net.o spider_net_ethtool.o diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c new file mode 100644 index 000000000000..8d740376bbd2 --- /dev/null +++ b/drivers/net/cnic.c @@ -0,0 +1,2711 @@ +/* cnic.c: Broadcom CNIC core network driver. 
+ * + * Copyright (c) 2006-2009 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com) + * Modified and maintained by: Michael Chan + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define BCM_VLAN 1 +#endif +#include +#include +#include +#include +#include +#include + +#include "cnic_if.h" +#include "bnx2.h" +#include "cnic.h" +#include "cnic_defs.h" + +#define DRV_MODULE_NAME "cnic" +#define PFX DRV_MODULE_NAME ": " + +static char version[] __devinitdata = + "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Michael Chan and John(Zongxi) " + "Chen (zongxi@broadcom.com"); +MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(CNIC_MODULE_VERSION); + +static LIST_HEAD(cnic_dev_list); +static DEFINE_RWLOCK(cnic_dev_lock); +static DEFINE_MUTEX(cnic_lock); + +static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; + +static int cnic_service_bnx2(void *, void *); +static int cnic_ctl(void *, struct cnic_ctl_info *); + +static struct cnic_ops cnic_bnx2_ops = { + .cnic_owner = THIS_MODULE, + .cnic_handler = cnic_service_bnx2, + .cnic_ctl = cnic_ctl, +}; + +static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *); +static void cnic_init_bnx2_tx_ring(struct cnic_dev *); +static void cnic_init_bnx2_rx_ring(struct cnic_dev *); +static int cnic_cm_set_pg(struct cnic_sock *); + +static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) +{ + struct cnic_dev *dev = uinfo->priv; + struct cnic_local *cp = dev->cnic_priv; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (cp->uio_dev != -1) + return -EBUSY; + + cp->uio_dev = iminor(inode); + + cnic_shutdown_bnx2_rx_ring(dev); + + cnic_init_bnx2_tx_ring(dev); + cnic_init_bnx2_rx_ring(dev); + + return 0; +} + +static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode) +{ + struct cnic_dev *dev = uinfo->priv; + struct cnic_local *cp = dev->cnic_priv; + + cp->uio_dev = -1; + return 0; +} + +static inline void cnic_hold(struct cnic_dev *dev) +{ + atomic_inc(&dev->ref_count); +} + +static inline void cnic_put(struct cnic_dev *dev) +{ + atomic_dec(&dev->ref_count); +} + +static inline void csk_hold(struct cnic_sock *csk) +{ + atomic_inc(&csk->ref_count); +} + +static inline void csk_put(struct cnic_sock *csk) +{ + atomic_dec(&csk->ref_count); +} + +static struct cnic_dev *cnic_from_netdev(struct net_device *netdev) +{ + struct cnic_dev *cdev; + + read_lock(&cnic_dev_lock); + list_for_each_entry(cdev, &cnic_dev_list, list) { + if (netdev == cdev->netdev) { + cnic_hold(cdev); + read_unlock(&cnic_dev_lock); + return cdev; + } + } + read_unlock(&cnic_dev_lock); + return NULL; +} + +static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_io *io = &info.data.io; + + info.cmd = DRV_CTL_CTX_WR_CMD; + io->cid_addr = cid_addr; + io->offset = off; + io->data = val; + ethdev->drv_ctl(dev->netdev, &info); +} + +static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) 
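/* as with cnic_ctx_wr() above, the indirect register write is funneled
 * through the bnx2 driver's drv_ctl() hook (DRV_CTL_IO_WR_CMD) rather
 * than touching bnx2 registers directly */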
+{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_io *io = &info.data.io; + + info.cmd = DRV_CTL_IO_WR_CMD; + io->offset = off; + io->data = val; + ethdev->drv_ctl(dev->netdev, &info); +} + +static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_io *io = &info.data.io; + + info.cmd = DRV_CTL_IO_RD_CMD; + io->offset = off; + ethdev->drv_ctl(dev->netdev, &info); + return io->data; +} + +static int cnic_in_use(struct cnic_sock *csk) +{ + return test_bit(SK_F_INUSE, &csk->flags); +} + +static void cnic_kwq_completion(struct cnic_dev *dev, u32 count) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + + info.cmd = DRV_CTL_COMPLETION_CMD; + info.data.comp.comp_count = count; + ethdev->drv_ctl(dev->netdev, &info); +} + +static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, + struct cnic_sock *csk) +{ + struct iscsi_path path_req; + char *buf = NULL; + u16 len = 0; + u32 msg_type = ISCSI_KEVENT_IF_DOWN; + struct cnic_ulp_ops *ulp_ops; + + if (cp->uio_dev == -1) + return -ENODEV; + + if (csk) { + len = sizeof(path_req); + buf = (char *) &path_req; + memset(&path_req, 0, len); + + msg_type = ISCSI_KEVENT_PATH_REQ; + path_req.handle = (u64) csk->l5_cid; + if (test_bit(SK_F_IPV6, &csk->flags)) { + memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0], + sizeof(struct in6_addr)); + path_req.ip_addr_len = 16; + } else { + memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0], + sizeof(struct in_addr)); + path_req.ip_addr_len = 4; + } + path_req.vlan_id = csk->vlan_id; + path_req.pmtu = csk->mtu; + } + + rcu_read_lock(); + ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]); + if (ulp_ops) + ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len); + rcu_read_unlock(); + return 0; +} + +static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, + char *buf, u16 len) +{ + int rc = -EINVAL; + + switch (msg_type) { + case ISCSI_UEVENT_PATH_UPDATE: { + struct cnic_local *cp; + u32 l5_cid; + struct cnic_sock *csk; + struct iscsi_path *path_resp; + + if (len < sizeof(*path_resp)) + break; + + path_resp = (struct iscsi_path *) buf; + cp = dev->cnic_priv; + l5_cid = (u32) path_resp->handle; + if (l5_cid >= MAX_CM_SK_TBL_SZ) + break; + + csk = &cp->csk_tbl[l5_cid]; + csk_hold(csk); + if (cnic_in_use(csk)) { + memcpy(csk->ha, path_resp->mac_addr, 6); + if (test_bit(SK_F_IPV6, &csk->flags)) + memcpy(&csk->src_ip[0], &path_resp->src.v6_addr, + sizeof(struct in6_addr)); + else + memcpy(&csk->src_ip[0], &path_resp->src.v4_addr, + sizeof(struct in_addr)); + if (is_valid_ether_addr(csk->ha)) + cnic_cm_set_pg(csk); + } + csk_put(csk); + rc = 0; + } + } + + return rc; +} + +static int cnic_offld_prep(struct cnic_sock *csk) +{ + if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) + return 0; + + if (!test_bit(SK_F_CONNECT_START, &csk->flags)) { + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + return 0; + } + + return 1; +} + +static int cnic_close_prep(struct cnic_sock *csk) +{ + clear_bit(SK_F_CONNECT_START, &csk->flags); + smp_mb__after_clear_bit(); + + if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { + while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) + msleep(1); + + return 1; + } + return 0; +} + +static int cnic_abort_prep(struct cnic_sock *csk) +{ + clear_bit(SK_F_CONNECT_START, &csk->flags); + 
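/* pair the clear_bit() with a full barrier so other CPUs observe
 * SK_F_CONNECT_START dropping before we spin on the offload-scheduled
 * bit below */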
smp_mb__after_clear_bit(); + + while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) + msleep(1); + + if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { + csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP; + return 1; + } + + return 0; +} + +int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) +{ + struct cnic_dev *dev; + + if (ulp_type >= MAX_CNIC_ULP_TYPE) { + printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n", + ulp_type); + return -EINVAL; + } + mutex_lock(&cnic_lock); + if (cnic_ulp_tbl[ulp_type]) { + printk(KERN_ERR PFX "cnic_register_driver: Type %d has already " + "been registered\n", ulp_type); + mutex_unlock(&cnic_lock); + return -EBUSY; + } + + read_lock(&cnic_dev_lock); + list_for_each_entry(dev, &cnic_dev_list, list) { + struct cnic_local *cp = dev->cnic_priv; + + clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]); + } + read_unlock(&cnic_dev_lock); + + rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops); + mutex_unlock(&cnic_lock); + + /* Prevent race conditions with netdev_event */ + rtnl_lock(); + read_lock(&cnic_dev_lock); + list_for_each_entry(dev, &cnic_dev_list, list) { + struct cnic_local *cp = dev->cnic_priv; + + if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type])) + ulp_ops->cnic_init(dev); + } + read_unlock(&cnic_dev_lock); + rtnl_unlock(); + + return 0; +} + +int cnic_unregister_driver(int ulp_type) +{ + struct cnic_dev *dev; + + if (ulp_type >= MAX_CNIC_ULP_TYPE) { + printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n", + ulp_type); + return -EINVAL; + } + mutex_lock(&cnic_lock); + if (!cnic_ulp_tbl[ulp_type]) { + printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not " + "been registered\n", ulp_type); + goto out_unlock; + } + read_lock(&cnic_dev_lock); + list_for_each_entry(dev, &cnic_dev_list, list) { + struct cnic_local *cp = dev->cnic_priv; + + if (rcu_dereference(cp->ulp_ops[ulp_type])) { + printk(KERN_ERR PFX "cnic_unregister_driver: Type %d " + "still has devices registered\n", ulp_type); + read_unlock(&cnic_dev_lock); + goto out_unlock; + } + } + read_unlock(&cnic_dev_lock); + + rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL); + + mutex_unlock(&cnic_lock); + synchronize_rcu(); + return 0; + +out_unlock: + mutex_unlock(&cnic_lock); + return -EINVAL; +} + +static int cnic_start_hw(struct cnic_dev *); +static void cnic_stop_hw(struct cnic_dev *); + +static int cnic_register_device(struct cnic_dev *dev, int ulp_type, + void *ulp_ctx) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_ulp_ops *ulp_ops; + + if (ulp_type >= MAX_CNIC_ULP_TYPE) { + printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n", + ulp_type); + return -EINVAL; + } + mutex_lock(&cnic_lock); + if (cnic_ulp_tbl[ulp_type] == NULL) { + printk(KERN_ERR PFX "cnic_register_device: Driver with type %d " + "has not been registered\n", ulp_type); + mutex_unlock(&cnic_lock); + return -EAGAIN; + } + if (rcu_dereference(cp->ulp_ops[ulp_type])) { + printk(KERN_ERR PFX "cnic_register_device: Type %d has already " + "been registered to this device\n", ulp_type); + mutex_unlock(&cnic_lock); + return -EBUSY; + } + + clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]); + cp->ulp_handle[ulp_type] = ulp_ctx; + ulp_ops = cnic_ulp_tbl[ulp_type]; + rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops); + cnic_hold(dev); + + if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) + if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type])) + ulp_ops->cnic_start(cp->ulp_handle[ulp_type]); + + mutex_unlock(&cnic_lock); + + return 0; + +} +EXPORT_SYMBOL(cnic_register_driver); 
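/* An illustrative sketch, not part of this patch, of how an upper-layer
 * protocol (ULP) driver such as bnx2i is expected to attach through the
 * hooks above: it registers a cnic_ulp_ops table for its ULP type, after
 * which cnic_ulp_init() below invokes ->cnic_init() for each cnic device.
 * The my_* names are hypothetical:
 *
 *	static struct cnic_ulp_ops my_iscsi_ulp_ops = {
 *		.cnic_init	= my_iscsi_init,
 *		.cnic_start	= my_iscsi_start,
 *		.cnic_stop	= my_iscsi_stop,
 *		.indicate_kcqes	= my_iscsi_indicate_kcqes,
 *	};
 *
 *	err = cnic_register_driver(CNIC_ULP_ISCSI, &my_iscsi_ulp_ops);
 */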
+ +static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) +{ + struct cnic_local *cp = dev->cnic_priv; + + if (ulp_type >= MAX_CNIC_ULP_TYPE) { + printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n", + ulp_type); + return -EINVAL; + } + mutex_lock(&cnic_lock); + if (rcu_dereference(cp->ulp_ops[ulp_type])) { + rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL); + cnic_put(dev); + } else { + printk(KERN_ERR PFX "cnic_unregister_device: device not " + "registered to this ulp type %d\n", ulp_type); + mutex_unlock(&cnic_lock); + return -EINVAL; + } + mutex_unlock(&cnic_lock); + + synchronize_rcu(); + + return 0; +} +EXPORT_SYMBOL(cnic_unregister_driver); + +static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id) +{ + id_tbl->start = start_id; + id_tbl->max = size; + id_tbl->next = 0; + spin_lock_init(&id_tbl->lock); + id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL); + if (!id_tbl->table) + return -ENOMEM; + + return 0; +} + +static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl) +{ + kfree(id_tbl->table); + id_tbl->table = NULL; +} + +static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id) +{ + int ret = -1; + + id -= id_tbl->start; + if (id >= id_tbl->max) + return ret; + + spin_lock(&id_tbl->lock); + if (!test_bit(id, id_tbl->table)) { + set_bit(id, id_tbl->table); + ret = 0; + } + spin_unlock(&id_tbl->lock); + return ret; +} + +/* Returns -1 if not successful */ +static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl) +{ + u32 id; + + spin_lock(&id_tbl->lock); + id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next); + if (id >= id_tbl->max) { + id = -1; + if (id_tbl->next != 0) { + id = find_first_zero_bit(id_tbl->table, id_tbl->next); + if (id >= id_tbl->next) + id = -1; + } + } + + if (id < id_tbl->max) { + set_bit(id, id_tbl->table); + id_tbl->next = (id + 1) & (id_tbl->max - 1); + id += id_tbl->start; + } + + spin_unlock(&id_tbl->lock); + + return id; +} + +static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id) +{ + if (id == -1) + return; + + id -= id_tbl->start; + if (id >= id_tbl->max) + return; + + clear_bit(id, id_tbl->table); +} + +static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma) +{ + int i; + + if (!dma->pg_arr) + return; + + for (i = 0; i < dma->num_pages; i++) { + if (dma->pg_arr[i]) { + pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE, + dma->pg_arr[i], dma->pg_map_arr[i]); + dma->pg_arr[i] = NULL; + } + } + if (dma->pgtbl) { + pci_free_consistent(dev->pcidev, dma->pgtbl_size, + dma->pgtbl, dma->pgtbl_map); + dma->pgtbl = NULL; + } + kfree(dma->pg_arr); + dma->pg_arr = NULL; + dma->num_pages = 0; +} + +static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) +{ + int i; + u32 *page_table = dma->pgtbl; + + for (i = 0; i < dma->num_pages; i++) { + /* Each entry needs to be in big endian format. 
*/ + *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); + page_table++; + *page_table = (u32) dma->pg_map_arr[i]; + page_table++; + } +} + +static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, + int pages, int use_pg_tbl) +{ + int i, size; + struct cnic_local *cp = dev->cnic_priv; + + size = pages * (sizeof(void *) + sizeof(dma_addr_t)); + dma->pg_arr = kzalloc(size, GFP_ATOMIC); + if (dma->pg_arr == NULL) + return -ENOMEM; + + dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages); + dma->num_pages = pages; + + for (i = 0; i < pages; i++) { + dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev, + BCM_PAGE_SIZE, + &dma->pg_map_arr[i]); + if (dma->pg_arr[i] == NULL) + goto error; + } + if (!use_pg_tbl) + return 0; + + dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) & + ~(BCM_PAGE_SIZE - 1); + dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size, + &dma->pgtbl_map); + if (dma->pgtbl == NULL) + goto error; + + cp->setup_pgtbl(dev, dma); + + return 0; + +error: + cnic_free_dma(dev, dma); + return -ENOMEM; +} + +static void cnic_free_resc(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int i = 0; + + if (cp->cnic_uinfo) { + cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); + while (cp->uio_dev != -1 && i < 15) { + msleep(100); + i++; + } + uio_unregister_device(cp->cnic_uinfo); + kfree(cp->cnic_uinfo); + cp->cnic_uinfo = NULL; + } + + if (cp->l2_buf) { + pci_free_consistent(dev->pcidev, cp->l2_buf_size, + cp->l2_buf, cp->l2_buf_map); + cp->l2_buf = NULL; + } + + if (cp->l2_ring) { + pci_free_consistent(dev->pcidev, cp->l2_ring_size, + cp->l2_ring, cp->l2_ring_map); + cp->l2_ring = NULL; + } + + for (i = 0; i < cp->ctx_blks; i++) { + if (cp->ctx_arr[i].ctx) { + pci_free_consistent(dev->pcidev, cp->ctx_blk_size, + cp->ctx_arr[i].ctx, + cp->ctx_arr[i].mapping); + cp->ctx_arr[i].ctx = NULL; + } + } + kfree(cp->ctx_arr); + cp->ctx_arr = NULL; + cp->ctx_blks = 0; + + cnic_free_dma(dev, &cp->gbl_buf_info); + cnic_free_dma(dev, &cp->conn_buf_info); + cnic_free_dma(dev, &cp->kwq_info); + cnic_free_dma(dev, &cp->kcq_info); + kfree(cp->iscsi_tbl); + cp->iscsi_tbl = NULL; + kfree(cp->ctx_tbl); + cp->ctx_tbl = NULL; + + cnic_free_id_tbl(&cp->cid_tbl); +} + +static int cnic_alloc_context(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + if (CHIP_NUM(cp) == CHIP_NUM_5709) { + int i, k, arr_size; + + cp->ctx_blk_size = BCM_PAGE_SIZE; + cp->cids_per_blk = BCM_PAGE_SIZE / 128; + arr_size = BNX2_MAX_CID / cp->cids_per_blk * + sizeof(struct cnic_ctx); + cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); + if (cp->ctx_arr == NULL) + return -ENOMEM; + + k = 0; + for (i = 0; i < 2; i++) { + u32 j, reg, off, lo, hi; + + if (i == 0) + off = BNX2_PG_CTX_MAP; + else + off = BNX2_ISCSI_CTX_MAP; + + reg = cnic_reg_rd_ind(dev, off); + lo = reg >> 16; + hi = reg & 0xffff; + for (j = lo; j < hi; j += cp->cids_per_blk, k++) + cp->ctx_arr[k].cid = j; + } + + cp->ctx_blks = k; + if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) { + cp->ctx_blks = 0; + return -ENOMEM; + } + + for (i = 0; i < cp->ctx_blks; i++) { + cp->ctx_arr[i].ctx = + pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE, + &cp->ctx_arr[i].mapping); + if (cp->ctx_arr[i].ctx == NULL) + return -ENOMEM; + } + } + return 0; +} + +static int cnic_alloc_bnx2_resc(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct uio_info *uinfo; + int ret; + + ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1); + if (ret) + goto error; + cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr; + 
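/* the kcq (kernel completion queue) mirrors the kwq just set up:
 * work requests go to the chip through the kwq, completions come
 * back through the kcq */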
+ ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1); + if (ret) + goto error; + cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr; + + ret = cnic_alloc_context(dev); + if (ret) + goto error; + + cp->l2_ring_size = 2 * BCM_PAGE_SIZE; + cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size, + &cp->l2_ring_map); + if (!cp->l2_ring) + goto error; + + cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; + cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size); + cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size, + &cp->l2_buf_map); + if (!cp->l2_buf) + goto error; + + uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC); + if (!uinfo) + goto error; + + uinfo->mem[0].addr = dev->netdev->base_addr; + uinfo->mem[0].internal_addr = dev->regview; + uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; + uinfo->mem[0].memtype = UIO_MEM_PHYS; + + uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK; + if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) + uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; + else + uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE; + uinfo->mem[1].memtype = UIO_MEM_LOGICAL; + + uinfo->mem[2].addr = (unsigned long) cp->l2_ring; + uinfo->mem[2].size = cp->l2_ring_size; + uinfo->mem[2].memtype = UIO_MEM_LOGICAL; + + uinfo->mem[3].addr = (unsigned long) cp->l2_buf; + uinfo->mem[3].size = cp->l2_buf_size; + uinfo->mem[3].memtype = UIO_MEM_LOGICAL; + + uinfo->name = "bnx2_cnic"; + uinfo->version = CNIC_MODULE_VERSION; + uinfo->irq = UIO_IRQ_CUSTOM; + + uinfo->open = cnic_uio_open; + uinfo->release = cnic_uio_close; + + uinfo->priv = dev; + + ret = uio_register_device(&dev->pcidev->dev, uinfo); + if (ret) { + kfree(uinfo); + goto error; + } + + cp->cnic_uinfo = uinfo; + + return 0; + +error: + cnic_free_resc(dev); + return ret; +} + +static inline u32 cnic_kwq_avail(struct cnic_local *cp) +{ + return cp->max_kwq_idx - + ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx); +} + +static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num_wqes) +{ + struct cnic_local *cp = dev->cnic_priv; + struct kwqe *prod_qe; + u16 prod, sw_prod, i; + + if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) + return -EAGAIN; /* bnx2 is down */ + + spin_lock_bh(&cp->cnic_ulp_lock); + if (num_wqes > cnic_kwq_avail(cp) && + !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) { + spin_unlock_bh(&cp->cnic_ulp_lock); + return -EAGAIN; + } + + cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT; + + prod = cp->kwq_prod_idx; + sw_prod = prod & MAX_KWQ_IDX; + for (i = 0; i < num_wqes; i++) { + prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)]; + memcpy(prod_qe, wqes[i], sizeof(struct kwqe)); + prod++; + sw_prod = prod & MAX_KWQ_IDX; + } + cp->kwq_prod_idx = prod; + + CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx); + + spin_unlock_bh(&cp->cnic_ulp_lock); + return 0; +} + +static void service_kcqes(struct cnic_dev *dev, int num_cqes) +{ + struct cnic_local *cp = dev->cnic_priv; + int i, j; + + i = 0; + j = 1; + while (num_cqes) { + struct cnic_ulp_ops *ulp_ops; + int ulp_type; + u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag; + u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK; + + if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION)) + cnic_kwq_completion(dev, 1); + + while (j < num_cqes) { + u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; + + if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer) + break; + + if (unlikely(next_op & KCQE_RAMROD_COMPLETION)) + cnic_kwq_completion(dev, 1); + j++; + } + + if (kcqe_layer 
== KCQE_FLAGS_LAYER_MASK_L5_RDMA) + ulp_type = CNIC_ULP_RDMA; + else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI) + ulp_type = CNIC_ULP_ISCSI; + else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4) + ulp_type = CNIC_ULP_L4; + else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2) + goto end; + else { + printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n", + dev->netdev->name, kcqe_op_flag); + goto end; + } + + rcu_read_lock(); + ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); + if (likely(ulp_ops)) { + ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], + cp->completed_kcq + i, j); + } + rcu_read_unlock(); +end: + num_cqes -= j; + i += j; + j = 1; + } + return; +} + +static u16 cnic_bnx2_next_idx(u16 idx) +{ + return idx + 1; +} + +static u16 cnic_bnx2_hw_idx(u16 idx) +{ + return idx; +} + +static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) +{ + struct cnic_local *cp = dev->cnic_priv; + u16 i, ri, last; + struct kcqe *kcqe; + int kcqe_cnt = 0, last_cnt = 0; + + i = ri = last = *sw_prod; + ri &= MAX_KCQ_IDX; + + while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { + kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; + cp->completed_kcq[kcqe_cnt++] = kcqe; + i = cp->next_idx(i); + ri = i & MAX_KCQ_IDX; + if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) { + last_cnt = kcqe_cnt; + last = i; + } + } + + *sw_prod = last; + return last_cnt; +} + +static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp) +{ + u16 rx_cons = *cp->rx_cons_ptr; + u16 tx_cons = *cp->tx_cons_ptr; + + if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { + cp->tx_cons = tx_cons; + cp->rx_cons = rx_cons; + uio_event_notify(cp->cnic_uinfo); + } +} + +static int cnic_service_bnx2(void *data, void *status_blk) +{ + struct cnic_dev *dev = data; + struct status_block *sblk = status_blk; + struct cnic_local *cp = dev->cnic_priv; + u32 status_idx = sblk->status_idx; + u16 hw_prod, sw_prod; + int kcqe_cnt; + + if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) + return status_idx; + + cp->kwq_con_idx = *cp->kwq_con_idx_ptr; + + hw_prod = sblk->status_completion_producer_index; + sw_prod = cp->kcq_prod_idx; + while (sw_prod != hw_prod) { + kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); + if (kcqe_cnt == 0) + goto done; + + service_kcqes(dev, kcqe_cnt); + + /* Tell compiler that status_blk fields can change. */ + barrier(); + if (status_idx != sblk->status_idx) { + status_idx = sblk->status_idx; + cp->kwq_con_idx = *cp->kwq_con_idx_ptr; + hw_prod = sblk->status_completion_producer_index; + } else + break; + } + +done: + CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); + + cp->kcq_prod_idx = sw_prod; + + cnic_chk_bnx2_pkt_rings(cp); + return status_idx; +} + +static void cnic_service_bnx2_msix(unsigned long data) +{ + struct cnic_dev *dev = (struct cnic_dev *) data; + struct cnic_local *cp = dev->cnic_priv; + struct status_block_msix *status_blk = cp->bnx2_status_blk; + u32 status_idx = status_blk->status_idx; + u16 hw_prod, sw_prod; + int kcqe_cnt; + + cp->kwq_con_idx = status_blk->status_cmd_consumer_index; + + hw_prod = status_blk->status_completion_producer_index; + sw_prod = cp->kcq_prod_idx; + while (sw_prod != hw_prod) { + kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); + if (kcqe_cnt == 0) + goto done; + + service_kcqes(dev, kcqe_cnt); + + /* Tell compiler that status_blk fields can change. 
*/ + barrier(); + if (status_idx != status_blk->status_idx) { + status_idx = status_blk->status_idx; + cp->kwq_con_idx = status_blk->status_cmd_consumer_index; + hw_prod = status_blk->status_completion_producer_index; + } else + break; + } + +done: + CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); + cp->kcq_prod_idx = sw_prod; + + cnic_chk_bnx2_pkt_rings(cp); + + cp->last_status_idx = status_idx; + CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); +} + +static irqreturn_t cnic_irq(int irq, void *dev_instance) +{ + struct cnic_dev *dev = dev_instance; + struct cnic_local *cp = dev->cnic_priv; + u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; + + if (cp->ack_int) + cp->ack_int(dev); + + prefetch(cp->status_blk); + prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); + + if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) + tasklet_schedule(&cp->cnic_irq_task); + + return IRQ_HANDLED; +} + +static void cnic_ulp_stop(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int if_type; + + rcu_read_lock(); + for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { + struct cnic_ulp_ops *ulp_ops; + + ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); + if (!ulp_ops) + continue; + + if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) + ulp_ops->cnic_stop(cp->ulp_handle[if_type]); + } + rcu_read_unlock(); +} + +static void cnic_ulp_start(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int if_type; + + rcu_read_lock(); + for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { + struct cnic_ulp_ops *ulp_ops; + + ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); + if (!ulp_ops || !ulp_ops->cnic_start) + continue; + + if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type])) + ulp_ops->cnic_start(cp->ulp_handle[if_type]); + } + rcu_read_unlock(); +} + +static int cnic_ctl(void *data, struct cnic_ctl_info *info) +{ + struct cnic_dev *dev = data; + + switch (info->cmd) { + case CNIC_CTL_STOP_CMD: + cnic_hold(dev); + mutex_lock(&cnic_lock); + + cnic_ulp_stop(dev); + cnic_stop_hw(dev); + + mutex_unlock(&cnic_lock); + cnic_put(dev); + break; + case CNIC_CTL_START_CMD: + cnic_hold(dev); + mutex_lock(&cnic_lock); + + if (!cnic_start_hw(dev)) + cnic_ulp_start(dev); + + mutex_unlock(&cnic_lock); + cnic_put(dev); + break; + default: + return -EINVAL; + } + return 0; +} + +static void cnic_ulp_init(struct cnic_dev *dev) +{ + int i; + struct cnic_local *cp = dev->cnic_priv; + + rcu_read_lock(); + for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { + struct cnic_ulp_ops *ulp_ops; + + ulp_ops = rcu_dereference(cnic_ulp_tbl[i]); + if (!ulp_ops || !ulp_ops->cnic_init) + continue; + + if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) + ulp_ops->cnic_init(dev); + + } + rcu_read_unlock(); +} + +static void cnic_ulp_exit(struct cnic_dev *dev) +{ + int i; + struct cnic_local *cp = dev->cnic_priv; + + rcu_read_lock(); + for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { + struct cnic_ulp_ops *ulp_ops; + + ulp_ops = rcu_dereference(cnic_ulp_tbl[i]); + if (!ulp_ops || !ulp_ops->cnic_exit) + continue; + + if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) + ulp_ops->cnic_exit(dev); + + } + rcu_read_unlock(); +} + +static int cnic_cm_offload_pg(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_offload_pg *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->op_code = 
L4_KWQE_OPCODE_VALUE_OFFLOAD_PG; + l4kwqe->flags = + L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT; + l4kwqe->l2hdr_nbytes = ETH_HLEN; + + l4kwqe->da0 = csk->ha[0]; + l4kwqe->da1 = csk->ha[1]; + l4kwqe->da2 = csk->ha[2]; + l4kwqe->da3 = csk->ha[3]; + l4kwqe->da4 = csk->ha[4]; + l4kwqe->da5 = csk->ha[5]; + + l4kwqe->sa0 = dev->mac_addr[0]; + l4kwqe->sa1 = dev->mac_addr[1]; + l4kwqe->sa2 = dev->mac_addr[2]; + l4kwqe->sa3 = dev->mac_addr[3]; + l4kwqe->sa4 = dev->mac_addr[4]; + l4kwqe->sa5 = dev->mac_addr[5]; + + l4kwqe->etype = ETH_P_IP; + l4kwqe->ipid_count = DEF_IPID_COUNT; + l4kwqe->host_opaque = csk->l5_cid; + + if (csk->vlan_id) { + l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING; + l4kwqe->vlan_tag = csk->vlan_id; + l4kwqe->l2hdr_nbytes += 4; + } + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_update_pg(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_update_pg *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG; + l4kwqe->flags = + L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT; + l4kwqe->pg_cid = csk->pg_cid; + + l4kwqe->da0 = csk->ha[0]; + l4kwqe->da1 = csk->ha[1]; + l4kwqe->da2 = csk->ha[2]; + l4kwqe->da3 = csk->ha[3]; + l4kwqe->da4 = csk->ha[4]; + l4kwqe->da5 = csk->ha[5]; + + l4kwqe->pg_host_opaque = csk->l5_cid; + l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_upload_pg(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_upload *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG; + l4kwqe->flags = + L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT; + l4kwqe->cid = csk->pg_cid; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_conn_req(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_connect_req1 *l4kwqe1; + struct l4_kwq_connect_req2 *l4kwqe2; + struct l4_kwq_connect_req3 *l4kwqe3; + struct kwqe *wqes[3]; + u8 tcp_flags = 0; + int num_wqes = 2; + + l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1; + l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2; + l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3; + memset(l4kwqe1, 0, sizeof(*l4kwqe1)); + memset(l4kwqe2, 0, sizeof(*l4kwqe2)); + memset(l4kwqe3, 0, sizeof(*l4kwqe3)); + + l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3; + l4kwqe3->flags = + L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT; + l4kwqe3->ka_timeout = csk->ka_timeout; + l4kwqe3->ka_interval = csk->ka_interval; + l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count; + l4kwqe3->tos = csk->tos; + l4kwqe3->ttl = csk->ttl; + l4kwqe3->snd_seq_scale = csk->snd_seq_scale; + l4kwqe3->pmtu = csk->mtu; + l4kwqe3->rcv_buf = csk->rcv_buf; + l4kwqe3->snd_buf = csk->snd_buf; + l4kwqe3->seed = csk->seed; + + wqes[0] = (struct kwqe *) l4kwqe1; + if (test_bit(SK_F_IPV6, &csk->flags)) { + wqes[1] = (struct kwqe *) l4kwqe2; + wqes[2] = (struct kwqe *) l4kwqe3; + num_wqes = 3; + + l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6; + l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2; + l4kwqe2->flags = + L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT | + L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT; + l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]); + 
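/* words 2-4 of the 128-bit IPv6 source and destination addresses;
 * word 1 of each is carried in the CONNECT1 kwqe filled in below */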
l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]); + l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]); + l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]); + l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]); + l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]); + l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) - + sizeof(struct tcphdr); + } else { + wqes[1] = (struct kwqe *) l4kwqe3; + l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) - + sizeof(struct tcphdr); + } + + l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1; + l4kwqe1->flags = + (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) | + L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT; + l4kwqe1->cid = csk->cid; + l4kwqe1->pg_cid = csk->pg_cid; + l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]); + l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]); + l4kwqe1->src_port = be16_to_cpu(csk->src_port); + l4kwqe1->dst_port = be16_to_cpu(csk->dst_port); + if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK) + tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK; + if (csk->tcp_flags & SK_TCP_KEEP_ALIVE) + tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE; + if (csk->tcp_flags & SK_TCP_NAGLE) + tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE; + if (csk->tcp_flags & SK_TCP_TIMESTAMP) + tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP; + if (csk->tcp_flags & SK_TCP_SACK) + tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK; + if (csk->tcp_flags & SK_TCP_SEG_SCALING) + tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING; + + l4kwqe1->tcp_flags = tcp_flags; + + return dev->submit_kwqes(dev, wqes, num_wqes); +} + +static int cnic_cm_close_req(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_close_req *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE; + l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT; + l4kwqe->cid = csk->cid; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_abort_req(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_reset_req *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET; + l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT; + l4kwqe->cid = csk->cid; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, + u32 l5_cid, struct cnic_sock **csk, void *context) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_sock *csk1; + + if (l5_cid >= MAX_CM_SK_TBL_SZ) + return -EINVAL; + + csk1 = &cp->csk_tbl[l5_cid]; + if (atomic_read(&csk1->ref_count)) + return -EAGAIN; + + if (test_and_set_bit(SK_F_INUSE, &csk1->flags)) + return -EBUSY; + + csk1->dev = dev; + csk1->cid = cid; + csk1->l5_cid = l5_cid; + csk1->ulp_type = ulp_type; + csk1->context = context; + + csk1->ka_timeout = DEF_KA_TIMEOUT; + csk1->ka_interval = DEF_KA_INTERVAL; + csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT; + csk1->tos = DEF_TOS; + csk1->ttl = DEF_TTL; + csk1->snd_seq_scale = DEF_SND_SEQ_SCALE; + csk1->rcv_buf = DEF_RCV_BUF; + csk1->snd_buf = DEF_SND_BUF; + csk1->seed = DEF_SEED; + + *csk = csk1; + return 0; +} + +static void cnic_cm_cleanup(struct cnic_sock *csk) +{ + if (csk->src_port) { + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + + cnic_free_id(&cp->csk_port_tbl, csk->src_port); + 
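/* the source port was handed out from csk_port_tbl in cnic_get_route(),
 * so return it to the id allocator before clearing it */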
csk->src_port = 0; + } +} + +static void cnic_close_conn(struct cnic_sock *csk) +{ + if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) { + cnic_cm_upload_pg(csk); + clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); + } + cnic_cm_cleanup(csk); +} + +static int cnic_cm_destroy(struct cnic_sock *csk) +{ + if (!cnic_in_use(csk)) + return -EINVAL; + + csk_hold(csk); + clear_bit(SK_F_INUSE, &csk->flags); + smp_mb__after_clear_bit(); + while (atomic_read(&csk->ref_count) != 1) + msleep(1); + cnic_cm_cleanup(csk); + + csk->flags = 0; + csk_put(csk); + return 0; +} + +static inline u16 cnic_get_vlan(struct net_device *dev, + struct net_device **vlan_dev) +{ + if (dev->priv_flags & IFF_802_1Q_VLAN) { + *vlan_dev = vlan_dev_real_dev(dev); + return vlan_dev_vlan_id(dev); + } + *vlan_dev = dev; + return 0; +} + +static int cnic_get_v4_route(struct sockaddr_in *dst_addr, + struct dst_entry **dst) +{ + struct flowi fl; + int err; + struct rtable *rt; + + memset(&fl, 0, sizeof(fl)); + fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr; + + err = ip_route_output_key(&init_net, &rt, &fl); + if (!err) + *dst = &rt->u.dst; + return err; +} + +static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, + struct dst_entry **dst) +{ +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + struct flowi fl; + + memset(&fl, 0, sizeof(fl)); + ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr); + if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL) + fl.oif = dst_addr->sin6_scope_id; + + *dst = ip6_route_output(&init_net, NULL, &fl); + if (*dst) + return 0; +#endif + + return -ENETUNREACH; +} + +static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr, + int ulp_type) +{ + struct cnic_dev *dev = NULL; + struct dst_entry *dst; + struct net_device *netdev = NULL; + int err = -ENETUNREACH; + + if (dst_addr->sin_family == AF_INET) + err = cnic_get_v4_route(dst_addr, &dst); + else if (dst_addr->sin_family == AF_INET6) { + struct sockaddr_in6 *dst_addr6 = + (struct sockaddr_in6 *) dst_addr; + + err = cnic_get_v6_route(dst_addr6, &dst); + } else + return NULL; + + if (err) + return NULL; + + if (!dst->dev) + goto done; + + cnic_get_vlan(dst->dev, &netdev); + + dev = cnic_from_netdev(netdev); + +done: + dst_release(dst); + if (dev) + cnic_put(dev); + return dev; +} + +static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr) +{ + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + + return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk); +} + +static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr) +{ + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + int is_v6, err, rc = -ENETUNREACH; + struct dst_entry *dst; + struct net_device *realdev; + u32 local_port; + + if (saddr->local.v6.sin6_family == AF_INET6 && + saddr->remote.v6.sin6_family == AF_INET6) + is_v6 = 1; + else if (saddr->local.v4.sin_family == AF_INET && + saddr->remote.v4.sin_family == AF_INET) + is_v6 = 0; + else + return -EINVAL; + + clear_bit(SK_F_IPV6, &csk->flags); + + if (is_v6) { +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + set_bit(SK_F_IPV6, &csk->flags); + err = cnic_get_v6_route(&saddr->remote.v6, &dst); + if (err) + return err; + + if (!dst || dst->error || !dst->dev) + goto err_out; + + memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr, + sizeof(struct in6_addr)); + csk->dst_port = saddr->remote.v6.sin6_port; + local_port = saddr->local.v6.sin6_port; +#else + return rc; +#endif + + } else { + err = 
cnic_get_v4_route(&saddr->remote.v4, &dst); + if (err) + return err; + + if (!dst || dst->error || !dst->dev) + goto err_out; + + csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr; + csk->dst_port = saddr->remote.v4.sin_port; + local_port = saddr->local.v4.sin_port; + } + + csk->vlan_id = cnic_get_vlan(dst->dev, &realdev); + if (realdev != dev->netdev) + goto err_out; + + if (local_port >= CNIC_LOCAL_PORT_MIN && + local_port < CNIC_LOCAL_PORT_MAX) { + if (cnic_alloc_id(&cp->csk_port_tbl, local_port)) + local_port = 0; + } else + local_port = 0; + + if (!local_port) { + local_port = cnic_alloc_new_id(&cp->csk_port_tbl); + if (local_port == -1) { + rc = -ENOMEM; + goto err_out; + } + } + csk->src_port = local_port; + + csk->mtu = dst_mtu(dst); + rc = 0; + +err_out: + dst_release(dst); + return rc; +} + +static void cnic_init_csk_state(struct cnic_sock *csk) +{ + csk->state = 0; + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + clear_bit(SK_F_CLOSING, &csk->flags); +} + +static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) +{ + int err = 0; + + if (!cnic_in_use(csk)) + return -EINVAL; + + if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags)) + return -EINVAL; + + cnic_init_csk_state(csk); + + err = cnic_get_route(csk, saddr); + if (err) + goto err_out; + + err = cnic_resolve_addr(csk, saddr); + if (!err) + return 0; + +err_out: + clear_bit(SK_F_CONNECT_START, &csk->flags); + return err; +} + +static int cnic_cm_abort(struct cnic_sock *csk) +{ + struct cnic_local *cp = csk->dev->cnic_priv; + u32 opcode; + + if (!cnic_in_use(csk)) + return -EINVAL; + + if (cnic_abort_prep(csk)) + return cnic_cm_abort_req(csk); + + /* Getting here means that we haven't started connect, or + * connect was not successful. + */ + + csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP; + if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) + opcode = csk->state; + else + opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; + cp->close_conn(csk, opcode); + + return 0; +} + +static int cnic_cm_close(struct cnic_sock *csk) +{ + if (!cnic_in_use(csk)) + return -EINVAL; + + if (cnic_close_prep(csk)) { + csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; + return cnic_cm_close_req(csk); + } + return 0; +} + +static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk, + u8 opcode) +{ + struct cnic_ulp_ops *ulp_ops; + int ulp_type = csk->ulp_type; + + rcu_read_lock(); + ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); + if (ulp_ops) { + if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE) + ulp_ops->cm_connect_complete(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) + ulp_ops->cm_close_complete(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) + ulp_ops->cm_remote_abort(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) + ulp_ops->cm_abort_complete(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED) + ulp_ops->cm_remote_close(csk); + } + rcu_read_unlock(); +} + +static int cnic_cm_set_pg(struct cnic_sock *csk) +{ + if (cnic_offld_prep(csk)) { + if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) + cnic_cm_update_pg(csk); + else + cnic_cm_offload_pg(csk); + } + return 0; +} + +static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 l5_cid = kcqe->pg_host_opaque; + u8 opcode = kcqe->op_code; + struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; + + csk_hold(csk); + if (!cnic_in_use(csk)) + goto done; + + if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + 
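/* an UPDATE_PG completion only refreshed the destination MAC of an
 * existing path-group entry, so no new connect request is issued */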
goto done; + } + csk->pg_cid = kcqe->pg_cid; + set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); + cnic_cm_conn_req(csk); + +done: + csk_put(csk); +} + +static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) +{ + struct cnic_local *cp = dev->cnic_priv; + struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe; + u8 opcode = l4kcqe->op_code; + u32 l5_cid; + struct cnic_sock *csk; + + if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG || + opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { + cnic_cm_process_offld_pg(dev, l4kcqe); + return; + } + + l5_cid = l4kcqe->conn_id; + if (opcode & 0x80) + l5_cid = l4kcqe->cid; + if (l5_cid >= MAX_CM_SK_TBL_SZ) + return; + + csk = &cp->csk_tbl[l5_cid]; + csk_hold(csk); + + if (!cnic_in_use(csk)) { + csk_put(csk); + return; + } + + switch (opcode) { + case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE: + if (l4kcqe->status == 0) + set_bit(SK_F_OFFLD_COMPLETE, &csk->flags); + + smp_mb__before_clear_bit(); + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + cnic_cm_upcall(cp, csk, opcode); + break; + + case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: + if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) + csk->state = opcode; + /* fall through */ + case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: + case L4_KCQE_OPCODE_VALUE_RESET_COMP: + cp->close_conn(csk, opcode); + break; + + case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED: + cnic_cm_upcall(cp, csk, opcode); + break; + } + csk_put(csk); +} + +static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num) +{ + struct cnic_dev *dev = data; + int i; + + for (i = 0; i < num; i++) + cnic_cm_process_kcqe(dev, kcqe[i]); +} + +static struct cnic_ulp_ops cm_ulp_ops = { + .indicate_kcqes = cnic_cm_indicate_kcqe, +}; + +static void cnic_cm_free_mem(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + kfree(cp->csk_tbl); + cp->csk_tbl = NULL; + cnic_free_id_tbl(&cp->csk_port_tbl); +} + +static int cnic_cm_alloc_mem(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ, + GFP_KERNEL); + if (!cp->csk_tbl) + return -ENOMEM; + + if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, + CNIC_LOCAL_PORT_MIN)) { + cnic_cm_free_mem(dev); + return -ENOMEM; + } + return 0; +} + +static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode) +{ + if ((opcode == csk->state) || + (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED && + csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) { + if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) + return 1; + } + return 0; +} + +static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode) +{ + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + + clear_bit(SK_F_CONNECT_START, &csk->flags); + if (cnic_ready_to_close(csk, opcode)) { + cnic_close_conn(csk); + cnic_cm_upcall(cp, csk, opcode); + } +} + +static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev) +{ +} + +static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev) +{ + u32 seed; + + get_random_bytes(&seed, 4); + cnic_ctx_wr(dev, 45, 0, seed); + return 0; +} + +static int cnic_cm_open(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int err; + + err = cnic_cm_alloc_mem(dev); + if (err) + return err; + + err = cp->start_cm(dev); + + if (err) + goto err_out; + + dev->cm_create = cnic_cm_create; + dev->cm_destroy = cnic_cm_destroy; + dev->cm_connect = cnic_cm_connect; + dev->cm_abort = cnic_cm_abort; + dev->cm_close = cnic_cm_close; + dev->cm_select_dev = cnic_cm_select_dev; + + cp->ulp_handle[CNIC_ULP_L4] = 
dev; + rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops); + return 0; + +err_out: + cnic_cm_free_mem(dev); + return err; +} + +static int cnic_cm_shutdown(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int i; + + cp->stop_cm(dev); + + if (!cp->csk_tbl) + return 0; + + for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) { + struct cnic_sock *csk = &cp->csk_tbl[i]; + + clear_bit(SK_F_INUSE, &csk->flags); + cnic_cm_cleanup(csk); + } + cnic_cm_free_mem(dev); + + return 0; +} + +static void cnic_init_context(struct cnic_dev *dev, u32 cid) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 cid_addr; + int i; + + if (CHIP_NUM(cp) == CHIP_NUM_5709) + return; + + cid_addr = GET_CID_ADDR(cid); + + for (i = 0; i < CTX_SIZE; i += 4) + cnic_ctx_wr(dev, cid_addr, i, 0); +} + +static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) +{ + struct cnic_local *cp = dev->cnic_priv; + int ret = 0, i; + u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0; + + if (CHIP_NUM(cp) != CHIP_NUM_5709) + return 0; + + for (i = 0; i < cp->ctx_blks; i++) { + int j; + u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; + u32 val; + + memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE); + + CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, + (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); + CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1, + (u64) cp->ctx_arr[i].mapping >> 32); + CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx | + BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); + for (j = 0; j < 10; j++) { + + val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL); + if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) + break; + udelay(5); + } + if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { + ret = -EBUSY; + break; + } + } + return ret; +} + +static void cnic_free_irq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + cp->disable_int_sync(dev); + tasklet_disable(&cp->cnic_irq_task); + free_irq(ethdev->irq_arr[0].vector, dev); + } +} + +static int cnic_init_bnx2_irq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + int err, i = 0; + int sblk_num = cp->status_blk_num; + u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) + + BNX2_HC_SB_CONFIG_1; + + CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT); + + CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8); + CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220); + CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); + + cp->bnx2_status_blk = cp->status_blk; + cp->last_status_idx = cp->bnx2_status_blk->status_idx; + tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix, + (unsigned long) dev); + err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, + "cnic", dev); + if (err) { + tasklet_disable(&cp->cnic_irq_task); + return err; + } + while (cp->bnx2_status_blk->status_completion_producer_index && + i < 10) { + CNIC_WR(dev, BNX2_HC_COALESCE_NOW, + 1 << (11 + sblk_num)); + udelay(10); + i++; + barrier(); + } + if (cp->bnx2_status_blk->status_completion_producer_index) { + cnic_free_irq(dev); + goto failed; + } + + } else { + struct status_block *sblk = cp->status_blk; + u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND); + int i = 0; + + while (sblk->status_completion_producer_index && i < 10) { + CNIC_WR(dev, BNX2_HC_COMMAND, + hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + udelay(10); + i++; + barrier(); + } + 
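/* The writes above force status block updates; if the completion
+ * producer index still has not cleared after ten attempts, the
+ * KCQ is unusable and initialization fails below.
+ */
+ 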
if (sblk->status_completion_producer_index) + goto failed; + + } + return 0; + +failed: + printk(KERN_ERR PFX "%s: " "KCQ index not resetting to 0.\n", + dev->netdev->name); + return -EBUSY; +} + +static void cnic_enable_bnx2_int(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) + return; + + CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); +} + +static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) + return; + + CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT); + CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD); + synchronize_irq(ethdev->irq_arr[0].vector); +} + +static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + u32 cid_addr, tx_cid, sb_id; + u32 val, offset0, offset1, offset2, offset3; + int i; + struct tx_bd *txbd; + dma_addr_t buf_map; + struct status_block *s_blk = cp->status_blk; + + sb_id = cp->status_blk_num; + tx_cid = 20; + cnic_init_context(dev, tx_cid); + cnic_init_context(dev, tx_cid + 1); + cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + struct status_block_msix *sblk = cp->status_blk; + + tx_cid = TX_TSS_CID + sb_id - 1; + cnic_init_context(dev, tx_cid); + CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | + (TX_TSS_CID << 7)); + cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; + } + cp->tx_cons = *cp->tx_cons_ptr; + + cid_addr = GET_CID_ADDR(tx_cid); + if (CHIP_NUM(cp) == CHIP_NUM_5709) { + u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40; + + for (i = 0; i < PHY_CTX_SIZE; i += 4) + cnic_ctx_wr(dev, cid_addr2, i, 0); + + offset0 = BNX2_L2CTX_TYPE_XI; + offset1 = BNX2_L2CTX_CMD_TYPE_XI; + offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; + offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; + } else { + offset0 = BNX2_L2CTX_TYPE; + offset1 = BNX2_L2CTX_CMD_TYPE; + offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; + offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; + } + val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; + cnic_ctx_wr(dev, cid_addr, offset0, val); + + val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); + cnic_ctx_wr(dev, cid_addr, offset1, val); + + txbd = (struct tx_bd *) cp->l2_ring; + + buf_map = cp->l2_buf_map; + for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { + txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; + txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; + } + val = (u64) cp->l2_ring_map >> 32; + cnic_ctx_wr(dev, cid_addr, offset2, val); + txbd->tx_bd_haddr_hi = val; + + val = (u64) cp->l2_ring_map & 0xffffffff; + cnic_ctx_wr(dev, cid_addr, offset3, val); + txbd->tx_bd_haddr_lo = val; +} + +static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + u32 cid_addr, sb_id, val, coal_reg, coal_val; + int i; + struct rx_bd *rxbd; + struct status_block *s_blk = cp->status_blk; + + sb_id = cp->status_blk_num; + cnic_init_context(dev, 2); + cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2; + coal_reg = BNX2_HC_COMMAND; + coal_val = CNIC_RD(dev, coal_reg); + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + struct status_block_msix *sblk = cp->status_blk; + + cp->rx_cons_ptr = 
&sblk->status_rx_quick_consumer_index;
+ coal_reg = BNX2_HC_COALESCE_NOW;
+ coal_val = 1 << (11 + sb_id);
+ }
+ i = 0;
+ while (*cp->rx_cons_ptr == 0 && i < 10) {
+ CNIC_WR(dev, coal_reg, coal_val);
+ udelay(10);
+ i++;
+ barrier();
+ }
+ cp->rx_cons = *cp->rx_cons_ptr;
+
+ cid_addr = GET_CID_ADDR(2);
+ val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
+ BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
+ cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
+
+ if (sb_id == 0)
+ val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
+ else
+ val = BNX2_L2CTX_STATUSB_NUM(sb_id);
+ cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
+
+ rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
+ for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
+ dma_addr_t buf_map;
+ int n = (i % cp->l2_rx_ring_size) + 1;
+
+ buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
+ rxbd->rx_bd_len = cp->l2_single_buf_size;
+ rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
+ rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
+ rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+ }
+ val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
+ cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
+ rxbd->rx_bd_haddr_hi = val;
+
+ val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+ cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
+ rxbd->rx_bd_haddr_lo = val;
+
+ val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
+ cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
+}
+
+static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
+{
+ struct kwqe *wqes[1], l2kwqe;
+
+ memset(&l2kwqe, 0, sizeof(l2kwqe));
+ wqes[0] = &l2kwqe;
+ l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
+ (L2_KWQE_OPCODE_VALUE_FLUSH <<
+ KWQE_OPCODE_SHIFT) | 2;
+ dev->submit_kwqes(dev, wqes, 1);
+}
+
+static void cnic_set_bnx2_mac(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 val;
+
+ val = cp->func << 2;
+
+ cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
+
+ val = cnic_reg_rd_ind(dev, cp->shmem_base +
+ BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
+ dev->mac_addr[0] = (u8) (val >> 8);
+ dev->mac_addr[1] = (u8) val;
+
+ CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
+
+ val = cnic_reg_rd_ind(dev, cp->shmem_base +
+ BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
+ dev->mac_addr[2] = (u8) (val >> 24);
+ dev->mac_addr[3] = (u8) (val >> 16);
+ dev->mac_addr[4] = (u8) (val >> 8);
+ dev->mac_addr[5] = (u8) val;
+
+ CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
+
+ val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
+ if (CHIP_NUM(cp) != CHIP_NUM_5709)
+ val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
+
+ CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
+ CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
+ CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
+}
+
+static int cnic_start_bnx2_hw(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct status_block *sblk = cp->status_blk;
+ u32 val;
+ int err;
+
+ cnic_set_bnx2_mac(dev);
+
+ val = CNIC_RD(dev, BNX2_MQ_CONFIG);
+ val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
+ if (BCM_PAGE_BITS > 12)
+ val |= (12 - 8) << 4;
+ else
+ val |= (BCM_PAGE_BITS - 8) << 4;
+
+ CNIC_WR(dev, BNX2_MQ_CONFIG, val);
+
+ CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
+ CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
+ CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
+
+ err = cnic_setup_5709_context(dev, 1);
+ if (err)
+ return err;
+
+ cnic_init_context(dev, KWQ_CID);
+ cnic_init_context(dev, KCQ_CID);
+
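+ /* Program the kernel work queue (KWQ) context below: queue type
+ * and page size, self sequencing, and the page table address the
+ * chip uses to locate the host KWQ pages.
+ */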
+ cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
+ cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+ cp->max_kwq_idx = MAX_KWQ_IDX;
+ cp->kwq_prod_idx = 0;
+ cp->kwq_con_idx = 0;
+ cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;
+
+ if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
+ cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
+ else
+ cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
+
+ /* Initialize the kernel work queue context. */
+ val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+ (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);
+
+ val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+ val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+ val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+ val = (u32) cp->kwq_info.pgtbl_map;
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+ cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
+ cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+ cp->kcq_prod_idx = 0;
+
+ /* Initialize the kernel complete queue context. */
+ val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+ (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);
+
+ val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+ val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+ val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+ val = (u32) cp->kcq_info.pgtbl_map;
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+ cp->int_num = 0;
+ if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+ u32 sb_id = cp->status_blk_num;
+ u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);
+
+ cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+ }
+
+ /* Enable Command Scheduler notification when we write to the
+ * host producer index of the kernel contexts. */
+ CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
+
+ /* Enable Command Scheduler notification when we write to either
+ * the Send Queue or Receive Queue producer indexes of the kernel
+ * bypass contexts. */
+ CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
+ CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
+
+ /* Notify COM when the driver posts an application buffer. */
+ CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
+
+ /* Set the CP and COM doorbells. These two processors poll the
+ * doorbell for a non-zero value before running. This must be done
+ * after setting up the kernel queue contexts.
*/
+ cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
+ cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
+
+ cnic_init_bnx2_tx_ring(dev);
+ cnic_init_bnx2_rx_ring(dev);
+
+ err = cnic_init_bnx2_irq(dev);
+ if (err) {
+ printk(KERN_ERR PFX "%s: cnic_init_irq failed\n",
+ dev->netdev->name);
+ cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+ cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+ return err;
+ }
+
+ return 0;
+}
+
+static int cnic_start_hw(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ int err;
+
+ if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ return -EALREADY;
+
+ err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
+ if (err) {
+ printk(KERN_ERR PFX "%s: register_cnic failed\n",
+ dev->netdev->name);
+ goto err2;
+ }
+
+ dev->regview = ethdev->io_base;
+ cp->chip_id = ethdev->chip_id;
+ pci_dev_get(dev->pcidev);
+ cp->func = PCI_FUNC(dev->pcidev->devfn);
+ cp->status_blk = ethdev->irq_arr[0].status_blk;
+ cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
+
+ err = cp->alloc_resc(dev);
+ if (err) {
+ printk(KERN_ERR PFX "%s: allocate resource failure\n",
+ dev->netdev->name);
+ goto err1;
+ }
+
+ err = cp->start_hw(dev);
+ if (err)
+ goto err1;
+
+ err = cnic_cm_open(dev);
+ if (err)
+ goto err1;
+
+ set_bit(CNIC_F_CNIC_UP, &dev->flags);
+
+ cp->enable_int(dev);
+
+ return 0;
+
+err1:
+ ethdev->drv_unregister_cnic(dev->netdev);
+ cp->free_resc(dev);
+ pci_dev_put(dev->pcidev);
+err2:
+ return err;
+}
+
+static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+
+ cnic_disable_bnx2_int_sync(dev);
+
+ cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+ cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+
+ cnic_init_context(dev, KWQ_CID);
+ cnic_init_context(dev, KCQ_CID);
+
+ cnic_setup_5709_context(dev, 0);
+ cnic_free_irq(dev);
+
+ ethdev->drv_unregister_cnic(dev->netdev);
+
+ cnic_free_resc(dev);
+}
+
+static void cnic_stop_hw(struct cnic_dev *dev)
+{
+ if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+ struct cnic_local *cp = dev->cnic_priv;
+
+ clear_bit(CNIC_F_CNIC_UP, &dev->flags);
+ rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+ synchronize_rcu();
+ cnic_cm_shutdown(dev);
+ cp->stop_hw(dev);
+ pci_dev_put(dev->pcidev);
+ }
+}
+
+static void cnic_free_dev(struct cnic_dev *dev)
+{
+ int i = 0;
+
+ while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
+ msleep(100);
+ i++;
+ }
+ if (atomic_read(&dev->ref_count) != 0)
+ printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
+ " to zero.\n", dev->netdev->name);
+
+ printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
+ dev_put(dev->netdev);
+ kfree(dev);
+}
+
+static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
+ struct pci_dev *pdev)
+{
+ struct cnic_dev *cdev;
+ struct cnic_local *cp;
+ int alloc_size;
+
+ alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
+
+ cdev = kzalloc(alloc_size, GFP_KERNEL);
+ if (cdev == NULL) {
+ printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
+ dev->name);
+ return NULL;
+ }
+
+ cdev->netdev = dev;
+ cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
+ cdev->register_device = cnic_register_device;
+ cdev->unregister_device = cnic_unregister_device;
+ cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
+
+ cp = cdev->cnic_priv;
+ cp->dev = cdev;
+ cp->uio_dev = -1;
+ cp->l2_single_buf_size = 0x400;
+ cp->l2_rx_ring_size = 3;
+
+ spin_lock_init(&cp->cnic_ulp_lock);
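+ /* The rest of the private area starts out zeroed by kzalloc();
+ * the non-zero defaults above are the only fields that need
+ * explicit initialization.
+ */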
+ + printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name); + + return cdev; +} + +static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) +{ + struct pci_dev *pdev; + struct cnic_dev *cdev; + struct cnic_local *cp; + struct cnic_eth_dev *ethdev = NULL; + struct cnic_eth_dev *(*probe)(void *) = NULL; + + probe = __symbol_get("bnx2_cnic_probe"); + if (probe) { + ethdev = (*probe)(dev); + symbol_put_addr(probe); + } + if (!ethdev) + return NULL; + + pdev = ethdev->pdev; + if (!pdev) + return NULL; + + dev_hold(dev); + pci_dev_get(pdev); + if (pdev->device == PCI_DEVICE_ID_NX2_5709 || + pdev->device == PCI_DEVICE_ID_NX2_5709S) { + u8 rev; + + pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); + if (rev < 0x10) { + pci_dev_put(pdev); + goto cnic_err; + } + } + pci_dev_put(pdev); + + cdev = cnic_alloc_dev(dev, pdev); + if (cdev == NULL) + goto cnic_err; + + set_bit(CNIC_F_BNX2_CLASS, &cdev->flags); + cdev->submit_kwqes = cnic_submit_bnx2_kwqes; + + cp = cdev->cnic_priv; + cp->ethdev = ethdev; + cdev->pcidev = pdev; + + cp->cnic_ops = &cnic_bnx2_ops; + cp->start_hw = cnic_start_bnx2_hw; + cp->stop_hw = cnic_stop_bnx2_hw; + cp->setup_pgtbl = cnic_setup_page_tbl; + cp->alloc_resc = cnic_alloc_bnx2_resc; + cp->free_resc = cnic_free_resc; + cp->start_cm = cnic_cm_init_bnx2_hw; + cp->stop_cm = cnic_cm_stop_bnx2_hw; + cp->enable_int = cnic_enable_bnx2_int; + cp->disable_int_sync = cnic_disable_bnx2_int_sync; + cp->close_conn = cnic_close_bnx2_conn; + cp->next_idx = cnic_bnx2_next_idx; + cp->hw_idx = cnic_bnx2_hw_idx; + return cdev; + +cnic_err: + dev_put(dev); + return NULL; +} + +static struct cnic_dev *is_cnic_dev(struct net_device *dev) +{ + struct ethtool_drvinfo drvinfo; + struct cnic_dev *cdev = NULL; + + if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) { + memset(&drvinfo, 0, sizeof(drvinfo)); + dev->ethtool_ops->get_drvinfo(dev, &drvinfo); + + if (!strcmp(drvinfo.driver, "bnx2")) + cdev = init_bnx2_cnic(dev); + if (cdev) { + write_lock(&cnic_dev_lock); + list_add(&cdev->list, &cnic_dev_list); + write_unlock(&cnic_dev_lock); + } + } + return cdev; +} + +/** + * netdev event handler + */ +static int cnic_netdev_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct net_device *netdev = ptr; + struct cnic_dev *dev; + int if_type; + int new_dev = 0; + + dev = cnic_from_netdev(netdev); + + if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) { + /* Check for the hot-plug device */ + dev = is_cnic_dev(netdev); + if (dev) { + new_dev = 1; + cnic_hold(dev); + } + } + if (dev) { + struct cnic_local *cp = dev->cnic_priv; + + if (new_dev) + cnic_ulp_init(dev); + else if (event == NETDEV_UNREGISTER) + cnic_ulp_exit(dev); + else if (event == NETDEV_UP) { + mutex_lock(&cnic_lock); + if (!cnic_start_hw(dev)) + cnic_ulp_start(dev); + mutex_unlock(&cnic_lock); + } + + rcu_read_lock(); + for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { + struct cnic_ulp_ops *ulp_ops; + void *ctx; + + ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); + if (!ulp_ops || !ulp_ops->indicate_netevent) + continue; + + ctx = cp->ulp_handle[if_type]; + + ulp_ops->indicate_netevent(ctx, event); + } + rcu_read_unlock(); + + if (event == NETDEV_GOING_DOWN) { + mutex_lock(&cnic_lock); + cnic_ulp_stop(dev); + cnic_stop_hw(dev); + mutex_unlock(&cnic_lock); + } else if (event == NETDEV_UNREGISTER) { + write_lock(&cnic_dev_lock); + list_del_init(&dev->list); + write_unlock(&cnic_dev_lock); + + cnic_put(dev); + cnic_free_dev(dev); + goto done; + } + cnic_put(dev); + } +done: + return 
NOTIFY_DONE; +} + +static struct notifier_block cnic_netdev_notifier = { + .notifier_call = cnic_netdev_event +}; + +static void cnic_release(void) +{ + struct cnic_dev *dev; + + while (!list_empty(&cnic_dev_list)) { + dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); + if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { + cnic_ulp_stop(dev); + cnic_stop_hw(dev); + } + + cnic_ulp_exit(dev); + list_del_init(&dev->list); + cnic_free_dev(dev); + } +} + +static int __init cnic_init(void) +{ + int rc = 0; + + printk(KERN_INFO "%s", version); + + rc = register_netdevice_notifier(&cnic_netdev_notifier); + if (rc) { + cnic_release(); + return rc; + } + + return 0; +} + +static void __exit cnic_exit(void) +{ + unregister_netdevice_notifier(&cnic_netdev_notifier); + cnic_release(); + return; +} + +module_init(cnic_init); +module_exit(cnic_exit); diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h new file mode 100644 index 000000000000..5192d4a9df5a --- /dev/null +++ b/drivers/net/cnic.h @@ -0,0 +1,299 @@ +/* cnic.h: Broadcom CNIC core network driver. + * + * Copyright (c) 2006-2009 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + */ + + +#ifndef CNIC_H +#define CNIC_H + +#define KWQ_PAGE_CNT 4 +#define KCQ_PAGE_CNT 16 + +#define KWQ_CID 24 +#define KCQ_CID 25 + +/* + * krnlq_context definition + */ +#define L5_KRNLQ_FLAGS 0x00000000 +#define L5_KRNLQ_SIZE 0x00000000 +#define L5_KRNLQ_TYPE 0x00000000 +#define KRNLQ_FLAGS_PG_SZ (0xf<<0) +#define KRNLQ_FLAGS_PG_SZ_256 (0<<0) +#define KRNLQ_FLAGS_PG_SZ_512 (1<<0) +#define KRNLQ_FLAGS_PG_SZ_1K (2<<0) +#define KRNLQ_FLAGS_PG_SZ_2K (3<<0) +#define KRNLQ_FLAGS_PG_SZ_4K (4<<0) +#define KRNLQ_FLAGS_PG_SZ_8K (5<<0) +#define KRNLQ_FLAGS_PG_SZ_16K (6<<0) +#define KRNLQ_FLAGS_PG_SZ_32K (7<<0) +#define KRNLQ_FLAGS_PG_SZ_64K (8<<0) +#define KRNLQ_FLAGS_PG_SZ_128K (9<<0) +#define KRNLQ_FLAGS_PG_SZ_256K (10<<0) +#define KRNLQ_FLAGS_PG_SZ_512K (11<<0) +#define KRNLQ_FLAGS_PG_SZ_1M (12<<0) +#define KRNLQ_FLAGS_PG_SZ_2M (13<<0) +#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15) +#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16) +#define KRNLQ_TYPE_TYPE (0xf<<28) +#define KRNLQ_TYPE_TYPE_EMPTY (0<<28) +#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28) + +#define L5_KRNLQ_HOST_QIDX 0x00000004 +#define L5_KRNLQ_HOST_FW_QIDX 0x00000008 +#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c +#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c +#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010 +#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014 +#define L5_KRNLQ_PGTBL_PGIDX 0x00000018 +#define L5_KRNLQ_NX_PG_QIDX 0x00000018 +#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c +#define L5_KRNLQ_QIDX_INCR 0x0000001c +#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020 +#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024 + +#define BNX2_PG_CTX_MAP 0x1a0034 +#define BNX2_ISCSI_CTX_MAP 0x1a0074 + +struct cnic_redirect_entry { + struct dst_entry *old_dst; + struct dst_entry *new_dst; +}; + +#define MAX_COMPLETED_KCQE 64 + +#define MAX_CNIC_L5_CONTEXT 256 + +#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT + +#define MAX_ISCSI_TBL_SZ 256 + +#define CNIC_LOCAL_PORT_MIN 60000 +#define CNIC_LOCAL_PORT_MAX 61000 +#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN) + +#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe)) +#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe)) +#define MAX_KWQE_CNT (KWQE_CNT - 1) +#define MAX_KCQE_CNT (KCQE_CNT - 1) + +#define 
MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1) +#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1) + +#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5)) +#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT) + +#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5)) +#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT) + +#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \ + (MAX_KCQE_CNT - 1)) ? \ + (x) + 2 : (x) + 1 + +#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp) +#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp) +#define BNX2X_KWQ_DATA(cp, x) \ + &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)] + +#define DEF_IPID_COUNT 0xc001 + +#define DEF_KA_TIMEOUT 10000 +#define DEF_KA_INTERVAL 300000 +#define DEF_KA_MAX_PROBE_COUNT 3 +#define DEF_TOS 0 +#define DEF_TTL 0xfe +#define DEF_SND_SEQ_SCALE 0 +#define DEF_RCV_BUF 0xffff +#define DEF_SND_BUF 0xffff +#define DEF_SEED 0 +#define DEF_MAX_RT_TIME 500 +#define DEF_MAX_DA_COUNT 2 +#define DEF_SWS_TIMER 1000 +#define DEF_MAX_CWND 0xffff + +struct cnic_ctx { + u32 cid; + void *ctx; + dma_addr_t mapping; +}; + +#define BNX2_MAX_CID 0x2000 + +struct cnic_dma { + int num_pages; + void **pg_arr; + dma_addr_t *pg_map_arr; + int pgtbl_size; + u32 *pgtbl; + dma_addr_t pgtbl_map; +}; + +struct cnic_id_tbl { + spinlock_t lock; + u32 start; + u32 max; + u32 next; + unsigned long *table; +}; + +#define CNIC_KWQ16_DATA_SIZE 128 + +struct kwqe_16_data { + u8 data[CNIC_KWQ16_DATA_SIZE]; +}; + +struct cnic_iscsi { + struct cnic_dma task_array_info; + struct cnic_dma r2tq_info; + struct cnic_dma hq_info; +}; + +struct cnic_context { + u32 cid; + struct kwqe_16_data *kwqe_data; + dma_addr_t kwqe_data_mapping; + wait_queue_head_t waitq; + int wait_cond; + unsigned long timestamp; + u32 ctx_flags; +#define CTX_FL_OFFLD_START 0x00000001 + u8 ulp_proto_id; + union { + struct cnic_iscsi *iscsi; + } proto; +}; + +struct cnic_local { + + spinlock_t cnic_ulp_lock; + void *ulp_handle[MAX_CNIC_ULP_TYPE]; + unsigned long ulp_flags[MAX_CNIC_ULP_TYPE]; +#define ULP_F_INIT 0 +#define ULP_F_START 1 + struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE]; + + /* protected by ulp_lock */ + u32 cnic_local_flags; +#define CNIC_LCL_FL_KWQ_INIT 0x00000001 + + struct cnic_dev *dev; + + struct cnic_eth_dev *ethdev; + + void *l2_ring; + dma_addr_t l2_ring_map; + int l2_ring_size; + int l2_rx_ring_size; + + void *l2_buf; + dma_addr_t l2_buf_map; + int l2_buf_size; + int l2_single_buf_size; + + u16 *rx_cons_ptr; + u16 *tx_cons_ptr; + u16 rx_cons; + u16 tx_cons; + + u32 kwq_cid_addr; + u32 kcq_cid_addr; + + struct cnic_dma kwq_info; + struct kwqe **kwq; + + struct cnic_dma kwq_16_data_info; + + u16 max_kwq_idx; + + u16 kwq_prod_idx; + u32 kwq_io_addr; + + u16 *kwq_con_idx_ptr; + u16 kwq_con_idx; + + struct cnic_dma kcq_info; + struct kcqe **kcq; + + u16 kcq_prod_idx; + u32 kcq_io_addr; + + void *status_blk; + struct status_block_msix *bnx2_status_blk; + struct host_status_block *bnx2x_status_blk; + + u32 status_blk_num; + u32 int_num; + u32 last_status_idx; + struct tasklet_struct cnic_irq_task; + + struct kcqe *completed_kcq[MAX_COMPLETED_KCQE]; + + struct cnic_sock *csk_tbl; + struct cnic_id_tbl csk_port_tbl; + + struct cnic_dma conn_buf_info; + struct cnic_dma gbl_buf_info; + + struct cnic_iscsi *iscsi_tbl; + struct cnic_context *ctx_tbl; + struct cnic_id_tbl cid_tbl; + int max_iscsi_conn; + atomic_t iscsi_conn; + + /* per connection parameters */ + int num_iscsi_tasks; + int num_ccells; + int task_array_size; + int r2tq_size; + int 
hq_size;
+ int num_cqs;
+
+ struct cnic_ctx *ctx_arr;
+ int ctx_blks;
+ int ctx_blk_size;
+ int cids_per_blk;
+
+ u32 chip_id;
+ int func;
+ u32 shmem_base;
+
+ u32 uio_dev;
+ struct uio_info *cnic_uinfo;
+
+ struct cnic_ops *cnic_ops;
+ int (*start_hw)(struct cnic_dev *);
+ void (*stop_hw)(struct cnic_dev *);
+ void (*setup_pgtbl)(struct cnic_dev *,
+ struct cnic_dma *);
+ int (*alloc_resc)(struct cnic_dev *);
+ void (*free_resc)(struct cnic_dev *);
+ int (*start_cm)(struct cnic_dev *);
+ void (*stop_cm)(struct cnic_dev *);
+ void (*enable_int)(struct cnic_dev *);
+ void (*disable_int_sync)(struct cnic_dev *);
+ void (*ack_int)(struct cnic_dev *);
+ void (*close_conn)(struct cnic_sock *, u32 opcode);
+ u16 (*next_idx)(u16);
+ u16 (*hw_idx)(u16);
+};
+
+struct bnx2x_bd_chain_next {
+ u32 addr_lo;
+ u32 addr_hi;
+ u8 reserved[8];
+};
+
+#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
+#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+#endif
+
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
new file mode 100644
index 000000000000..cee80f694457
--- /dev/null
+++ b/drivers/net/cnic_defs.h
@@ -0,0 +1,580 @@
+
+/* cnic_defs.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef CNIC_DEFS_H
+#define CNIC_DEFS_H
+
+/* KWQ (kernel work queue) request op codes */
+#define L2_KWQE_OPCODE_VALUE_FLUSH (4)
+
+#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
+#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
+#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52)
+#define L4_KWQE_OPCODE_VALUE_RESET (53)
+#define L4_KWQE_OPCODE_VALUE_CLOSE (54)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60)
+#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61)
+
+#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9)
+#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14)
+
+#define L5CM_RAMROD_CMD_ID_BASE (0x80)
+#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3)
+#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12)
+#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13)
+#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
+#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)
+
+/* KCQ (kernel completion queue) response op codes */
+#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
+#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
+#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55)
+#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56)
+#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57)
+#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58)
+#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61)
+
+#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1)
+#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9)
+#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)
+
+/* KCQ (kernel completion queue) completion status */
+#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
+#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
+
+#define L4_LAYER_CODE (4)
+#define L2_LAYER_CODE (2)
+
+/*
+ * L4 KCQ CQE
+ */
+struct l4_kcq {
+ u32 cid;
+ u32 pg_cid;
+ u32 conn_id;
+ u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+ u16 status;
+ u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved1;
+ u16 status;
+#endif
+ u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define 
L4_KCQ_RESERVED3 (0x7<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+ u8 op_code;
+ u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define L4_KCQ_RESERVED3 (0x7<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+#endif
+};
+
+/*
+ * L4 KCQ CQE PG upload
+ */
+struct l4_kcq_upload_pg {
+ u32 pg_cid;
+#if defined(__BIG_ENDIAN)
+ u16 pg_status;
+ u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+ u16 pg_ipid_count;
+ u16 pg_status;
+#endif
+ u32 reserved1[5];
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+ u8 op_code;
+ u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+#endif
+};
+
+/*
+ * Gracefully close the connection request
+ */
+struct l4_kwq_close_req {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 cid;
+ u32 reserved2[6];
+};
+
+/*
+ * The first request to be passed in order to establish connection in option2
+ */
+struct l4_kwq_connect_req1 {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u8 reserved0;
+ u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+ u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define 
L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+ u8 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 cid;
+ u32 pg_cid;
+ u32 src_ip;
+ u32 dst_ip;
+#if defined(__BIG_ENDIAN)
+ u16 dst_port;
+ u16 src_port;
+#elif defined(__LITTLE_ENDIAN)
+ u16 src_port;
+ u16 dst_port;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 rsrv1[3];
+ u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+ u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+ u8 rsrv1[3];
+#endif
+ u32 rsrv2;
+};
+
+/*
+ * The second (optional) request to be passed in order to establish
+ * connection in option2 - for IPv6 only
+ */
+struct l4_kwq_connect_req2 {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u8 reserved0;
+ u8 rsrv;
+#elif defined(__LITTLE_ENDIAN)
+ u8 rsrv;
+ u8 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 reserved2;
+ u32 src_ip_v6_2;
+ u32 src_ip_v6_3;
+ u32 src_ip_v6_4;
+ u32 dst_ip_v6_2;
+ u32 dst_ip_v6_3;
+ u32 dst_ip_v6_4;
+};
+
+/*
+ * The third (and last) request to be passed in order to establish
+ * connection in option2
+ */
+struct l4_kwq_connect_req3 {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 ka_timeout;
+ u32 ka_interval;
+#if defined(__BIG_ENDIAN)
+ u8 snd_seq_scale;
+ u8 ttl;
+ u8 tos;
+ u8 ka_max_probe_count;
+#elif defined(__LITTLE_ENDIAN)
+ u8 ka_max_probe_count;
+ u8 tos;
+ u8 ttl;
+ u8 snd_seq_scale;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 pmtu;
+ u16 mss;
+#elif defined(__LITTLE_ENDIAN)
+ u16 mss;
+ u16 pmtu;
+#endif
+ u32 rcv_buf;
+ u32 snd_buf;
+ u32 seed;
+};
+
+/*
+ * a KWQE request to offload a PG connection
+ */
+struct l4_kwq_offload_pg {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 l2hdr_nbytes;
+ u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+ u8 da0;
+ u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 da1;
+ u8 da0;
+ u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+ u8 l2hdr_nbytes;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 da2;
+ u8 da3;
+ u8 da4;
+ u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+ u8 da5;
+ u8 da4;
+ u8 da3;
+ u8 da2;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 sa0;
+ u8 sa1;
+ u8 sa2;
+ u8 sa3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 sa3;
+ u8 sa2;
+ u8 sa1;
+ u8 sa0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 sa4;
+ u8 sa5;
+ u16 etype;
+#elif defined(__LITTLE_ENDIAN)
+ u16 etype;
+ u8 sa5;
+ u8 sa4;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 vlan_tag;
+ u16 ipid_start;
+#elif defined(__LITTLE_ENDIAN)
+ u16 ipid_start;
+ u16 vlan_tag;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 ipid_count;
+ u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved3;
+ u16 ipid_count;
+#endif
+ u32 host_opaque;
+};
+
+/*
+ * Abortively close the connection request
+ */
+struct l4_kwq_reset_req {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0) +#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0 +#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4) +#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4 +#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7 + u8 op_code; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_code; + u8 flags; +#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0) +#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0 +#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4) +#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4 +#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 cid; + u32 reserved2[6]; +}; + + +/* + * a KWQE request to update a PG connection + */ +struct l4_kwq_update_pg { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0) +#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0 +#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4) +#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4 +#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7 + u8 opcode; + u16 oper16; +#elif defined(__LITTLE_ENDIAN) + u16 oper16; + u8 opcode; + u8 flags; +#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0) +#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0 +#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4) +#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4 +#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 pg_cid; + u32 pg_host_opaque; +#if defined(__BIG_ENDIAN) + u8 pg_valids; +#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0) +#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0 +#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1) +#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1 +#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2) +#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2 + u8 pg_unused_a; + u16 pg_ipid_count; +#elif defined(__LITTLE_ENDIAN) + u16 pg_ipid_count; + u8 pg_unused_a; + u8 pg_valids; +#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0) +#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0 +#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1) +#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1 +#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2) +#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2 +#endif +#if defined(__BIG_ENDIAN) + u16 reserverd3; + u8 da0; + u8 da1; +#elif defined(__LITTLE_ENDIAN) + u8 da1; + u8 da0; + u16 reserverd3; +#endif +#if defined(__BIG_ENDIAN) + u8 da2; + u8 da3; + u8 da4; + u8 da5; +#elif defined(__LITTLE_ENDIAN) + u8 da5; + u8 da4; + u8 da3; + u8 da2; +#endif + u32 reserved4; + u32 reserved5; +}; + + +/* + * a KWQE request to upload a PG or L4 context + */ +struct l4_kwq_upload { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0) +#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0 +#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4) +#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4 +#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7 + u8 opcode; + u16 oper16; +#elif defined(__LITTLE_ENDIAN) + u16 oper16; + u8 opcode; + u8 flags; +#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0) +#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0 +#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4) +#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4 +#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 cid; + u32 reserved2[6]; +}; + +#endif /* CNIC_DEFS_H */ diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h new file mode 100644 index 
000000000000..06380963a34e --- /dev/null +++ b/drivers/net/cnic_if.h @@ -0,0 +1,299 @@ +/* cnic_if.h: Broadcom CNIC core network driver. + * + * Copyright (c) 2006 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + */ + + +#ifndef CNIC_IF_H +#define CNIC_IF_H + +#define CNIC_MODULE_VERSION "2.0.0" +#define CNIC_MODULE_RELDATE "May 21, 2009" + +#define CNIC_ULP_RDMA 0 +#define CNIC_ULP_ISCSI 1 +#define CNIC_ULP_L4 2 +#define MAX_CNIC_ULP_TYPE_EXT 2 +#define MAX_CNIC_ULP_TYPE 3 + +struct kwqe { + u32 kwqe_op_flag; + +#define KWQE_OPCODE_MASK 0x00ff0000 +#define KWQE_OPCODE_SHIFT 16 +#define KWQE_FLAGS_LAYER_SHIFT 28 +#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT) + + u32 kwqe_info0; + u32 kwqe_info1; + u32 kwqe_info2; + u32 kwqe_info3; + u32 kwqe_info4; + u32 kwqe_info5; + u32 kwqe_info6; +}; + +struct kwqe_16 { + u32 kwqe_info0; + u32 kwqe_info1; + u32 kwqe_info2; + u32 kwqe_info3; +}; + +struct kcqe { + u32 kcqe_info0; + u32 kcqe_info1; + u32 kcqe_info2; + u32 kcqe_info3; + u32 kcqe_info4; + u32 kcqe_info5; + u32 kcqe_info6; + u32 kcqe_op_flag; + #define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */ + #define KCQE_FLAGS_LAYER_MASK (0x7<<28) + #define KCQE_FLAGS_LAYER_MASK_MISC (0<<28) + #define KCQE_FLAGS_LAYER_MASK_L2 (2<<28) + #define KCQE_FLAGS_LAYER_MASK_L3 (3<<28) + #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28) + #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28) + #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28) + #define KCQE_FLAGS_NEXT (1<<31) + #define KCQE_FLAGS_OPCODE_MASK (0xff<<16) + #define KCQE_FLAGS_OPCODE_SHIFT (16) + #define KCQE_OPCODE(op) \ + (((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT) +}; + +#define MAX_CNIC_CTL_DATA 64 +#define MAX_DRV_CTL_DATA 64 + +#define CNIC_CTL_STOP_CMD 1 +#define CNIC_CTL_START_CMD 2 +#define CNIC_CTL_COMPLETION_CMD 3 + +#define DRV_CTL_IO_WR_CMD 0x101 +#define DRV_CTL_IO_RD_CMD 0x102 +#define DRV_CTL_CTX_WR_CMD 0x103 +#define DRV_CTL_CTXTBL_WR_CMD 0x104 +#define DRV_CTL_COMPLETION_CMD 0x105 + +struct cnic_ctl_completion { + u32 cid; +}; + +struct drv_ctl_completion { + u32 comp_count; +}; + +struct cnic_ctl_info { + int cmd; + union { + struct cnic_ctl_completion comp; + char bytes[MAX_CNIC_CTL_DATA]; + } data; +}; + +struct drv_ctl_io { + u32 cid_addr; + u32 offset; + u32 data; + dma_addr_t dma_addr; +}; + +struct drv_ctl_info { + int cmd; + union { + struct drv_ctl_completion comp; + struct drv_ctl_io io; + char bytes[MAX_DRV_CTL_DATA]; + } data; +}; + +struct cnic_ops { + struct module *cnic_owner; + /* Calls to these functions are protected by RCU. When + * unregistering, we wait for any calls to complete before + * continuing. 
+ */ + int (*cnic_handler)(void *, void *); + int (*cnic_ctl)(void *, struct cnic_ctl_info *); +}; + +#define MAX_CNIC_VEC 8 + +struct cnic_irq { + unsigned int vector; + void *status_blk; + u32 status_blk_num; + u32 irq_flags; +#define CNIC_IRQ_FL_MSIX 0x00000001 +}; + +struct cnic_eth_dev { + struct module *drv_owner; + u32 drv_state; +#define CNIC_DRV_STATE_REGD 0x00000001 +#define CNIC_DRV_STATE_USING_MSIX 0x00000002 + u32 chip_id; + u32 max_kwqe_pending; + struct pci_dev *pdev; + void __iomem *io_base; + + u32 ctx_tbl_offset; + u32 ctx_tbl_len; + int ctx_blk_size; + u32 starting_cid; + u32 max_iscsi_conn; + u32 max_fcoe_conn; + u32 max_rdma_conn; + u32 reserved0[2]; + + int num_irq; + struct cnic_irq irq_arr[MAX_CNIC_VEC]; + int (*drv_register_cnic)(struct net_device *, + struct cnic_ops *, void *); + int (*drv_unregister_cnic)(struct net_device *); + int (*drv_submit_kwqes_32)(struct net_device *, + struct kwqe *[], u32); + int (*drv_submit_kwqes_16)(struct net_device *, + struct kwqe_16 *[], u32); + int (*drv_ctl)(struct net_device *, struct drv_ctl_info *); + unsigned long reserved1[2]; +}; + +struct cnic_sockaddr { + union { + struct sockaddr_in v4; + struct sockaddr_in6 v6; + } local; + union { + struct sockaddr_in v4; + struct sockaddr_in6 v6; + } remote; +}; + +struct cnic_sock { + struct cnic_dev *dev; + void *context; + u32 src_ip[4]; + u32 dst_ip[4]; + u16 src_port; + u16 dst_port; + u16 vlan_id; + unsigned char old_ha[6]; + unsigned char ha[6]; + u32 mtu; + u32 cid; + u32 l5_cid; + u32 pg_cid; + int ulp_type; + + u32 ka_timeout; + u32 ka_interval; + u8 ka_max_probe_count; + u8 tos; + u8 ttl; + u8 snd_seq_scale; + u32 rcv_buf; + u32 snd_buf; + u32 seed; + + unsigned long tcp_flags; +#define SK_TCP_NO_DELAY_ACK 0x1 +#define SK_TCP_KEEP_ALIVE 0x2 +#define SK_TCP_NAGLE 0x4 +#define SK_TCP_TIMESTAMP 0x8 +#define SK_TCP_SACK 0x10 +#define SK_TCP_SEG_SCALING 0x20 + unsigned long flags; +#define SK_F_INUSE 0 +#define SK_F_OFFLD_COMPLETE 1 +#define SK_F_OFFLD_SCHED 2 +#define SK_F_PG_OFFLD_COMPLETE 3 +#define SK_F_CONNECT_START 4 +#define SK_F_IPV6 5 +#define SK_F_CLOSING 7 + + atomic_t ref_count; + u32 state; + struct kwqe kwqe1; + struct kwqe kwqe2; + struct kwqe kwqe3; +}; + +struct cnic_dev { + struct net_device *netdev; + struct pci_dev *pcidev; + void __iomem *regview; + struct list_head list; + + int (*register_device)(struct cnic_dev *dev, int ulp_type, + void *ulp_ctx); + int (*unregister_device)(struct cnic_dev *dev, int ulp_type); + int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num_wqes); + int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[], + u32 num_wqes); + + int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **, + void *); + int (*cm_destroy)(struct cnic_sock *); + int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *); + int (*cm_abort)(struct cnic_sock *); + int (*cm_close)(struct cnic_sock *); + struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type); + int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type, + char *data, u16 data_size); + unsigned long flags; +#define CNIC_F_CNIC_UP 1 +#define CNIC_F_BNX2_CLASS 3 +#define CNIC_F_BNX2X_CLASS 4 + atomic_t ref_count; + u8 mac_addr[6]; + + int max_iscsi_conn; + int max_fcoe_conn; + int max_rdma_conn; + + void *cnic_priv; +}; + +#define CNIC_WR(dev, off, val) writel(val, dev->regview + off) +#define CNIC_WR16(dev, off, val) writew(val, dev->regview + off) +#define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off) +#define 
CNIC_RD(dev, off) readl(dev->regview + off) +#define CNIC_RD16(dev, off) readw(dev->regview + off) + +struct cnic_ulp_ops { + /* Calls to these functions are protected by RCU. When + * unregistering, we wait for any calls to complete before + * continuing. + */ + + void (*cnic_init)(struct cnic_dev *dev); + void (*cnic_exit)(struct cnic_dev *dev); + void (*cnic_start)(void *ulp_ctx); + void (*cnic_stop)(void *ulp_ctx); + void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[], + u32 num_cqes); + void (*indicate_netevent)(void *ulp_ctx, unsigned long event); + void (*cm_connect_complete)(struct cnic_sock *); + void (*cm_close_complete)(struct cnic_sock *); + void (*cm_abort_complete)(struct cnic_sock *); + void (*cm_remote_close)(struct cnic_sock *); + void (*cm_remote_abort)(struct cnic_sock *); + void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type, + char *data, u16 data_size); + struct module *owner; +}; + +extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); + +extern int cnic_unregister_driver(int ulp_type); + +#endif -- cgit v1.2.3 From cf4e6363859d30f24f8cd3e8930dbff399cc3550 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 8 Jun 2009 18:14:44 -0700 Subject: [SCSI] bnx2i: Add bnx2i iSCSI driver. New iSCSI driver for Broadcom BNX2 devices. The driver interfaces with the CNIC driver to access the hardware. Signed-off-by: Anil Veerabhadrappa Signed-off-by: Michael Chan Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/Kconfig | 1 + drivers/scsi/Makefile | 1 + drivers/scsi/bnx2i/57xx_iscsi_constants.h | 155 ++ drivers/scsi/bnx2i/57xx_iscsi_hsi.h | 1509 ++++++++++++++++++ drivers/scsi/bnx2i/Kconfig | 7 + drivers/scsi/bnx2i/Makefile | 3 + drivers/scsi/bnx2i/bnx2i.h | 771 +++++++++ drivers/scsi/bnx2i/bnx2i_hwi.c | 2405 +++++++++++++++++++++++++++++ drivers/scsi/bnx2i/bnx2i_init.c | 438 ++++++ drivers/scsi/bnx2i/bnx2i_iscsi.c | 2064 +++++++++++++++++++++++++ drivers/scsi/bnx2i/bnx2i_sysfs.c | 142 ++ 11 files changed, 7496 insertions(+) create mode 100644 drivers/scsi/bnx2i/57xx_iscsi_constants.h create mode 100644 drivers/scsi/bnx2i/57xx_iscsi_hsi.h create mode 100644 drivers/scsi/bnx2i/Kconfig create mode 100644 drivers/scsi/bnx2i/Makefile create mode 100644 drivers/scsi/bnx2i/bnx2i.h create mode 100644 drivers/scsi/bnx2i/bnx2i_hwi.c create mode 100644 drivers/scsi/bnx2i/bnx2i_init.c create mode 100644 drivers/scsi/bnx2i/bnx2i_iscsi.c create mode 100644 drivers/scsi/bnx2i/bnx2i_sysfs.c (limited to 'drivers') diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 759e1507e63c..6a19ed9a1194 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -354,6 +354,7 @@ config ISCSI_TCP http://open-iscsi.org source "drivers/scsi/cxgb3i/Kconfig" +source "drivers/scsi/bnx2i/Kconfig" config SGIWD93_SCSI tristate "SGI WD93C93 SCSI Driver" diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 8795c309963e..25429ea63d0a 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -129,6 +129,7 @@ obj-$(CONFIG_SCSI_STEX) += stex.o obj-$(CONFIG_SCSI_MVSAS) += mvsas/ obj-$(CONFIG_PS3_ROM) += ps3rom.o obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ +obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ obj-$(CONFIG_ARM) += arm/ diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h new file mode 100644 index 000000000000..2fceb19eb27b --- /dev/null +++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h @@ -0,0 +1,155 @@ +/* 57xx_iscsi_constants.h: Broadcom 
NetXtreme II iSCSI HSI + * + * Copyright (c) 2006 - 2009 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + */ +#ifndef __57XX_ISCSI_CONSTANTS_H_ +#define __57XX_ISCSI_CONSTANTS_H_ + +/** +* This file defines HSI constants for the iSCSI flows +*/ + +/* iSCSI request op codes */ +#define ISCSI_OPCODE_CLEANUP_REQUEST (7) + +/* iSCSI response/messages op codes */ +#define ISCSI_OPCODE_CLEANUP_RESPONSE (0x27) +#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION (0) + +/* iSCSI task types */ +#define ISCSI_TASK_TYPE_READ (0) +#define ISCSI_TASK_TYPE_WRITE (1) +#define ISCSI_TASK_TYPE_MPATH (2) + +/* initial CQ sequence numbers */ +#define ISCSI_INITIAL_SN (1) + +/* KWQ (kernel work queue) layer codes */ +#define ISCSI_KWQE_LAYER_CODE (6) + +/* KWQ (kernel work queue) request op codes */ +#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0) +#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1) +#define ISCSI_KWQE_OPCODE_UPDATE_CONN (2) +#define ISCSI_KWQE_OPCODE_DESTROY_CONN (3) +#define ISCSI_KWQE_OPCODE_INIT1 (4) +#define ISCSI_KWQE_OPCODE_INIT2 (5) + +/* KCQ (kernel completion queue) response op codes */ +#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN (0x10) +#define ISCSI_KCQE_OPCODE_UPDATE_CONN (0x12) +#define ISCSI_KCQE_OPCODE_DESTROY_CONN (0x13) +#define ISCSI_KCQE_OPCODE_INIT (0x14) +#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK (0x15) +#define ISCSI_KCQE_OPCODE_TCP_RESET (0x16) +#define ISCSI_KCQE_OPCODE_TCP_SYN (0x17) +#define ISCSI_KCQE_OPCODE_TCP_FIN (0X18) +#define ISCSI_KCQE_OPCODE_TCP_ERROR (0x19) +#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20) +#define ISCSI_KCQE_OPCODE_ISCSI_ERROR (0x21) + +/* KCQ (kernel completion queue) completion status */ +#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS (0x0) +#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x1) +#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x2) +#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x3) +#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR (0x4) + +#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR (0x5) +#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR (0x6) + +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE (0xa) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE (0xb) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN (0xc) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT (0xd) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN (0xe) + +/* Response */ +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN (0xf) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T (0x10) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO (0x2c) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG (0x2d) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0 (0x11) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1 (0x12) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2 (0x13) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3 (0x14) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4 (0x15) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5 (0x16) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6 (0x17) + +/* Data-In */ +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN (0x18) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN (0x19) +#define 
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO (0x1a) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV (0x1b) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN (0x1c) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN (0x1d) + +/* R2T */ +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF (0x1f) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN (0x20) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN (0x21) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED (0x24) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV (0x25) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN (0x26) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27) + +/* TMF */ +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN (0x28) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN (0x29) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN (0x2a) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP (0x2b) + +/* IP/TCP processing errors: */ +#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT (0x40) +#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS (0x41) +#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG (0x42) +#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS (0x43) + +/* iSCSI licensing errors */ +/* general iSCSI license not installed */ +#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED (0x50) +/* additional LOM specific iSCSI license not installed */ +#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51) + +/* SQ/RQ/CQ DB structure sizes */ +#define ISCSI_SQ_DB_SIZE (16) +#define ISCSI_RQ_DB_SIZE (16) +#define ISCSI_CQ_DB_SIZE (80) + +#define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF + +/* Page size codes (for flags field in connection offload request) */ +#define ISCSI_PAGE_SIZE_256 (0) +#define ISCSI_PAGE_SIZE_512 (1) +#define ISCSI_PAGE_SIZE_1K (2) +#define ISCSI_PAGE_SIZE_2K (3) +#define ISCSI_PAGE_SIZE_4K (4) +#define ISCSI_PAGE_SIZE_8K (5) +#define ISCSI_PAGE_SIZE_16K (6) +#define ISCSI_PAGE_SIZE_32K (7) +#define ISCSI_PAGE_SIZE_64K (8) +#define ISCSI_PAGE_SIZE_128K (9) +#define ISCSI_PAGE_SIZE_256K (10) +#define ISCSI_PAGE_SIZE_512K (11) +#define ISCSI_PAGE_SIZE_1M (12) +#define ISCSI_PAGE_SIZE_2M (13) +#define ISCSI_PAGE_SIZE_4M (14) +#define ISCSI_PAGE_SIZE_8M (15) + +/* Iscsi PDU related defines */ +#define ISCSI_HEADER_SIZE (48) +#define ISCSI_DIGEST_SHIFT (2) +#define ISCSI_DIGEST_SIZE (4) + +#define B577XX_ISCSI_CONNECTION_TYPE 3 + +#endif /*__57XX_ISCSI_CONSTANTS_H_ */ diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h new file mode 100644 index 000000000000..36af1afef9b6 --- /dev/null +++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h @@ -0,0 +1,1509 @@ +/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI. + * + * Copyright (c) 2006 - 2009 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + */ +#ifndef __57XX_ISCSI_HSI_LINUX_LE__ +#define __57XX_ISCSI_HSI_LINUX_LE__ + +/* + * iSCSI Async CQE + */ +struct bnx2i_async_msg { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 reserved1; + u8 op_code; +#endif + u32 reserved2; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved3[2]; +#if defined(__BIG_ENDIAN) + u16 reserved5; + u8 err_code; + u8 reserved4; +#elif defined(__LITTLE_ENDIAN) + u8 reserved4; + u8 err_code; + u16 reserved5; +#endif + u32 reserved6; + u32 lun[2]; +#if defined(__BIG_ENDIAN) + u8 async_event; + u8 async_vcode; + u16 param1; +#elif defined(__LITTLE_ENDIAN) + u16 param1; + u8 async_vcode; + u8 async_event; +#endif +#if defined(__BIG_ENDIAN) + u16 param2; + u16 param3; +#elif defined(__LITTLE_ENDIAN) + u16 param3; + u16 param2; +#endif + u32 reserved7[3]; + u32 cq_req_sn; +}; + + +/* + * iSCSI Buffer Descriptor (BD) + */ +struct iscsi_bd { + u32 buffer_addr_hi; + u32 buffer_addr_lo; +#if defined(__BIG_ENDIAN) + u16 reserved0; + u16 buffer_length; +#elif defined(__LITTLE_ENDIAN) + u16 buffer_length; + u16 reserved0; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved3; + u16 flags; +#define ISCSI_BD_RESERVED1 (0x3F<<0) +#define ISCSI_BD_RESERVED1_SHIFT 0 +#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6) +#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6 +#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7) +#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7 +#define ISCSI_BD_RESERVED2 (0xFF<<8) +#define ISCSI_BD_RESERVED2_SHIFT 8 +#elif defined(__LITTLE_ENDIAN) + u16 flags; +#define ISCSI_BD_RESERVED1 (0x3F<<0) +#define ISCSI_BD_RESERVED1_SHIFT 0 +#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6) +#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6 +#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7) +#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7 +#define ISCSI_BD_RESERVED2 (0xFF<<8) +#define ISCSI_BD_RESERVED2_SHIFT 8 + u16 reserved3; +#endif +}; + + +/* + * iSCSI Cleanup SQ WQE + */ +struct bnx2i_cleanup_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 reserved1; + u8 op_code; +#endif + u32 reserved2[3]; +#if defined(__BIG_ENDIAN) + u16 reserved3; + u16 itt; +#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0 +#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14) +#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0 +#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14) +#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14 + u16 reserved3; +#endif + u32 reserved4[10]; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved6; + u16 reserved5; +#elif defined(__LITTLE_ENDIAN) + u16 reserved5; + u8 reserved6; + u8 cq_index; +#endif +}; + + +/* + * iSCSI Cleanup CQE + */ +struct bnx2i_cleanup_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 status; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 status; + u8 op_code; +#endif + u32 reserved1[3]; + u32 reserved2[2]; +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 err_code; + u8 reserved3; +#elif defined(__LITTLE_ENDIAN) + u8 reserved3; + u8 err_code; + u16 reserved4; +#endif + u32 reserved5[7]; +#if defined(__BIG_ENDIAN) + u16 reserved6; + u16 itt; +#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_CLEANUP_RESPONSE_TYPE 
(0x3<<14) +#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14) +#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14 + u16 reserved6; +#endif + u32 cq_req_sn; +}; + + +/* + * SCSI read/write SQ WQE + */ +struct bnx2i_cmd_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0) +#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0 +#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3) +#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3 +#define ISCSI_CMD_REQUEST_WRITE (0x1<<5) +#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5 +#define ISCSI_CMD_REQUEST_READ (0x1<<6) +#define ISCSI_CMD_REQUEST_READ_SHIFT 6 +#define ISCSI_CMD_REQUEST_FINAL (0x1<<7) +#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_attr; +#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0) +#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0 +#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3) +#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3 +#define ISCSI_CMD_REQUEST_WRITE (0x1<<5) +#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5 +#define ISCSI_CMD_REQUEST_READ (0x1<<6) +#define ISCSI_CMD_REQUEST_READ_SHIFT 6 +#define ISCSI_CMD_REQUEST_FINAL (0x1<<7) +#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7 + u8 op_code; +#endif +#if defined(__BIG_ENDIAN) + u16 ud_buffer_offset; + u16 sd_buffer_offset; +#elif defined(__LITTLE_ENDIAN) + u16 sd_buffer_offset; + u16 ud_buffer_offset; +#endif + u32 lun[2]; +#if defined(__BIG_ENDIAN) + u16 reserved2; + u16 itt; +#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0 +#define ISCSI_CMD_REQUEST_TYPE (0x3<<14) +#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0 +#define ISCSI_CMD_REQUEST_TYPE (0x3<<14) +#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14 + u16 reserved2; +#endif + u32 total_data_transfer_length; + u32 cmd_sn; + u32 reserved3; + u32 cdb[4]; + u32 zero_fill; + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 sd_start_bd_index; + u8 ud_start_bd_index; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 ud_start_bd_index; + u8 sd_start_bd_index; + u8 cq_index; +#endif +}; + + +/* + * task statistics for write response + */ +struct bnx2i_write_resp_task_stat { + u32 num_data_ins; +}; + +/* + * task statistics for read response + */ +struct bnx2i_read_resp_task_stat { +#if defined(__BIG_ENDIAN) + u16 num_data_outs; + u16 num_r2ts; +#elif defined(__LITTLE_ENDIAN) + u16 num_r2ts; + u16 num_data_outs; +#endif +}; + +/* + * task statistics for iSCSI cmd response + */ +union bnx2i_cmd_resp_task_stat { + struct bnx2i_write_resp_task_stat write_stat; + struct bnx2i_read_resp_task_stat read_stat; +}; + +/* + * SCSI Command CQE + */ +struct bnx2i_cmd_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 response_flags; +#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0) +#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0 +#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1) +#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1 +#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2) +#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2 +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3) +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3 +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW 
(0x1<<4) +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4 +#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5) +#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5 + u8 response; + u8 status; +#elif defined(__LITTLE_ENDIAN) + u8 status; + u8 response; + u8 response_flags; +#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0) +#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0 +#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1) +#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1 +#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2) +#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2 +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3) +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3 +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4) +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4 +#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5) +#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5 + u8 op_code; +#endif + u32 data_length; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved2; + u32 residual_count; +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 err_code; + u8 reserved3; +#elif defined(__LITTLE_ENDIAN) + u8 reserved3; + u8 err_code; + u16 reserved4; +#endif + u32 reserved5[5]; + union bnx2i_cmd_resp_task_stat task_stat; + u32 reserved6; +#if defined(__BIG_ENDIAN) + u16 reserved7; + u16 itt; +#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14) +#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14) +#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14 + u16 reserved7; +#endif + u32 cq_req_sn; +}; + + + +/* + * firmware middle-path request SQ WQE + */ +struct bnx2i_fw_mp_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; + u16 hdr_opaque1; +#elif defined(__LITTLE_ENDIAN) + u16 hdr_opaque1; + u8 op_attr; + u8 op_code; +#endif + u32 data_length; + u32 hdr_opaque2[2]; +#if defined(__BIG_ENDIAN) + u16 reserved0; + u16 itt; +#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0 +#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14) +#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0 +#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14) +#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14 + u16 reserved0; +#endif + u32 hdr_opaque3[4]; + u32 resp_bd_list_addr_lo; + u32 resp_bd_list_addr_hi; + u32 resp_buffer; +#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) +#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 +#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24) +#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24 +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 reserved3; + u8 flags; +#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0) +#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1) +#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1 +#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) +#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 +#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3) +#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3 +#elif defined(__LITTLE_ENDIAN) + u8 flags; +#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0) +#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1) +#define 
ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1 +#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) +#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 +#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3) +#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3 + u8 reserved3; + u16 reserved4; +#endif + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved6; + u8 reserved5; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved5; + u8 reserved6; + u8 cq_index; +#endif +}; + + +/* + * firmware response - CQE: used only by firmware + */ +struct bnx2i_fw_response { + u32 hdr_dword1[2]; + u32 hdr_exp_cmd_sn; + u32 hdr_max_cmd_sn; + u32 hdr_ttt; + u32 hdr_res_cnt; + u32 cqe_flags; +#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0) +#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0 +#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8) +#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8 +#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16) +#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16 + u32 stat_sn; + u32 hdr_dword2[2]; + u32 hdr_dword3[2]; + u32 task_stat; + u32 reserved0; + u32 hdr_itt; + u32 cq_req_sn; +}; + + +/* + * iSCSI KCQ CQE parameters + */ +union iscsi_kcqe_params { + u32 reserved0[4]; +}; + +/* + * iSCSI KCQ CQE + */ +struct iscsi_kcqe { + u32 iscsi_conn_id; + u32 completion_status; + u32 iscsi_conn_context_id; + union iscsi_kcqe_params params; +#if defined(__BIG_ENDIAN) + u8 flags; +#define ISCSI_KCQE_RESERVED0 (0xF<<0) +#define ISCSI_KCQE_RESERVED0_SHIFT 0 +#define ISCSI_KCQE_LAYER_CODE (0x7<<4) +#define ISCSI_KCQE_LAYER_CODE_SHIFT 4 +#define ISCSI_KCQE_RESERVED1 (0x1<<7) +#define ISCSI_KCQE_RESERVED1_SHIFT 7 + u8 op_code; + u16 qe_self_seq; +#elif defined(__LITTLE_ENDIAN) + u16 qe_self_seq; + u8 op_code; + u8 flags; +#define ISCSI_KCQE_RESERVED0 (0xF<<0) +#define ISCSI_KCQE_RESERVED0_SHIFT 0 +#define ISCSI_KCQE_LAYER_CODE (0x7<<4) +#define ISCSI_KCQE_LAYER_CODE_SHIFT 4 +#define ISCSI_KCQE_RESERVED1 (0x1<<7) +#define ISCSI_KCQE_RESERVED1_SHIFT 7 +#endif +}; + + + +/* + * iSCSI KWQE header + */ +struct iscsi_kwqe_header { +#if defined(__BIG_ENDIAN) + u8 flags; +#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0) +#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0 +#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4) +#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4 +#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7) +#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7 + u8 op_code; +#elif defined(__LITTLE_ENDIAN) + u8 op_code; + u8 flags; +#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0) +#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0 +#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4) +#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4 +#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7) +#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7 +#endif +}; + +/* + * iSCSI firmware init request 1 + */ +struct iscsi_kwqe_init1 { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u8 reserved0; + u8 num_cqs; +#elif defined(__LITTLE_ENDIAN) + u8 num_cqs; + u8 reserved0; + struct iscsi_kwqe_header hdr; +#endif + u32 dummy_buffer_addr_lo; + u32 dummy_buffer_addr_hi; +#if defined(__BIG_ENDIAN) + u16 num_ccells_per_conn; + u16 num_tasks_per_conn; +#elif defined(__LITTLE_ENDIAN) + u16 num_tasks_per_conn; + u16 num_ccells_per_conn; +#endif +#if defined(__BIG_ENDIAN) + u16 sq_wqes_per_page; + u16 sq_num_wqes; +#elif defined(__LITTLE_ENDIAN) + u16 sq_num_wqes; + u16 sq_wqes_per_page; +#endif +#if defined(__BIG_ENDIAN) + u8 cq_log_wqes_per_page; + u8 flags; +#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0) +#define 
ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0 +#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4) +#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 +#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) +#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 +#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6) +#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6 + u16 cq_num_wqes; +#elif defined(__LITTLE_ENDIAN) + u16 cq_num_wqes; + u8 flags; +#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0) +#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0 +#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4) +#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 +#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) +#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 +#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6) +#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6 + u8 cq_log_wqes_per_page; +#endif +#if defined(__BIG_ENDIAN) + u16 cq_num_pages; + u16 sq_num_pages; +#elif defined(__LITTLE_ENDIAN) + u16 sq_num_pages; + u16 cq_num_pages; +#endif +#if defined(__BIG_ENDIAN) + u16 rq_buffer_size; + u16 rq_num_wqes; +#elif defined(__LITTLE_ENDIAN) + u16 rq_num_wqes; + u16 rq_buffer_size; +#endif +}; + +/* + * iSCSI firmware init request 2 + */ +struct iscsi_kwqe_init2 { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 max_cq_sqn; +#elif defined(__LITTLE_ENDIAN) + u16 max_cq_sqn; + struct iscsi_kwqe_header hdr; +#endif + u32 error_bit_map[2]; + u32 reserved1[5]; +}; + +/* + * Initial iSCSI connection offload request 1 + */ +struct iscsi_kwqe_conn_offload1 { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 iscsi_conn_id; +#elif defined(__LITTLE_ENDIAN) + u16 iscsi_conn_id; + struct iscsi_kwqe_header hdr; +#endif + u32 sq_page_table_addr_lo; + u32 sq_page_table_addr_hi; + u32 cq_page_table_addr_lo; + u32 cq_page_table_addr_hi; + u32 reserved0[3]; +}; + +/* + * iSCSI Page Table Entry (PTE) + */ +struct iscsi_pte { + u32 hi; + u32 lo; +}; + +/* + * Initial iSCSI connection offload request 2 + */ +struct iscsi_kwqe_conn_offload2 { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct iscsi_kwqe_header hdr; +#endif + u32 rq_page_table_addr_lo; + u32 rq_page_table_addr_hi; + struct iscsi_pte sq_first_pte; + struct iscsi_pte cq_first_pte; + u32 num_additional_wqes; +}; + + +/* + * Initial iSCSI connection offload request 3 + */ +struct iscsi_kwqe_conn_offload3 { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct iscsi_kwqe_header hdr; +#endif + u32 reserved1; + struct iscsi_pte qp_first_pte[3]; +}; + + +/* + * iSCSI connection update request + */ +struct iscsi_kwqe_conn_update { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct iscsi_kwqe_header hdr; +#endif +#if defined(__BIG_ENDIAN) + u8 session_error_recovery_level; + u8 max_outstanding_r2ts; + u8 reserved2; + u8 conn_flags; +#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0) +#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0 +#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1) +#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1 +#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2) +#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 +#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) +#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4) +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4 
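+ /* + * A note on a pattern used throughout these headers: the __BIG_ENDIAN + * and __LITTLE_ENDIAN branches declare the same fields in byte-mirrored + * order, so the in-memory layout handed to the chip is identical + * regardless of host byte order. + */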
+#elif defined(__LITTLE_ENDIAN) + u8 conn_flags; +#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0) +#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0 +#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1) +#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1 +#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2) +#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 +#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) +#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4) +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4 + u8 reserved2; + u8 max_outstanding_r2ts; + u8 session_error_recovery_level; +#endif + u32 context_id; + u32 max_send_pdu_length; + u32 max_recv_pdu_length; + u32 first_burst_length; + u32 max_burst_length; + u32 exp_stat_sn; +}; + +/* + * iSCSI destroy connection request + */ +struct iscsi_kwqe_conn_destroy { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct iscsi_kwqe_header hdr; +#endif + u32 context_id; + u32 reserved1[6]; +}; + +/* + * iSCSI KWQ WQE + */ +union iscsi_kwqe { + struct iscsi_kwqe_init1 init1; + struct iscsi_kwqe_init2 init2; + struct iscsi_kwqe_conn_offload1 conn_offload1; + struct iscsi_kwqe_conn_offload2 conn_offload2; + struct iscsi_kwqe_conn_update conn_update; + struct iscsi_kwqe_conn_destroy conn_destroy; +}; + +/* + * iSCSI Login SQ WQE + */ +struct bnx2i_login_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0) +#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2) +#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2 +#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4) +#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4 +#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6) +#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6 +#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7) +#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7 + u8 version_max; + u8 version_min; +#elif defined(__LITTLE_ENDIAN) + u8 version_min; + u8 version_max; + u8 op_attr; +#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0) +#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2) +#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2 +#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4) +#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4 +#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6) +#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6 +#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7) +#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 isid_lo; +#if defined(__BIG_ENDIAN) + u16 isid_hi; + u16 tsih; +#elif defined(__LITTLE_ENDIAN) + u16 tsih; + u16 isid_hi; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved2; + u16 itt; +#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14) +#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14) +#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14 + u16 reserved2; +#endif +#if defined(__BIG_ENDIAN) + u16 cid; + u16 reserved3; +#elif defined(__LITTLE_ENDIAN) + u16 reserved3; + u16 cid; +#endif + u32 cmd_sn; + u32 exp_stat_sn; + u32 reserved4; + u32 resp_bd_list_addr_lo; + u32 resp_bd_list_addr_hi; + u32 resp_buffer; +#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) 
+#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24) +#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24 +#if defined(__BIG_ENDIAN) + u16 reserved8; + u8 reserved7; + u8 flags; +#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0) +#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) +#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 +#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3) +#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3 +#elif defined(__LITTLE_ENDIAN) + u8 flags; +#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0) +#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) +#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 +#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3) +#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3 + u8 reserved7; + u16 reserved8; +#endif + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved10; + u8 reserved9; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved9; + u8 reserved10; + u8 cq_index; +#endif +}; + + +/* + * iSCSI Login CQE + */ +struct bnx2i_login_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 response_flags; +#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0) +#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2) +#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2 +#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4) +#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4 +#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6) +#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6 +#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7) +#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7 + u8 version_max; + u8 version_active; +#elif defined(__LITTLE_ENDIAN) + u8 version_active; + u8 version_max; + u8 response_flags; +#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0) +#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2) +#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2 +#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4) +#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4 +#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6) +#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6 +#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7) +#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved1[2]; +#if defined(__BIG_ENDIAN) + u16 reserved3; + u8 err_code; + u8 reserved2; +#elif defined(__LITTLE_ENDIAN) + u8 reserved2; + u8 err_code; + u16 reserved3; +#endif + u32 stat_sn; + u32 isid_lo; +#if defined(__BIG_ENDIAN) + u16 isid_hi; + u16 tsih; +#elif defined(__LITTLE_ENDIAN) + u16 tsih; + u16 isid_hi; +#endif +#if defined(__BIG_ENDIAN) + u8 status_class; + u8 status_detail; + u16 reserved4; +#elif defined(__LITTLE_ENDIAN) + u16 reserved4; + u8 status_detail; + u8 status_class; +#endif + u32 reserved5[3]; +#if defined(__BIG_ENDIAN) + u16 reserved6; + u16 itt; +#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14) +#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14) +#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14 + u16 reserved6; +#endif + u32 cq_req_sn; +}; + + +/* + * iSCSI Logout SQ WQE + */ +struct 
bnx2i_logout_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0) +#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0 +#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_attr; +#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0) +#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0 +#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 reserved1[2]; +#if defined(__BIG_ENDIAN) + u16 reserved2; + u16 itt; +#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14) +#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14) +#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14 + u16 reserved2; +#endif +#if defined(__BIG_ENDIAN) + u16 cid; + u16 reserved3; +#elif defined(__LITTLE_ENDIAN) + u16 reserved3; + u16 cid; +#endif + u32 cmd_sn; + u32 reserved4[5]; + u32 zero_fill; + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved6; + u8 reserved5; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved5; + u8 reserved6; + u8 cq_index; +#endif +}; + + +/* + * iSCSI Logout CQE + */ +struct bnx2i_logout_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u8 response; + u8 reserved0; +#elif defined(__LITTLE_ENDIAN) + u8 reserved0; + u8 response; + u8 reserved1; + u8 op_code; +#endif + u32 reserved2; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved3[2]; +#if defined(__BIG_ENDIAN) + u16 reserved5; + u8 err_code; + u8 reserved4; +#elif defined(__LITTLE_ENDIAN) + u8 reserved4; + u8 err_code; + u16 reserved5; +#endif + u32 reserved6[3]; +#if defined(__BIG_ENDIAN) + u16 time_to_wait; + u16 time_to_retain; +#elif defined(__LITTLE_ENDIAN) + u16 time_to_retain; + u16 time_to_wait; +#endif + u32 reserved7[3]; +#if defined(__BIG_ENDIAN) + u16 reserved8; + u16 itt; +#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14) +#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14) +#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14 + u16 reserved8; +#endif + u32 cq_req_sn; +}; + + +/* + * iSCSI Nop-In CQE + */ +struct bnx2i_nop_in_msg { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 reserved1; + u8 op_code; +#endif + u32 data_length; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 ttt; + u32 reserved2; +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 err_code; + u8 reserved3; +#elif defined(__LITTLE_ENDIAN) + u8 reserved3; + u8 err_code; + u16 reserved4; +#endif + u32 reserved5; + u32 lun[2]; + u32 reserved6[4]; +#if defined(__BIG_ENDIAN) + u16 reserved7; + u16 itt; +#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0) +#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0 +#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14) +#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0) +#define 
ISCSI_NOP_IN_MSG_INDEX_SHIFT 0 +#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14) +#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14 + u16 reserved7; +#endif + u32 cq_req_sn; +}; + + +/* + * iSCSI NOP-OUT SQ WQE + */ +struct bnx2i_nop_out_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0) +#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_attr; +#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0) +#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 lun[2]; +#if defined(__BIG_ENDIAN) + u16 reserved2; + u16 itt; +#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14) +#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14) +#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14 + u16 reserved2; +#endif + u32 ttt; + u32 cmd_sn; + u32 reserved3[2]; + u32 resp_bd_list_addr_lo; + u32 resp_bd_list_addr_hi; + u32 resp_buffer; +#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) +#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24) +#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24 +#if defined(__BIG_ENDIAN) + u16 reserved7; + u8 reserved6; + u8 flags; +#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0) +#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1) +#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1 +#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2) +#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2 +#elif defined(__LITTLE_ENDIAN) + u8 flags; +#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0) +#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1) +#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1 +#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2) +#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2 + u8 reserved6; + u16 reserved7; +#endif + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved9; + u8 reserved8; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved8; + u8 reserved9; + u8 cq_index; +#endif +}; + +/* + * iSCSI Reject CQE + */ +struct bnx2i_reject_msg { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u8 reason; + u8 reserved0; +#elif defined(__LITTLE_ENDIAN) + u8 reserved0; + u8 reason; + u8 reserved1; + u8 op_code; +#endif + u32 data_length; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved2[2]; +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 err_code; + u8 reserved3; +#elif defined(__LITTLE_ENDIAN) + u8 reserved3; + u8 err_code; + u16 reserved4; +#endif + u32 reserved5[8]; + u32 cq_req_sn; +}; + +/* + * bnx2i iSCSI TMF SQ WQE + */ +struct bnx2i_tmf_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0) +#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0 +#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7 + u16 reserved0; +#elif 
defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_attr; +#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0) +#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0 +#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 lun[2]; +#if defined(__BIG_ENDIAN) + u16 reserved1; + u16 itt; +#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0 +#define ISCSI_TMF_REQUEST_TYPE (0x3<<14) +#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0 +#define ISCSI_TMF_REQUEST_TYPE (0x3<<14) +#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14 + u16 reserved1; +#endif + u32 ref_itt; + u32 cmd_sn; + u32 reserved2; + u32 ref_cmd_sn; + u32 reserved3[3]; + u32 zero_fill; + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved5; + u8 reserved4; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved4; + u8 reserved5; + u8 cq_index; +#endif +}; + +/* + * iSCSI Text SQ WQE + */ +struct bnx2i_text_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0) +#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_TEXT_REQUEST_CONT (0x1<<6) +#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6 +#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7) +#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_attr; +#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0) +#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_TEXT_REQUEST_CONT (0x1<<6) +#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6 +#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7) +#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 lun[2]; +#if defined(__BIG_ENDIAN) + u16 reserved3; + u16 itt; +#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14) +#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14) +#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14 + u16 reserved3; +#endif + u32 ttt; + u32 cmd_sn; + u32 reserved4[2]; + u32 resp_bd_list_addr_lo; + u32 resp_bd_list_addr_hi; + u32 resp_buffer; +#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) +#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 +#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24) +#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24 + u32 zero_fill; + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved7; + u8 reserved6; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved6; + u8 reserved7; + u8 cq_index; +#endif +}; + +/* + * iSCSI SQ WQE + */ +union iscsi_request { + struct bnx2i_cmd_request cmd; + struct bnx2i_tmf_request tmf; + struct bnx2i_nop_out_request nop_out; + struct bnx2i_login_request login_req; + struct bnx2i_text_request text; + struct bnx2i_logout_request logout_req; + struct bnx2i_cleanup_request cleanup; +}; + + +/* + * iSCSI TMF CQE + */ +struct bnx2i_tmf_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u8 response; + u8 reserved0; +#elif defined(__LITTLE_ENDIAN) + u8 reserved0; + u8 response; + u8 reserved1; + u8 op_code; +#endif + u32 reserved2; + u32 exp_cmd_sn; + u32 max_cmd_sn; + 
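/* the remaining reserved words pad this CQE out to the fixed 64-byte CQE entry size */ +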
u32 reserved3[2]; +#if defined(__BIG_ENDIAN) + u16 reserved5; + u8 err_code; + u8 reserved4; +#elif defined(__LITTLE_ENDIAN) + u8 reserved4; + u8 err_code; + u16 reserved5; +#endif + u32 reserved6[7]; +#if defined(__BIG_ENDIAN) + u16 reserved7; + u16 itt; +#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14) +#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14) +#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14 + u16 reserved7; +#endif + u32 cq_req_sn; +}; + +/* + * iSCSI Text CQE + */ +struct bnx2i_text_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 response_flags; +#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0) +#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6) +#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6 +#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7) +#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 response_flags; +#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0) +#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6) +#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6 +#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7) +#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 ttt; + u32 reserved2; +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 err_code; + u8 reserved3; +#elif defined(__LITTLE_ENDIAN) + u8 reserved3; + u8 err_code; + u16 reserved4; +#endif + u32 reserved5; + u32 lun[2]; + u32 reserved6[4]; +#if defined(__BIG_ENDIAN) + u16 reserved7; + u16 itt; +#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14) +#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14) +#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14 + u16 reserved7; +#endif + u32 cq_req_sn; +}; + +/* + * iSCSI CQE + */ +union iscsi_response { + struct bnx2i_cmd_response cmd; + struct bnx2i_tmf_response tmf; + struct bnx2i_login_response login_resp; + struct bnx2i_text_response text; + struct bnx2i_logout_response logout_resp; + struct bnx2i_cleanup_response cleanup; + struct bnx2i_reject_msg reject; + struct bnx2i_async_msg async; + struct bnx2i_nop_in_msg nop_in; +}; + +#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */ diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig new file mode 100644 index 000000000000..820d428ae839 --- /dev/null +++ b/drivers/scsi/bnx2i/Kconfig @@ -0,0 +1,7 @@ +config SCSI_BNX2_ISCSI + tristate "Broadcom NetXtreme II iSCSI support" + select SCSI_ISCSI_ATTRS + select CNIC + ---help--- + This driver supports iSCSI offload for the Broadcom NetXtreme II + devices. 
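The "select CNIC" line above reflects how bnx2i attaches to the hardware: it registers a set of cnic_ulp_ops callbacks with the CNIC core through cnic_register_driver(), declared in cnic_if.h earlier in this series. The sketch below only illustrates that registration pattern; the CNIC_ULP_ISCSI constant and all example_* names are illustrative assumptions, not code from this patch.

/* Minimal sketch of a ULP driver registering with the CNIC core.
 * Assumes a CNIC_ULP_ISCSI type constant from cnic_if.h; error
 * handling and the remaining cnic_ulp_ops callbacks are omitted.
 */
#include <linux/module.h>
#include "cnic_if.h"

static void example_cnic_init(struct cnic_dev *dev)
{
	/* invoked for each CNIC device present when we register */
}

static void example_cnic_exit(struct cnic_dev *dev)
{
	/* invoked when a CNIC device is going away */
}

static struct cnic_ulp_ops example_ulp_ops = {
	.cnic_init = example_cnic_init,
	.cnic_exit = example_cnic_exit,
	.owner     = THIS_MODULE,
};

static int __init example_init(void)
{
	/* calls into the cnic_register_driver() interface shown earlier */
	return cnic_register_driver(CNIC_ULP_ISCSI, &example_ulp_ops);
}

static void __exit example_exit(void)
{
	cnic_unregister_driver(CNIC_ULP_ISCSI);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");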
diff --git a/drivers/scsi/bnx2i/Makefile b/drivers/scsi/bnx2i/Makefile new file mode 100644 index 000000000000..b5802bd2e76a --- /dev/null +++ b/drivers/scsi/bnx2i/Makefile @@ -0,0 +1,3 @@ +bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o + +obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h new file mode 100644 index 000000000000..d7576f28c6e9 --- /dev/null +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -0,0 +1,771 @@ +/* bnx2i.h: Broadcom NetXtreme II iSCSI driver. + * + * Copyright (c) 2006 - 2009 Broadcom Corporation + * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + */ + +#ifndef _BNX2I_H_ +#define _BNX2I_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../../net/cnic_if.h" +#include "57xx_iscsi_hsi.h" +#include "57xx_iscsi_constants.h" + +#define BNX2_ISCSI_DRIVER_NAME "bnx2i" + +#define BNX2I_MAX_ADAPTERS 8 + +#define ISCSI_MAX_CONNS_PER_HBA 128 +#define ISCSI_MAX_SESS_PER_HBA ISCSI_MAX_CONNS_PER_HBA +#define ISCSI_MAX_CMDS_PER_SESS 128 + +/* Total active commands across all connections supported by devices */ +#define ISCSI_MAX_CMDS_PER_HBA_5708 (28 * (ISCSI_MAX_CMDS_PER_SESS - 1)) +#define ISCSI_MAX_CMDS_PER_HBA_5709 (128 * (ISCSI_MAX_CMDS_PER_SESS - 1)) +#define ISCSI_MAX_CMDS_PER_HBA_57710 (256 * (ISCSI_MAX_CMDS_PER_SESS - 1)) + +#define ISCSI_MAX_BDS_PER_CMD 32 + +#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8 +#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4 + +/* 5706/08 hardware has a limit on the maximum buffer size per BD it can handle */ +#define MAX_BD_LENGTH 65535 +#define BD_SPLIT_SIZE 32768 + +/* min, max & default values for SQ/RQ/CQ size, configurable via modparam */ +#define BNX2I_SQ_WQES_MIN 16 +#define BNX2I_570X_SQ_WQES_MAX 128 +#define BNX2I_5770X_SQ_WQES_MAX 512 +#define BNX2I_570X_SQ_WQES_DEFAULT 128 +#define BNX2I_5770X_SQ_WQES_DEFAULT 256 + +#define BNX2I_570X_CQ_WQES_MAX 128 +#define BNX2I_5770X_CQ_WQES_MAX 512 + +#define BNX2I_RQ_WQES_MIN 16 +#define BNX2I_RQ_WQES_MAX 32 +#define BNX2I_RQ_WQES_DEFAULT 16 + +/* CCELLs per conn */ +#define BNX2I_CCELLS_MIN 16 +#define BNX2I_CCELLS_MAX 96 +#define BNX2I_CCELLS_DEFAULT 64 + +#define ITT_INVALID_SIGNATURE 0xFFFF + +#define ISCSI_CMD_CLEANUP_TIMEOUT 100 + +#define BNX2I_CONN_CTX_BUF_SIZE 16384 + +#define BNX2I_SQ_WQE_SIZE 64 +#define BNX2I_RQ_WQE_SIZE 256 +#define BNX2I_CQE_SIZE 64 + +#define MB_KERNEL_CTX_SHIFT 8 +#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT) + +#define CTX_SHIFT 7 +#define GET_CID_NUM(cid_addr) ((cid_addr) >> CTX_SHIFT) + +#define CTX_OFFSET 0x10000 +#define MAX_CID_CNT 0x4000 + +/* 5709 context registers */ +#define BNX2_MQ_CONFIG2 0x00003d00 +#define BNX2_MQ_CONFIG2_CONT_SZ (0x7L<<4) +#define BNX2_MQ_CONFIG2_FIRST_L4L5 (0x1fL<<8) + +/* 57710's BAR2 is mapped to doorbell registers */ +#define BNX2X_DOORBELL_PCI_BAR 2 +#define BNX2X_MAX_CQS 8 + +#define CNIC_ARM_CQE 1 +#define CNIC_DISARM_CQE 0 + +#define REG_RD(__hba, offset) \ + readl(__hba->regview + offset) +#define REG_WR(__hba, offset, val) \ + writel(val, __hba->regview + offset) + + +/** + * struct generic_pdu_resc - 
login pdu resource structure + * + * @req_buf: driver buffer used to stage payload associated with + * the login request + * @req_dma_addr: dma address for iscsi login request payload buffer + * @req_buf_size: actual login request payload length + * @req_wr_ptr: pointer into login request buffer when next data is + * to be written + * @resp_hdr: iscsi header where iscsi login response header is to + * be recreated + * @resp_buf: buffer to stage login response payload + * @resp_dma_addr: login response payload buffer dma address + * @resp_buf_size: login response payload length + * @resp_wr_ptr: pointer into login response buffer when next data is + * to be written + * @req_bd_tbl: iscsi login request payload BD table + * @req_bd_dma: login request BD table dma address + * @resp_bd_tbl: iscsi login response payload BD table + * @resp_bd_dma: login response BD table dma address + * + * The following structure defines buffer info for generic pdus such as iSCSI Login, + * Logout and NOP + */ +struct generic_pdu_resc { + char *req_buf; + dma_addr_t req_dma_addr; + u32 req_buf_size; + char *req_wr_ptr; + struct iscsi_hdr resp_hdr; + char *resp_buf; + dma_addr_t resp_dma_addr; + u32 resp_buf_size; + char *resp_wr_ptr; + char *req_bd_tbl; + dma_addr_t req_bd_dma; + char *resp_bd_tbl; + dma_addr_t resp_bd_dma; +}; + + +/** + * struct bd_resc_page - tracks DMA'able memory allocated for BD tables + * + * @link: list head to link elements + * @max_ptrs: maximum number of pointers that can be stored in this page + * @num_valid: number of valid pointers in this page + * @page: base address for page pointer array + * + * structure to track DMA'able memory allocated for command BD tables + */ +struct bd_resc_page { + struct list_head link; + u32 max_ptrs; + u32 num_valid; + void *page[1]; +}; + + +/** + * struct io_bdt - I/O buffer descriptor table + * + * @bd_tbl: BD table's virtual address + * @bd_tbl_dma: BD table's dma address + * @bd_valid: num valid BD entries + * + * IO BD table + */ +struct io_bdt { + struct iscsi_bd *bd_tbl; + dma_addr_t bd_tbl_dma; + u16 bd_valid; +}; + + +/** + * struct bnx2i_cmd - iscsi command structure + * + * @scsi_cmd: SCSI-ML task pointer corresponding to this iscsi cmd + * @sg: SG list + * @io_tbl: buffer descriptor (BD) table + * @bd_tbl_dma: buffer descriptor (BD) table's dma address + */ +struct bnx2i_cmd { + struct iscsi_hdr hdr; + struct bnx2i_conn *conn; + struct scsi_cmnd *scsi_cmd; + struct scatterlist *sg; + struct io_bdt io_tbl; + dma_addr_t bd_tbl_dma; + struct bnx2i_cmd_request req; +}; + + +/** + * struct bnx2i_conn - iscsi connection structure + * + * @cls_conn: pointer to iscsi cls conn + * @hba: adapter structure pointer + * @iscsi_conn_cid: iscsi conn id + * @fw_cid: firmware iscsi context id + * @ep: endpoint structure pointer + * @gen_pdu: login/nopout/logout pdu resources + * @violation_notified: bit mask used to track iscsi error/warning messages + * already printed out + * + * iSCSI connection structure + */ +struct bnx2i_conn { + struct iscsi_cls_conn *cls_conn; + struct bnx2i_hba *hba; + struct completion cmd_cleanup_cmpl; + int is_bound; + + u32 iscsi_conn_cid; +#define BNX2I_CID_RESERVED 0x5AFF + u32 fw_cid; + + struct timer_list poll_timer; + /* + * Queue Pair (QP) related structure elements. 
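+ * The queue memory itself is described by struct qp_info, which is + * embedded in struct bnx2i_endpoint and reached via the ep pointer below.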
+ */ + struct bnx2i_endpoint *ep; + + /* + * Buffer for login negotiation process + */ + struct generic_pdu_resc gen_pdu; + u64 violation_notified; +}; + + + +/** + * struct iscsi_cid_queue - Per adapter iscsi cid queue + * + * @cid_que_base: queue base memory + * @cid_que: queue memory pointer + * @cid_q_prod_idx: producer index + * @cid_q_cons_idx: consumer index + * @cid_q_max_idx: max index, used to detect wrap around condition + * @cid_free_cnt: queue size + * @conn_cid_tbl: iscsi cid to conn structure mapping table + * + * Per adapter iSCSI CID Queue + */ +struct iscsi_cid_queue { + void *cid_que_base; + u32 *cid_que; + u32 cid_q_prod_idx; + u32 cid_q_cons_idx; + u32 cid_q_max_idx; + u32 cid_free_cnt; + struct bnx2i_conn **conn_cid_tbl; +}; + +/** + * struct bnx2i_hba - bnx2i adapter structure + * + * @link: list head to link elements + * @cnic: pointer to cnic device + * @pcidev: pointer to pci dev + * @netdev: pointer to netdev structure + * @regview: mapped PCI register space + * @age: age, incremented by every recovery + * @cnic_dev_type: cnic device type, 5706/5708/5709/57710 + * @mail_queue_access: mailbox queue access mode, applicable to 5709 only + * @reg_with_cnic: indicates whether the device is registered with CNIC + * @adapter_state: adapter state, UP, GOING_DOWN, LINK_DOWN + * @mtu_supported: Ethernet MTU supported + * @shost: scsi host pointer + * @max_sqes: SQ size + * @max_rqes: RQ size + * @max_cqes: CQ size + * @num_ccell: number of command cells per connection + * @ofld_conns_active: number of active offloaded connections + * @max_active_conns: max offload connections supported by this device + * @cid_que: iscsi cid queue + * @ep_rdwr_lock: read / write lock to synchronize various ep lists + * @ep_ofld_list: connection list for pending offload completion + * @ep_destroy_list: connection list for pending destroy completion + * @mp_bd_tbl: BD table to be used with middle path requests + * @mp_bd_dma: DMA address of 'mp_bd_tbl' memory buffer + * @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs + * @dummy_buf_dma: DMA address of 'dummy_buffer' memory buffer + * @lock: lock to synchronize access to hba structure + * @pci_did: PCI device ID + * @pci_vid: PCI vendor ID + * @pci_sdid: PCI subsystem device ID + * @pci_svid: PCI subsystem vendor ID + * @pci_func: PCI function number in system pci tree + * @pci_devno: PCI device number in system pci tree + * @num_wqe_sent: statistic counter, total wqe's sent + * @num_cqe_rcvd: statistic counter, total cqe's received + * @num_intr_claimed: statistic counter, total interrupts claimed + * @link_changed_count: statistic counter, num of link change notifications + * received + * @ipaddr_changed_count: statistic counter, num times IP address changed while + * at least one connection is offloaded + * @num_sess_opened: statistic counter, total num sessions opened + * @num_conn_opened: statistic counter, total num conns opened on this hba + * @ctx_ccell_tasks: captures number of ccells and tasks supported by + * currently offloaded connection, used to decode + * context memory + * + * Adapter Data Structure + */ +struct bnx2i_hba { + struct list_head link; + struct cnic_dev *cnic; + struct pci_dev *pcidev; + struct net_device *netdev; + void __iomem *regview; + + u32 age; + unsigned long cnic_dev_type; + #define BNX2I_NX2_DEV_5706 0x0 + #define BNX2I_NX2_DEV_5708 0x1 + #define BNX2I_NX2_DEV_5709 0x2 + #define BNX2I_NX2_DEV_57710 0x3 + u32 mail_queue_access; + #define BNX2I_MQ_KERNEL_MODE 0x0 + #define BNX2I_MQ_KERNEL_BYPASS_MODE 0x1 + 
#define BNX2I_MQ_BIN_MODE 0x2 + unsigned long reg_with_cnic; + #define BNX2I_CNIC_REGISTERED 1 + + unsigned long adapter_state; + #define ADAPTER_STATE_UP 0 + #define ADAPTER_STATE_GOING_DOWN 1 + #define ADAPTER_STATE_LINK_DOWN 2 + #define ADAPTER_STATE_INIT_FAILED 31 + unsigned int mtu_supported; + #define BNX2I_MAX_MTU_SUPPORTED 1500 + + struct Scsi_Host *shost; + + u32 max_sqes; + u32 max_rqes; + u32 max_cqes; + u32 num_ccell; + + int ofld_conns_active; + + int max_active_conns; + struct iscsi_cid_queue cid_que; + + rwlock_t ep_rdwr_lock; + struct list_head ep_ofld_list; + struct list_head ep_destroy_list; + + /* + * BD table to be used with MP (Middle Path) requests. + */ + char *mp_bd_tbl; + dma_addr_t mp_bd_dma; + char *dummy_buffer; + dma_addr_t dummy_buf_dma; + + spinlock_t lock; /* protects hba structure access */ + struct mutex net_dev_lock; /* sync net device access */ + + /* + * PCI related info. + */ + u16 pci_did; + u16 pci_vid; + u16 pci_sdid; + u16 pci_svid; + u16 pci_func; + u16 pci_devno; + + /* + * Following are a bunch of statistics useful during development + * and at a later stage for score boarding. + */ + u32 num_wqe_sent; + u32 num_cqe_rcvd; + u32 num_intr_claimed; + u32 link_changed_count; + u32 ipaddr_changed_count; + u32 num_sess_opened; + u32 num_conn_opened; + unsigned int ctx_ccell_tasks; +}; + + +/******************************************************************************* + * QP [ SQ / RQ / CQ ] info. + ******************************************************************************/ + +/* + * SQ/RQ/CQ generic structure definition + */ +struct sqe { + u8 sqe_byte[BNX2I_SQ_WQE_SIZE]; +}; + +struct rqe { + u8 rqe_byte[BNX2I_RQ_WQE_SIZE]; +}; + +struct cqe { + u8 cqe_byte[BNX2I_CQE_SIZE]; +}; + + +enum { +#if defined(__LITTLE_ENDIAN) + CNIC_EVENT_COAL_INDEX = 0x0, + CNIC_SEND_DOORBELL = 0x4, + CNIC_EVENT_CQ_ARM = 0x7, + CNIC_RECV_DOORBELL = 0x8 +#elif defined(__BIG_ENDIAN) + CNIC_EVENT_COAL_INDEX = 0x2, + CNIC_SEND_DOORBELL = 0x6, + CNIC_EVENT_CQ_ARM = 0x4, + CNIC_RECV_DOORBELL = 0xa +#endif +}; + + +/* + * CQ DB + */ +struct bnx2x_iscsi_cq_pend_cmpl { + /* CQ producer, updated by Ustorm */ + u16 ustrom_prod; + /* CQ pending completion counter */ + u16 pend_cntr; +}; + + +struct bnx2i_5771x_cq_db { + struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS]; + /* CQ pending completion ITT array */ + u16 itt[BNX2X_MAX_CQS]; + /* Cstorm CQ sequence to notify array, updated by driver */ + u16 sqn[BNX2X_MAX_CQS]; + u32 reserved[4]; /* 16 byte alignment */ +}; + + +struct bnx2i_5771x_sq_rq_db { + u16 prod_idx; + u8 reserved0[14]; /* Pad structure size to 16 bytes */ +}; + + +struct bnx2i_5771x_dbell_hdr { + u8 header; + /* 1 for rx doorbell, 0 for tx doorbell */ +#define B577XX_DOORBELL_HDR_RX (0x1<<0) +#define B577XX_DOORBELL_HDR_RX_SHIFT 0 + /* 0 for normal doorbell, 1 for advertise wnd doorbell */ +#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1) +#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1 + /* rdma tx only: DPM transaction size specifier (64/128/256/512B) */ +#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2) +#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2 + /* connection type */ +#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4) +#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4 +}; + +struct bnx2i_5771x_dbell { + struct bnx2i_5771x_dbell_hdr dbell; + u8 pad[3]; + +}; + +/** + * struct qp_info - QP (shared queue region) attributes structure + * + * @ctx_base: ioremapped pci register base to access doorbell register + * pertaining to this offloaded connection + * @sq_virt: virtual 
address of send queue (SQ) region + * @sq_phys: DMA address of SQ memory region + * @sq_mem_size: SQ size + * @sq_prod_qe: SQ producer entry pointer + * @sq_cons_qe: SQ consumer entry pointer + * @sq_first_qe: virtual address of first entry in SQ + * @sq_last_qe: virtual address of last entry in SQ + * @sq_prod_idx: SQ producer index + * @sq_cons_idx: SQ consumer index + * @sqe_left: number of SQ entries left + * @sq_pgtbl_virt: page table describing buffer constituting SQ region + * @sq_pgtbl_phys: dma address of 'sq_pgtbl_virt' + * @sq_pgtbl_size: SQ page table size + * @cq_virt: virtual address of completion queue (CQ) region + * @cq_phys: DMA address of CQ memory region + * @cq_mem_size: CQ size + * @cq_prod_qe: CQ producer entry pointer + * @cq_cons_qe: CQ consumer entry pointer + * @cq_first_qe: virtual address of first entry in CQ + * @cq_last_qe: virtual address of last entry in CQ + * @cq_prod_idx: CQ producer index + * @cq_cons_idx: CQ consumer index + * @cqe_left: number of CQ entries left + * @cqe_size: size of each CQ entry + * @cqe_exp_seq_sn: next expected CQE sequence number + * @cq_pgtbl_virt: page table describing buffer constituting CQ region + * @cq_pgtbl_phys: dma address of 'cq_pgtbl_virt' + * @cq_pgtbl_size: CQ page table size + * @rq_virt: virtual address of receive queue (RQ) region + * @rq_phys: DMA address of RQ memory region + * @rq_mem_size: RQ size + * @rq_prod_qe: RQ producer entry pointer + * @rq_cons_qe: RQ consumer entry pointer + * @rq_first_qe: virtual address of first entry in RQ + * @rq_last_qe: virtual address of last entry in RQ + * @rq_prod_idx: RQ producer index + * @rq_cons_idx: RQ consumer index + * @rqe_left: number of RQ entries left + * @rq_pgtbl_virt: page table describing buffer constituting RQ region + * @rq_pgtbl_phys: dma address of 'rq_pgtbl_virt' + * @rq_pgtbl_size: RQ page table size + * + * queue pair (QP) is a per connection shared data structure which is used + * to send work requests (SQ), receive completion notifications (CQ) + * and receive asynchronous / scsi sense info (RQ). 
'qp_info' structure + * below holds queue memory, consumer/producer indexes and page table + * information + */ +struct qp_info { + void __iomem *ctx_base; +#define DPM_TRIGER_TYPE 0x40 + +#define BNX2I_570x_QUE_DB_SIZE 0 +#define BNX2I_5771x_QUE_DB_SIZE 16 + struct sqe *sq_virt; + dma_addr_t sq_phys; + u32 sq_mem_size; + + struct sqe *sq_prod_qe; + struct sqe *sq_cons_qe; + struct sqe *sq_first_qe; + struct sqe *sq_last_qe; + u16 sq_prod_idx; + u16 sq_cons_idx; + u32 sqe_left; + + void *sq_pgtbl_virt; + dma_addr_t sq_pgtbl_phys; + u32 sq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */ + + struct cqe *cq_virt; + dma_addr_t cq_phys; + u32 cq_mem_size; + + struct cqe *cq_prod_qe; + struct cqe *cq_cons_qe; + struct cqe *cq_first_qe; + struct cqe *cq_last_qe; + u16 cq_prod_idx; + u16 cq_cons_idx; + u32 cqe_left; + u32 cqe_size; + u32 cqe_exp_seq_sn; + + void *cq_pgtbl_virt; + dma_addr_t cq_pgtbl_phys; + u32 cq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */ + + struct rqe *rq_virt; + dma_addr_t rq_phys; + u32 rq_mem_size; + + struct rqe *rq_prod_qe; + struct rqe *rq_cons_qe; + struct rqe *rq_first_qe; + struct rqe *rq_last_qe; + u16 rq_prod_idx; + u16 rq_cons_idx; + u32 rqe_left; + + void *rq_pgtbl_virt; + dma_addr_t rq_pgtbl_phys; + u32 rq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */ +}; + + + +/* + * CID handles + */ +struct ep_handles { + u32 fw_cid; + u32 drv_iscsi_cid; + u16 pg_cid; + u16 rsvd; +}; + + +enum { + EP_STATE_IDLE = 0x0, + EP_STATE_PG_OFLD_START = 0x1, + EP_STATE_PG_OFLD_COMPL = 0x2, + EP_STATE_OFLD_START = 0x4, + EP_STATE_OFLD_COMPL = 0x8, + EP_STATE_CONNECT_START = 0x10, + EP_STATE_CONNECT_COMPL = 0x20, + EP_STATE_ULP_UPDATE_START = 0x40, + EP_STATE_ULP_UPDATE_COMPL = 0x80, + EP_STATE_DISCONN_START = 0x100, + EP_STATE_DISCONN_COMPL = 0x200, + EP_STATE_CLEANUP_START = 0x400, + EP_STATE_CLEANUP_CMPL = 0x800, + EP_STATE_TCP_FIN_RCVD = 0x1000, + EP_STATE_TCP_RST_RCVD = 0x2000, + EP_STATE_PG_OFLD_FAILED = 0x1000000, + EP_STATE_ULP_UPDATE_FAILED = 0x2000000, + EP_STATE_CLEANUP_FAILED = 0x4000000, + EP_STATE_OFLD_FAILED = 0x8000000, + EP_STATE_CONNECT_FAILED = 0x10000000, + EP_STATE_DISCONN_TIMEDOUT = 0x20000000, +}; + +/** + * struct bnx2i_endpoint - representation of tcp connection in NX2 world + * + * @link: list head to link elements + * @hba: adapter to which this connection belongs + * @conn: iscsi connection this EP is linked to + * @cm_sk: cnic sock struct + * @hba_age: age to detect if 'iscsid' issues ep_disconnect() + * after HBA reset is completed by bnx2i/cnic/bnx2 + * modules + * @state: tracks offload connection state machine + * @timestamp: time stamp (jiffies) + * @num_active_cmds: number of commands currently outstanding on this + * connection + * @qp: QP information + * @ids: contains chip allocated *context id* & driver assigned + * *iscsi cid* + * @ofld_timer: offload timer to detect timeout + * @ofld_wait: wait queue + * + * Endpoint Structure - equivalent of tcp socket structure + */ +struct bnx2i_endpoint { + struct list_head link; + struct bnx2i_hba *hba; + struct bnx2i_conn *conn; + struct cnic_sock *cm_sk; + u32 hba_age; + u32 state; + unsigned long timestamp; + int num_active_cmds; + + struct qp_info qp; + struct ep_handles ids; + #define ep_iscsi_cid ids.drv_iscsi_cid + #define ep_cid ids.fw_cid + #define ep_pg_cid ids.pg_cid + struct timer_list ofld_timer; + wait_queue_head_t ofld_wait; +}; + + + +/* Global variables */ +extern unsigned int error_mask1, error_mask2; +extern u64 iscsi_error_mask; +extern unsigned int en_tcp_dack; +extern unsigned 
int event_coal_div; + +extern struct scsi_transport_template *bnx2i_scsi_xport_template; +extern struct iscsi_transport bnx2i_iscsi_transport; +extern struct cnic_ulp_ops bnx2i_cnic_cb; + +extern unsigned int sq_size; +extern unsigned int rq_size; + +extern struct device_attribute *bnx2i_dev_attributes[]; + + + +/* + * Function Prototypes + */ +extern void bnx2i_identify_device(struct bnx2i_hba *hba); +extern void bnx2i_register_device(struct bnx2i_hba *hba); + +extern void bnx2i_ulp_init(struct cnic_dev *dev); +extern void bnx2i_ulp_exit(struct cnic_dev *dev); +extern void bnx2i_start(void *handle); +extern void bnx2i_stop(void *handle); +extern void bnx2i_reg_dev_all(void); +extern void bnx2i_unreg_dev_all(void); +extern struct bnx2i_hba *get_adapter_list_head(void); + +struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, + u16 iscsi_cid); + +int bnx2i_alloc_ep_pool(void); +void bnx2i_release_ep_pool(void); +struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba); +struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba); + +struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic); + +struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic); +void bnx2i_free_hba(struct bnx2i_hba *hba); + +void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len); +void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count); + +void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd); + +void bnx2i_drop_session(struct iscsi_cls_session *session); + +extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba); +extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn, + struct iscsi_task *mtask); +extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn, + struct iscsi_task *mtask); +extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn, + struct bnx2i_cmd *cmnd); +extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn, + struct iscsi_task *mtask, u32 ttt, + char *datap, int data_len, int unsol); +extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn, + struct iscsi_task *mtask); +extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, + struct bnx2i_cmd *cmd); +extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep); +extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn); +extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep); + +extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep); +extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep); +extern void bnx2i_ep_ofld_timer(unsigned long data); +extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list( + struct bnx2i_hba *hba, u32 iscsi_cid); +extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list( + struct bnx2i_hba *hba, u32 iscsi_cid); + +extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep); +extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action); + +/* Debug related function prototypes */ +extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn); +extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn); +extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn); +extern void bnx2i_print_recv_state(struct bnx2i_conn *conn); + +#endif diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c new file mode 100644 index 000000000000..906cef5cda86 --- /dev/null +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -0,0 +1,2405 @@ +/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver. 
+ * + * Copyright (c) 2006 - 2009 Broadcom Corporation + * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + */ + +#include +#include +#include "bnx2i.h" + +/** + * bnx2i_get_cid_num - get cid from ep + * @ep: endpoint pointer + * + * 57710 devices use the fw context id as-is; on older chips the cid is + * extracted from the context id via GET_CID_NUM() + */ +static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep) +{ + u32 cid; + + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) + cid = ep->ep_cid; + else + cid = GET_CID_NUM(ep->ep_cid); + return cid; +} + + +/** + * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ size based on device type + * @hba: Adapter for which adjustments are to be made + * + * 5706/5708/5709 queue sizes are rounded down to a power of two; on all + * devices the queue sizes are then aligned to an integral number of pages + */ +static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) +{ + u32 num_elements_per_pg; + + if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) || + test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) || + test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { + if (!is_power_of_2(hba->max_sqes)) + hba->max_sqes = rounddown_pow_of_two(hba->max_sqes); + + if (!is_power_of_2(hba->max_rqes)) + hba->max_rqes = rounddown_pow_of_two(hba->max_rqes); + } + + /* Adjust each queue size if the user selection does not + * yield integral num of page buffers + */ + /* adjust SQ */ + num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; + if (hba->max_sqes < num_elements_per_pg) + hba->max_sqes = num_elements_per_pg; + else if (hba->max_sqes % num_elements_per_pg) + hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) & + ~(num_elements_per_pg - 1); + + /* adjust CQ */ + num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE; + if (hba->max_cqes < num_elements_per_pg) + hba->max_cqes = num_elements_per_pg; + else if (hba->max_cqes % num_elements_per_pg) + hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) & + ~(num_elements_per_pg - 1); + + /* adjust RQ */ + num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE; + if (hba->max_rqes < num_elements_per_pg) + hba->max_rqes = num_elements_per_pg; + else if (hba->max_rqes % num_elements_per_pg) + hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) & + ~(num_elements_per_pg - 1); +} + + +/** + * bnx2i_get_link_state - get network interface link state + * @hba: adapter instance pointer + * + * updates adapter structure link state flag based on netdev state + */ +static void bnx2i_get_link_state(struct bnx2i_hba *hba) +{ + if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state)) + set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); + else + clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); +} + + +/** + * bnx2i_iscsi_license_error - displays iscsi license related error message + * @hba: adapter instance pointer + * @error_code: error classification + * + * Puts out an error log when the driver is unable to offload an iscsi + * connection due to license restrictions + */ +static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code) +{ + if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED) + /* iSCSI offload not supported on this device */ + printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n", + hba->netdev->name); + if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED) + /* iSCSI offload not supported on this LOM device */ + printk(KERN_ERR "bnx2i: 
LOM is not enabled to " + "offload iSCSI connections, dev=%s\n", + hba->netdev->name); + set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state); +} + + +/** + * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification + * @ep: endpoint (transport identifier) structure + * @action: action, ARM or DISARM. For now only ARM_CQE is used + * + * Arming the CQ enables the chip to generate a global EQ event in order to + * interrupt the driver. An EQ event is generated when the armed CQ index is + * hit, or when at least 1 CQE is outstanding and the on chip timer expires + */ +void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action) +{ + struct bnx2i_5771x_cq_db *cq_db; + u16 cq_index; + + if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) + return; + + if (action == CNIC_ARM_CQE) { + cq_index = ep->qp.cqe_exp_seq_sn + + ep->num_active_cmds / event_coal_div; + cq_index %= (ep->qp.cqe_size * 2 + 1); + if (!cq_index) + cq_index = 1; + cq_db = (struct bnx2i_5771x_cq_db *) + ep->qp.cq_pgtbl_virt; + cq_db->sqn[0] = cq_index; + } +} + + +/** + * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer + * @bnx2i_conn: iscsi connection on which RQ event occurred + * @ptr: driver buffer to which RQ buffer contents are to + * be copied + * @len: length of valid data inside RQ buf + * + * Copies RQ buffer contents from shared (DMA'able) memory region to + * driver buffer. RQ is used to DMA unsolicited iscsi pdu's and + * scsi sense info + */ +void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len) +{ + if (!bnx2i_conn->ep->qp.rqe_left) + return; + + bnx2i_conn->ep->qp.rqe_left--; + memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len); + if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) { + bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe; + bnx2i_conn->ep->qp.rq_cons_idx = 0; + } else { + bnx2i_conn->ep->qp.rq_cons_qe++; + bnx2i_conn->ep->qp.rq_cons_idx++; + } +} + + +static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn) +{ + struct bnx2i_5771x_dbell dbell; + u32 msg; + + memset(&dbell, 0, sizeof(dbell)); + dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE << + B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT); + msg = *((u32 *)&dbell); + /* TODO : get doorbell register mapping */ + writel(cpu_to_le32(msg), conn->ep->qp.ctx_base); +} + + +/** + * bnx2i_put_rq_buf - Replenish RQ buffer, if required ring on chip doorbell + * @bnx2i_conn: iscsi connection on which event to post + * @count: number of RQ buffers being posted to chip + * + * No need to ring hardware doorbell for 57710 family of devices + */ +void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count) +{ + struct bnx2i_5771x_sq_rq_db *rq_db; + u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000); + struct bnx2i_endpoint *ep = bnx2i_conn->ep; + + ep->qp.rqe_left += count; + ep->qp.rq_prod_idx &= 0x7FFF; + ep->qp.rq_prod_idx += count; + + if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) { + ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes; + if (!hi_bit) + ep->qp.rq_prod_idx |= 0x8000; + } else + ep->qp.rq_prod_idx |= hi_bit; + + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { + rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt; + rq_db->prod_idx = ep->qp.rq_prod_idx; + /* no need to ring hardware doorbell for 57710 */ + } else { + writew(ep->qp.rq_prod_idx, + ep->qp.ctx_base + CNIC_RECV_DOORBELL); + } + mmiowb(); +} + + +/** + * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake up the processing engine + * @bnx2i_conn: iscsi connection to which new SQ entries belong + * @count: 
number of SQ WQEs to post + * + * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family + * of devices. For 5706/5708/5709 the new SQ WQE count is written into the + * doorbell register + */ +static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count) +{ + struct bnx2i_5771x_sq_rq_db *sq_db; + struct bnx2i_endpoint *ep = bnx2i_conn->ep; + + ep->num_active_cmds++; + wmb(); /* flush SQ WQE memory before the doorbell is rung */ + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { + sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt; + sq_db->prod_idx = ep->qp.sq_prod_idx; + bnx2i_ring_577xx_doorbell(bnx2i_conn); + } else + writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL); + + mmiowb(); /* flush posted PCI writes */ +} + + +/** + * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters + * @bnx2i_conn: iscsi connection to which new SQ entries belong + * @count: number of SQ WQEs to post + * + * this routine will update SQ driver parameters and ring the doorbell + */ +static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn, + int count) +{ + int tmp_cnt; + + if (count == 1) { + if (bnx2i_conn->ep->qp.sq_prod_qe == + bnx2i_conn->ep->qp.sq_last_qe) + bnx2i_conn->ep->qp.sq_prod_qe = + bnx2i_conn->ep->qp.sq_first_qe; + else + bnx2i_conn->ep->qp.sq_prod_qe++; + } else { + if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <= + bnx2i_conn->ep->qp.sq_last_qe) + bnx2i_conn->ep->qp.sq_prod_qe += count; + else { + tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe - + bnx2i_conn->ep->qp.sq_prod_qe; + bnx2i_conn->ep->qp.sq_prod_qe = + &bnx2i_conn->ep->qp.sq_first_qe[count - + (tmp_cnt + 1)]; + } + } + bnx2i_conn->ep->qp.sq_prod_idx += count; + /* Ring the doorbell */ + bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx); +} + + +/** + * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware + * @bnx2i_conn: iscsi connection + * @task: transport layer's command structure which is requesting + * a WQE to be sent to the chip for further processing + * + * prepare and post an iSCSI Login request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *task) +{ + struct bnx2i_cmd *bnx2i_cmd; + struct bnx2i_login_request *login_wqe; + struct iscsi_login *login_hdr; + u32 dword; + + bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; + login_hdr = (struct iscsi_login *)task->hdr; + login_wqe = (struct bnx2i_login_request *) + bnx2i_conn->ep->qp.sq_prod_qe; + + login_wqe->op_code = login_hdr->opcode; + login_wqe->op_attr = login_hdr->flags; + login_wqe->version_max = login_hdr->max_version; + login_wqe->version_min = login_hdr->min_version; + login_wqe->data_length = ntoh24(login_hdr->dlength); + login_wqe->isid_lo = *((u32 *) login_hdr->isid); + login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2); + login_wqe->tsih = login_hdr->tsih; + login_wqe->itt = task->itt | + (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT); + login_wqe->cid = login_hdr->cid; + + login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn); + login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn); + + login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma; + login_wqe->resp_bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32); + + dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) | + (bnx2i_conn->gen_pdu.resp_buf_size << + ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT)); + login_wqe->resp_buffer = dword; + login_wqe->flags = 0; + login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma; 
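+ /* + * The 64-bit DMA address of the request BD list is programmed into + * the WQE as two 32-bit halves: the low word above, the high word + * (via the >> 32 below) next. + */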
+ login_wqe->bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32); + login_wqe->num_bds = 1; + login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + +/** + * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware + * @bnx2i_conn: iscsi connection + * @mtask: driver command structure which is requesting + * a WQE to be sent to the chip for further processing + * + * prepare and post an iSCSI task management request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *mtask) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_tm *tmfabort_hdr; + struct scsi_cmnd *ref_sc; + struct iscsi_task *ctask; + struct bnx2i_cmd *bnx2i_cmd; + struct bnx2i_tmf_request *tmfabort_wqe; + u32 dword; + + bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; + tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; + tmfabort_wqe = (struct bnx2i_tmf_request *) + bnx2i_conn->ep->qp.sq_prod_qe; + + tmfabort_wqe->op_code = tmfabort_hdr->opcode; + tmfabort_wqe->op_attr = + ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK; + tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]); + tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]); + + tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14)); + tmfabort_wqe->reserved2 = 0; + tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn); + + ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt); + if (!ctask || !ctask->sc) + /* + * the iscsi layer must have completed the cmd while this + * was starting up. + */ + return 0; + ref_sc = ctask->sc; + + if (ref_sc->sc_data_direction == DMA_TO_DEVICE) + dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); + else + dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); + tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt); + tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); + + tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; + tmfabort_wqe->bd_list_addr_hi = (u32) + ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); + tmfabort_wqe->num_bds = 1; + tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + +/** + * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware + * @bnx2i_conn: iscsi connection + * @cmd: driver command structure which is requesting + * a WQE to be sent to the chip for further processing + * + * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn, + struct bnx2i_cmd *cmd) +{ + struct bnx2i_cmd_request *scsi_cmd_wqe; + + scsi_cmd_wqe = (struct bnx2i_cmd_request *) + bnx2i_conn->ep->qp.sq_prod_qe; + memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request)); + scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + +/** + * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware + * @bnx2i_conn: iscsi connection + * @task: transport layer's command structure which is requesting + * a WQE to be sent to the chip for further processing + * @ttt: TTT to be used when building pdu header + * @datap: payload buffer pointer + * @data_len: payload data length + * @unsol: indicates whether nopout pdu is unsolicited pdu or + * in response to target's NOPIN w/ TTT != FFFFFFFF + * + * prepare and post a nopout request WQE to CNIC 
firmware + */ +int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *task, u32 ttt, + char *datap, int data_len, int unsol) +{ + struct bnx2i_endpoint *ep = bnx2i_conn->ep; + struct bnx2i_cmd *bnx2i_cmd; + struct bnx2i_nop_out_request *nopout_wqe; + struct iscsi_nopout *nopout_hdr; + + bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; + nopout_hdr = (struct iscsi_nopout *)task->hdr; + nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe; + nopout_wqe->op_code = nopout_hdr->opcode; + nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL; + memcpy(nopout_wqe->lun, nopout_hdr->lun, 8); + + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { + u32 tmp = nopout_wqe->lun[0]; + /* 57710 requires LUN field in the WQE to be swapped */ + nopout_wqe->lun[0] = nopout_wqe->lun[1]; + nopout_wqe->lun[1] = tmp; + } + + nopout_wqe->itt = ((u16)task->itt | + (ISCSI_TASK_TYPE_MPATH << + ISCSI_TMF_REQUEST_TYPE_SHIFT)); + nopout_wqe->ttt = ttt; + nopout_wqe->flags = 0; + if (!unsol) + nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; + else if (nopout_hdr->itt == RESERVED_ITT) + nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; + + nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn); + nopout_wqe->data_length = data_len; + if (data_len) { + /* handle payload data, not required in first release */ + printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n"); + } else { + nopout_wqe->bd_list_addr_lo = (u32) + bnx2i_conn->hba->mp_bd_dma; + nopout_wqe->bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); + nopout_wqe->num_bds = 1; + } + nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + + +/** + * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware + * @bnx2i_conn: iscsi connection + * @task: transport layer's command structure which is requesting + * a WQE to be sent to the chip for further processing + * + * prepare and post logout request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *task) +{ + struct bnx2i_cmd *bnx2i_cmd; + struct bnx2i_logout_request *logout_wqe; + struct iscsi_logout *logout_hdr; + + bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; + logout_hdr = (struct iscsi_logout *)task->hdr; + + logout_wqe = (struct bnx2i_logout_request *) + bnx2i_conn->ep->qp.sq_prod_qe; + memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request)); + + logout_wqe->op_code = logout_hdr->opcode; + logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn); + logout_wqe->op_attr = + logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE; + logout_wqe->itt = ((u16)task->itt | + (ISCSI_TASK_TYPE_MPATH << + ISCSI_LOGOUT_REQUEST_TYPE_SHIFT)); + logout_wqe->data_length = 0; + logout_wqe->cid = 0; + + logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; + logout_wqe->bd_list_addr_hi = (u32) + ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); + logout_wqe->num_bds = 1; + logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + + +/** + * bnx2i_update_iscsi_conn - updates iSCSI connection parameters + * @conn: iscsi connection which requires iscsi parameter update + * + * sends down iSCSI Conn Update request to move iSCSI conn to FFP + */ +void bnx2i_update_iscsi_conn(struct iscsi_conn *conn) +{ + struct bnx2i_conn *bnx2i_conn = conn->dd_data; + struct bnx2i_hba *hba = bnx2i_conn->hba; + struct kwqe *kwqe_arr[2]; + struct iscsi_kwqe_conn_update *update_wqe; 
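+ /* + * The update KWQE is built in a stack-local buffer and handed to the + * cnic driver through submit_kwqes(), which is expected to consume it + * synchronously; no DMA-able memory is needed for the request itself. + */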
+ struct iscsi_kwqe_conn_update conn_update_kwqe; + + update_wqe = &conn_update_kwqe; + + update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN; + update_wqe->hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + /* 5771x requires conn context id to be passed as is */ + if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type)) + update_wqe->context_id = bnx2i_conn->ep->ep_cid; + else + update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7); + update_wqe->conn_flags = 0; + if (conn->hdrdgst_en) + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST; + if (conn->datadgst_en) + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST; + if (conn->session->initial_r2t_en) + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T; + if (conn->session->imm_data_en) + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA; + + update_wqe->max_send_pdu_length = conn->max_xmit_dlength; + update_wqe->max_recv_pdu_length = conn->max_recv_dlength; + update_wqe->first_burst_length = conn->session->first_burst; + update_wqe->max_burst_length = conn->session->max_burst; + update_wqe->exp_stat_sn = conn->exp_statsn; + update_wqe->max_outstanding_r2ts = conn->session->max_r2t; + update_wqe->session_error_recovery_level = conn->session->erl; + iscsi_conn_printk(KERN_ALERT, conn, + "bnx2i: conn update - MBL 0x%x FBL 0x%x " + "MRDSL_I 0x%x MRDSL_T 0x%x\n", + update_wqe->max_burst_length, + update_wqe->first_burst_length, + update_wqe->max_recv_pdu_length, + update_wqe->max_send_pdu_length); + + kwqe_arr[0] = (struct kwqe *) update_wqe; + if (hba->cnic && hba->cnic->submit_kwqes) + hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); +} + + +/** + * bnx2i_ep_ofld_timer - connection offload/destroy request timeout handler + * @data: endpoint (transport handle) structure pointer + * + * routine to handle connection offload/destroy request timeout + */ +void bnx2i_ep_ofld_timer(unsigned long data) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data; + + if (ep->state == EP_STATE_OFLD_START) { + printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n"); + ep->state = EP_STATE_OFLD_FAILED; + } else if (ep->state == EP_STATE_DISCONN_START) { + printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n"); + ep->state = EP_STATE_DISCONN_TIMEDOUT; + } else if (ep->state == EP_STATE_CLEANUP_START) { + printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n"); + ep->state = EP_STATE_CLEANUP_FAILED; + } + + wake_up_interruptible(&ep->ofld_wait); +} + + +static int bnx2i_power_of2(u32 val) +{ + u32 power = 0; + if (val & (val - 1)) + return power; + val--; + while (val) { + val = val >> 1; + power++; + } + return power; +} + + +/** + * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request + * @hba: adapter structure pointer + * @cmd: driver command structure which is requesting + * a WQE to be sent to the chip for further processing + * + * prepares and posts an ISCSI_OPCODE_CLEANUP_REQUEST SQ WQE + */ +void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) +{ + struct bnx2i_cleanup_request *cmd_cleanup; + + cmd_cleanup = + (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe; + memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request)); + + cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST; + cmd_cleanup->itt = cmd->req.itt; + cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(cmd->conn, 1); +} + + +/** + * bnx2i_send_conn_destroy - initiates iscsi connection teardown process + * 
@hba: adapter structure pointer + * @ep: endpoint (transport identifier) structure + * + * this routine prepares and posts a DESTROY_CONN KWQE to initiate the + * iscsi connection context clean-up process + */ +void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) +{ + struct kwqe *kwqe_arr[2]; + struct iscsi_kwqe_conn_destroy conn_cleanup; + + memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy)); + + conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN; + conn_cleanup.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + /* 5771x requires conn context id to be passed as is */ + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) + conn_cleanup.context_id = ep->ep_cid; + else + conn_cleanup.context_id = (ep->ep_cid >> 7); + + conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid; + + kwqe_arr[0] = (struct kwqe *) &conn_cleanup; + if (hba->cnic && hba->cnic->submit_kwqes) + hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); +} + + +/** + * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process + * @hba: adapter structure pointer + * @ep: endpoint (transport identifier) structure + * + * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE + */ +static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep) +{ + struct kwqe *kwqe_arr[2]; + struct iscsi_kwqe_conn_offload1 ofld_req1; + struct iscsi_kwqe_conn_offload2 ofld_req2; + dma_addr_t dma_addr; + int num_kwqes = 2; + u32 *ptbl; + + ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; + ofld_req1.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid; + + dma_addr = ep->qp.sq_pgtbl_phys; + ofld_req1.sq_page_table_addr_lo = (u32) dma_addr; + ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + dma_addr = ep->qp.cq_pgtbl_phys; + ofld_req1.cq_page_table_addr_lo = (u32) dma_addr; + ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2; + ofld_req2.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + dma_addr = ep->qp.rq_pgtbl_phys; + ofld_req2.rq_page_table_addr_lo = (u32) dma_addr; + ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + ptbl = (u32 *) ep->qp.sq_pgtbl_virt; + + ofld_req2.sq_first_pte.hi = *ptbl++; + ofld_req2.sq_first_pte.lo = *ptbl; + + ptbl = (u32 *) ep->qp.cq_pgtbl_virt; + ofld_req2.cq_first_pte.hi = *ptbl++; + ofld_req2.cq_first_pte.lo = *ptbl; + + kwqe_arr[0] = (struct kwqe *) &ofld_req1; + kwqe_arr[1] = (struct kwqe *) &ofld_req2; + ofld_req2.num_additional_wqes = 0; + + if (hba->cnic && hba->cnic->submit_kwqes) + hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); +} + + +/** + * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation + * @hba: adapter structure pointer + * @ep: endpoint (transport identifier) structure + * + * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE + */ +static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep) +{ + struct kwqe *kwqe_arr[5]; + struct iscsi_kwqe_conn_offload1 ofld_req1; + struct iscsi_kwqe_conn_offload2 ofld_req2; + struct iscsi_kwqe_conn_offload3 ofld_req3[1]; + dma_addr_t dma_addr; + int num_kwqes = 2; + u32 *ptbl; + + ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; + ofld_req1.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + 
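+ /* + * On 57710 the leading bytes of each queue's page table hold the + * queue's doorbell/index block (see BNX2I_5771x_QUE_DB_SIZE), which is + * why the page table addresses handed to the firmware below are + * advanced by ISCSI_SQ/CQ/RQ_DB_SIZE to skip that region. + */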
+ ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid; + + dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE; + ofld_req1.sq_page_table_addr_lo = (u32) dma_addr; + ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE; + ofld_req1.cq_page_table_addr_lo = (u32) dma_addr; + ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2; + ofld_req2.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE; + ofld_req2.rq_page_table_addr_lo = (u32) dma_addr; + ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); + ofld_req2.sq_first_pte.hi = *ptbl++; + ofld_req2.sq_first_pte.lo = *ptbl; + + ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); + ofld_req2.cq_first_pte.hi = *ptbl++; + ofld_req2.cq_first_pte.lo = *ptbl; + + kwqe_arr[0] = (struct kwqe *) &ofld_req1; + kwqe_arr[1] = (struct kwqe *) &ofld_req2; + + ofld_req2.num_additional_wqes = 1; + memset(ofld_req3, 0x00, sizeof(ofld_req3[0])); + ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); + ofld_req3[0].qp_first_pte[0].hi = *ptbl++; + ofld_req3[0].qp_first_pte[0].lo = *ptbl; + + kwqe_arr[2] = (struct kwqe *) ofld_req3; + /* need if we decide to go with multiple KCQE's per conn */ + num_kwqes += 1; + + if (hba->cnic && hba->cnic->submit_kwqes) + hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); +} + +/** + * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process + * + * @hba: adapter structure pointer + * @ep: endpoint (transport identifier) structure + * + * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE + */ +void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) +{ + if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) + bnx2i_5771x_send_conn_ofld_req(hba, ep); + else + bnx2i_570x_send_conn_ofld_req(hba, ep); +} + + +/** + * setup_qp_page_tables - iscsi QP page table setup function + * @ep: endpoint (transport identifier) structure + * + * Sets up page tables for SQ/RQ/CQ; 1G/sec (5706/5708/5709) devices require + * 64-bit addresses in big endian format, 
whereas 10G/sec (57710) requires + * PTEs in little endian format + */ +static void setup_qp_page_tables(struct bnx2i_endpoint *ep) +{ + int num_pages; + u32 *ptbl; + dma_addr_t page; + int cnic_dev_10g; + + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) + cnic_dev_10g = 1; + else + cnic_dev_10g = 0; + + /* SQ page table */ + memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); + num_pages = ep->qp.sq_mem_size / PAGE_SIZE; + page = ep->qp.sq_phys; + + if (cnic_dev_10g) + ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); + else + ptbl = (u32 *) ep->qp.sq_pgtbl_virt; + while (num_pages--) { + if (cnic_dev_10g) { + /* PTE is written in little endian format for 57710 */ + *ptbl = (u32) page; + ptbl++; + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + page += PAGE_SIZE; + } else { + /* PTE is written in big endian format for + * 5706/5708/5709 devices */ + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + *ptbl = (u32) page; + ptbl++; + page += PAGE_SIZE; + } + } + + /* RQ page table */ + memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); + num_pages = ep->qp.rq_mem_size / PAGE_SIZE; + page = ep->qp.rq_phys; + + if (cnic_dev_10g) + ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); + else + ptbl = (u32 *) ep->qp.rq_pgtbl_virt; + while (num_pages--) { + if (cnic_dev_10g) { + /* PTE is written in little endian format for 57710 */ + *ptbl = (u32) page; + ptbl++; + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + page += PAGE_SIZE; + } else { + /* PTE is written in big endian format for + * 5706/5708/5709 devices */ + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + *ptbl = (u32) page; + ptbl++; + page += PAGE_SIZE; + } + } + + /* CQ page table */ + memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); + num_pages = ep->qp.cq_mem_size / PAGE_SIZE; + page = ep->qp.cq_phys; + + if (cnic_dev_10g) + ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); + else + ptbl = (u32 *) ep->qp.cq_pgtbl_virt; + while (num_pages--) { + if (cnic_dev_10g) { + /* PTE is written in little endian format for 57710 */ + *ptbl = (u32) page; + ptbl++; + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + page += PAGE_SIZE; + } else { + /* PTE is written in big endian format for + * 5706/5708/5709 devices */ + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + *ptbl = (u32) page; + ptbl++; + page += PAGE_SIZE; + } + } +} + + +/** + * bnx2i_alloc_qp_resc - allocates required resources for QP. + * @hba: adapter structure pointer + * @ep: endpoint (transport identifier) structure + * + * Allocate QP (transport layer for iSCSI connection) resources, DMA'able + * memory for SQ/RQ/CQ and page tables. EP structure elements such + * as producer/consumer indexes/pointers, queue sizes and page table + * contents are set up + */ +int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) +{ + struct bnx2i_5771x_cq_db *cq_db; + + ep->hba = hba; + ep->conn = NULL; + ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0; + + /* Allocate page table memory for SQ which is page aligned */ + ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; + ep->qp.sq_mem_size = + (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + ep->qp.sq_pgtbl_size = + (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *); + ep->qp.sq_pgtbl_size = + (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + ep->qp.sq_pgtbl_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, + &ep->qp.sq_pgtbl_phys, GFP_KERNEL); + if (!ep->qp.sq_pgtbl_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n", + ep->qp.sq_pgtbl_size); + goto mem_alloc_err; + } + + /* Allocate memory area for actual SQ element */ + ep->qp.sq_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, + &ep->qp.sq_phys, GFP_KERNEL); + if (!ep->qp.sq_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n", + ep->qp.sq_mem_size); + goto mem_alloc_err; + } + + memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size); + ep->qp.sq_first_qe = ep->qp.sq_virt; + ep->qp.sq_prod_qe = ep->qp.sq_first_qe; + ep->qp.sq_cons_qe = ep->qp.sq_first_qe; + ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1]; + ep->qp.sq_prod_idx = 0; + ep->qp.sq_cons_idx = 0; + ep->qp.sqe_left = hba->max_sqes; + + /* Allocate page table memory for CQ which is page aligned */ + ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; + ep->qp.cq_mem_size = + (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + ep->qp.cq_pgtbl_size = + (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *); + ep->qp.cq_pgtbl_size = + (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + ep->qp.cq_pgtbl_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, + &ep->qp.cq_pgtbl_phys, GFP_KERNEL); + if (!ep->qp.cq_pgtbl_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n", + ep->qp.cq_pgtbl_size); + goto mem_alloc_err; + } + + /* Allocate memory area for actual CQ element */ + ep->qp.cq_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, + &ep->qp.cq_phys, GFP_KERNEL); + if (!ep->qp.cq_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n", + ep->qp.cq_mem_size); + goto mem_alloc_err; + } + memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size); + + ep->qp.cq_first_qe = ep->qp.cq_virt; + ep->qp.cq_prod_qe = ep->qp.cq_first_qe; + ep->qp.cq_cons_qe = ep->qp.cq_first_qe; + ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1]; + ep->qp.cq_prod_idx = 0; + ep->qp.cq_cons_idx = 0; + ep->qp.cqe_left = hba->max_cqes; + ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN; + ep->qp.cqe_size = hba->max_cqes; + + /* Invalidate all EQ CQE index, req only for 57710 */ + cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; + memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS); + + /* Allocate page table memory for RQ which is page aligned */ + ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; + ep->qp.rq_mem_size = + (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + ep->qp.rq_pgtbl_size = + (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *); + ep->qp.rq_pgtbl_size = + (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + ep->qp.rq_pgtbl_virt = + dma_alloc_coherent(&hba->pcidev->dev, 
ep->qp.rq_pgtbl_size, + &ep->qp.rq_pgtbl_phys, GFP_KERNEL); + if (!ep->qp.rq_pgtbl_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n", + ep->qp.rq_pgtbl_size); + goto mem_alloc_err; + } + + /* Allocate memory area for actual RQ element */ + ep->qp.rq_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, + &ep->qp.rq_phys, GFP_KERNEL); + if (!ep->qp.rq_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n", + ep->qp.rq_mem_size); + goto mem_alloc_err; + } + + ep->qp.rq_first_qe = ep->qp.rq_virt; + ep->qp.rq_prod_qe = ep->qp.rq_first_qe; + ep->qp.rq_cons_qe = ep->qp.rq_first_qe; + ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1]; + ep->qp.rq_prod_idx = 0x8000; + ep->qp.rq_cons_idx = 0; + ep->qp.rqe_left = hba->max_rqes; + + setup_qp_page_tables(ep); + + return 0; + +mem_alloc_err: + bnx2i_free_qp_resc(hba, ep); + return -ENOMEM; +} + + + +/** + * bnx2i_free_qp_resc - free memory resources held by QP + * @hba: adapter structure pointer + * @ep: endpoint (transport identifier) structure + * + * Free QP resources - SQ/RQ/CQ memory and page tables. + */ +void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) +{ + if (ep->qp.ctx_base) { + iounmap(ep->qp.ctx_base); + ep->qp.ctx_base = NULL; + } + /* Free SQ mem */ + if (ep->qp.sq_pgtbl_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, + ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys); + ep->qp.sq_pgtbl_virt = NULL; + ep->qp.sq_pgtbl_phys = 0; + } + if (ep->qp.sq_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, + ep->qp.sq_virt, ep->qp.sq_phys); + ep->qp.sq_virt = NULL; + ep->qp.sq_phys = 0; + } + + /* Free RQ mem */ + if (ep->qp.rq_pgtbl_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, + ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys); + ep->qp.rq_pgtbl_virt = NULL; + ep->qp.rq_pgtbl_phys = 0; + } + if (ep->qp.rq_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, + ep->qp.rq_virt, ep->qp.rq_phys); + ep->qp.rq_virt = NULL; + ep->qp.rq_phys = 0; + } + + /* Free CQ mem */ + if (ep->qp.cq_pgtbl_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, + ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys); + ep->qp.cq_pgtbl_virt = NULL; + ep->qp.cq_pgtbl_phys = 0; + } + if (ep->qp.cq_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, + ep->qp.cq_virt, ep->qp.cq_phys); + ep->qp.cq_virt = NULL; + ep->qp.cq_phys = 0; + } +} + + +/** + * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w + * @hba: adapter structure pointer + * + * Send down iscsi_init KWQEs which initiate the initial handshake with the + * f/w. This results in iSCSI support validation and on-chip context manager + * initialization. Firmware completes this handshake with a CQE carrying + * the result of iscsi support validation. 
Parameters carried by the + * iscsi init request determine the number of offloaded connections and the + * tolerance level for iscsi protocol violations this hba/chip can support + */ +int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) +{ + struct kwqe *kwqe_arr[3]; + struct iscsi_kwqe_init1 iscsi_init; + struct iscsi_kwqe_init2 iscsi_init2; + int rc = 0; + u64 mask64; + + bnx2i_adjust_qp_size(hba); + + iscsi_init.flags = + ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; + if (en_tcp_dack) + iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE; + iscsi_init.reserved0 = 0; + iscsi_init.num_cqs = 1; + iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1; + iscsi_init.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma; + iscsi_init.dummy_buffer_addr_hi = + (u32) ((u64) hba->dummy_buf_dma >> 32); + + hba->ctx_ccell_tasks = + ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); + iscsi_init.num_ccells_per_conn = hba->num_ccell; + iscsi_init.num_tasks_per_conn = hba->max_sqes; + iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; + iscsi_init.sq_num_wqes = hba->max_sqes; + iscsi_init.cq_log_wqes_per_page = + (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE); + iscsi_init.cq_num_wqes = hba->max_cqes; + iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + + (PAGE_SIZE - 1)) / PAGE_SIZE; + iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + + (PAGE_SIZE - 1)) / PAGE_SIZE; + iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE; + iscsi_init.rq_num_wqes = hba->max_rqes; + + + iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2; + iscsi_init2.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1; + mask64 = 0x0ULL; + mask64 |= ( + /* CISCO MDS */ + (1UL << + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) | + /* HP MSA1510i */ + (1UL << + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) | + /* EMC */ + (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN)); + if (error_mask1) + iscsi_init2.error_bit_map[0] = error_mask1; + else + iscsi_init2.error_bit_map[0] = (u32) mask64; + + if (error_mask2) + iscsi_init2.error_bit_map[1] = error_mask2; + else + iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32); + + iscsi_error_mask = mask64; + + kwqe_arr[0] = (struct kwqe *) &iscsi_init; + kwqe_arr[1] = (struct kwqe *) &iscsi_init2; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2); + return rc; +} + + +/** + * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion. 
+ * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process SCSI CMD Response CQE & complete the request to SCSI-ML + */ +static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct bnx2i_cmd_response *resp_cqe; + struct bnx2i_cmd *bnx2i_cmd; + struct iscsi_task *task; + struct iscsi_cmd_rsp *hdr; + u32 datalen = 0; + + resp_cqe = (struct bnx2i_cmd_response *)cqe; + spin_lock(&session->lock); + task = iscsi_itt_to_task(conn, + resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX); + if (!task) + goto fail; + + bnx2i_cmd = task->dd_data; + + if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) { + conn->datain_pdus_cnt += + resp_cqe->task_stat.read_stat.num_data_outs; + conn->rxdata_octets += + bnx2i_cmd->req.total_data_transfer_length; + } else { + conn->dataout_pdus_cnt += + resp_cqe->task_stat.read_stat.num_data_outs; + conn->r2t_pdus_cnt += + resp_cqe->task_stat.read_stat.num_r2ts; + conn->txdata_octets += + bnx2i_cmd->req.total_data_transfer_length; + } + bnx2i_iscsi_unmap_sg_list(bnx2i_cmd); + + hdr = (struct iscsi_cmd_rsp *)task->hdr; + hdr->opcode = resp_cqe->op_code; + hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn); + hdr->response = resp_cqe->response; + hdr->cmd_status = resp_cqe->status; + hdr->flags = resp_cqe->response_flags; + hdr->residual_count = cpu_to_be32(resp_cqe->residual_count); + + if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN) + goto done; + + if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) { + datalen = resp_cqe->data_length; + if (datalen < 2) + goto done; + + if (datalen > BNX2I_RQ_WQE_SIZE) { + iscsi_conn_printk(KERN_ERR, conn, + "sense data len %d > RQ sz\n", + datalen); + datalen = BNX2I_RQ_WQE_SIZE; + } else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) { + iscsi_conn_printk(KERN_ERR, conn, + "sense data len %d > conn data\n", + datalen); + datalen = ISCSI_DEF_MAX_RECV_SEG_LEN; + } + + bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen); + bnx2i_put_rq_buf(bnx2i_cmd->conn, 1); + } + +done: + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, + conn->data, datalen); +fail: + spin_unlock(&session->lock); + return 0; +} + + +/** + * bnx2i_process_login_resp - this function handles iscsi login response + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process Login Response CQE & complete it to open-iscsi user daemon + */ +static int bnx2i_process_login_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + struct bnx2i_login_response *login; + struct iscsi_login_rsp *resp_hdr; + int pld_len; + int pad_len; + + login = (struct bnx2i_login_response *) cqe; + spin_lock(&session->lock); + task = iscsi_itt_to_task(conn, + login->itt & ISCSI_LOGIN_RESPONSE_INDEX); + if (!task) + goto done; + + resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = login->op_code; + resp_hdr->flags = login->response_flags; + resp_hdr->max_version = login->version_max; + resp_hdr->active_version = login->version_active; + resp_hdr->hlength = 0; + + hton24(resp_hdr->dlength, login->data_length); + memcpy(resp_hdr->isid, 
&login->isid_lo, 6); + resp_hdr->tsih = cpu_to_be16(login->tsih); + resp_hdr->itt = task->hdr->itt; + resp_hdr->statsn = cpu_to_be32(login->stat_sn); + resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn); + resp_hdr->status_class = login->status_class; + resp_hdr->status_detail = login->status_detail; + pld_len = login->data_length; + bnx2i_conn->gen_pdu.resp_wr_ptr = + bnx2i_conn->gen_pdu.resp_buf + pld_len; + + pad_len = 0; + if (pld_len & 0x3) + pad_len = 4 - (pld_len % 4); + + if (pad_len) { + int i = 0; + for (i = 0; i < pad_len; i++) { + bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0; + bnx2i_conn->gen_pdu.resp_wr_ptr++; + } + } + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, + bnx2i_conn->gen_pdu.resp_buf, + bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf); +done: + spin_unlock(&session->lock); + return 0; +} + +/** + * bnx2i_process_tmf_resp - this function handles iscsi TMF response + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI TMF Response CQE and wake up the driver eh thread. + */ +static int bnx2i_process_tmf_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + struct bnx2i_tmf_response *tmf_cqe; + struct iscsi_tm_rsp *resp_hdr; + + tmf_cqe = (struct bnx2i_tmf_response *)cqe; + spin_lock(&session->lock); + task = iscsi_itt_to_task(conn, + tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX); + if (!task) + goto done; + + resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = tmf_cqe->op_code; + resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn); + resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn); + resp_hdr->itt = task->hdr->itt; + resp_hdr->response = tmf_cqe->response; + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0); +done: + spin_unlock(&session->lock); + return 0; +} + +/** + * bnx2i_process_logout_resp - this function handles iscsi logout response + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI Logout Response CQE & make function call to + * notify the user daemon. 
+ */ +static int bnx2i_process_logout_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + struct bnx2i_logout_response *logout; + struct iscsi_logout_rsp *resp_hdr; + + logout = (struct bnx2i_logout_response *) cqe; + spin_lock(&session->lock); + task = iscsi_itt_to_task(conn, + logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX); + if (!task) + goto done; + + resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = logout->op_code; + resp_hdr->flags = logout->response; + resp_hdr->hlength = 0; + + resp_hdr->itt = task->hdr->itt; + resp_hdr->statsn = task->hdr->exp_statsn; + resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn); + + resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait); + resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain); + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0); +done: + spin_unlock(&session->lock); + return 0; +} + +/** + * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI NOPIN local completion CQE, frees ITT and command structures + */ +static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct bnx2i_nop_in_msg *nop_in; + struct iscsi_task *task; + + nop_in = (struct bnx2i_nop_in_msg *)cqe; + spin_lock(&session->lock); + task = iscsi_itt_to_task(conn, + nop_in->itt & ISCSI_NOP_IN_MSG_INDEX); + if (task) + iscsi_put_task(task); + spin_unlock(&session->lock); +} + +/** + * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is + * received + * @bnx2i_conn: iscsi connection + * + * Firmware advances RQ producer index for every unsolicited PDU even if + * payload data length is '0'. 
This function makes corresponding + * adjustments on the driver side to match this f/w behavior + */ +static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn) +{ + char dummy_rq_data[2]; + bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1); + bnx2i_put_rq_buf(bnx2i_conn, 1); +} + + +/** + * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI target's proactive iSCSI NOPIN request + */ +static int bnx2i_process_nopin_mesg(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + struct bnx2i_nop_in_msg *nop_in; + struct iscsi_nopin *hdr; + u32 itt; + int tgt_async_nop = 0; + + nop_in = (struct bnx2i_nop_in_msg *)cqe; + itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX; + + spin_lock(&session->lock); + hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr; + memset(hdr, 0, sizeof(struct iscsi_hdr)); + hdr->opcode = nop_in->op_code; + hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn); + hdr->ttt = cpu_to_be32(nop_in->ttt); + + if (itt == (u16) RESERVED_ITT) { + bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); + hdr->itt = RESERVED_ITT; + tgt_async_nop = 1; + goto done; + } + + /* this is a response to one of our nop-outs */ + task = iscsi_itt_to_task(conn, itt); + if (task) { + hdr->flags = ISCSI_FLAG_CMD_FINAL; + hdr->itt = task->hdr->itt; + hdr->ttt = cpu_to_be32(nop_in->ttt); + memcpy(hdr->lun, nop_in->lun, 8); + } +done: + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); + spin_unlock(&session->lock); + + return tgt_async_nop; +} + + +/** + * bnx2i_process_async_mesg - this function handles iscsi async message + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI ASYNC Message + */ +static void bnx2i_process_async_mesg(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct bnx2i_async_msg *async_cqe; + struct iscsi_async *resp_hdr; + u8 async_event; + + bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); + + async_cqe = (struct bnx2i_async_msg *)cqe; + async_event = async_cqe->async_event; + + if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) { + iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, + "async: scsi events not supported\n"); + return; + } + + spin_lock(&session->lock); + resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = async_cqe->op_code; + resp_hdr->flags = 0x80; + + memcpy(resp_hdr->lun, async_cqe->lun, 8); + resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn); + + resp_hdr->async_event = async_cqe->async_event; + resp_hdr->async_vcode = async_cqe->async_vcode; + + resp_hdr->param1 = cpu_to_be16(async_cqe->param1); + resp_hdr->param2 = cpu_to_be16(async_cqe->param2); + resp_hdr->param3 = cpu_to_be16(async_cqe->param3); + + __iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data, + (struct iscsi_hdr *)resp_hdr, NULL, 0); + spin_unlock(&session->lock); +} + + +/** + * bnx2i_process_reject_mesg - process iscsi reject pdu + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + 
* + * process iSCSI REJECT message + */ +static void bnx2i_process_reject_mesg(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct bnx2i_reject_msg *reject; + struct iscsi_reject *hdr; + + reject = (struct bnx2i_reject_msg *) cqe; + if (reject->data_length) { + bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length); + bnx2i_put_rq_buf(bnx2i_conn, 1); + } else + bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); + + spin_lock(&session->lock); + hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr; + memset(hdr, 0, sizeof(struct iscsi_hdr)); + hdr->opcode = reject->op_code; + hdr->reason = reject->reason; + hton24(hdr->dlength, reject->data_length); + hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn); + hdr->ffffffff = cpu_to_be32(RESERVED_ITT); + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data, + reject->data_length); + spin_unlock(&session->lock); +} + +/** + * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process command cleanup response CQE during conn shutdown or error recovery + */ +static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct bnx2i_cleanup_response *cmd_clean_rsp; + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + + cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe; + spin_lock(&session->lock); + task = iscsi_itt_to_task(conn, + cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX); + if (!task) + printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n", + cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX); + spin_unlock(&session->lock); + complete(&bnx2i_conn->cmd_cleanup_cmpl); +} + + + +/** + * bnx2i_process_new_cqes - process newly DMA'ed CQE's + * @bnx2i_conn: iscsi connection + * + * this function is called by generic KCQ handler to process all pending CQE's + */ +static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct qp_info *qp = &bnx2i_conn->ep->qp; + struct bnx2i_nop_in_msg *nopin; + int tgt_async_msg; + + while (1) { + nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe; + if (nopin->cq_req_sn != qp->cqe_exp_seq_sn) + break; + + if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) + break; + + tgt_async_msg = 0; + + switch (nopin->op_code) { + case ISCSI_OP_SCSI_CMD_RSP: + case ISCSI_OP_SCSI_DATA_IN: + bnx2i_process_scsi_cmd_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OP_LOGIN_RSP: + bnx2i_process_login_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OP_SCSI_TMFUNC_RSP: + bnx2i_process_tmf_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OP_LOGOUT_RSP: + bnx2i_process_logout_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OP_NOOP_IN: + if (bnx2i_process_nopin_mesg(session, bnx2i_conn, + qp->cq_cons_qe)) + tgt_async_msg = 1; + break; + case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION: + bnx2i_process_nopin_local_cmpl(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OP_ASYNC_EVENT: + bnx2i_process_async_mesg(session, bnx2i_conn, + qp->cq_cons_qe); + tgt_async_msg = 1; + break; + case 
ISCSI_OP_REJECT: + bnx2i_process_reject_mesg(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OPCODE_CLEANUP_RESPONSE: + bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; + default: + printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", + nopin->op_code); + } + + if (!tgt_async_msg) + bnx2i_conn->ep->num_active_cmds--; + + /* clear out in production version only, till beta keep opcode + * field intact, will be helpful in debugging (context dump) + * nopin->op_code = 0; + */ + qp->cqe_exp_seq_sn++; + if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1)) + qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN; + + if (qp->cq_cons_qe == qp->cq_last_qe) { + qp->cq_cons_qe = qp->cq_first_qe; + qp->cq_cons_idx = 0; + } else { + qp->cq_cons_qe++; + qp->cq_cons_idx++; + } + } + bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE); +} + +/** + * bnx2i_fastpath_notification - process global event queue (KCQ) + * @hba: adapter structure pointer + * @new_cqe_kcqe: pointer to newly DMA'ed KCQE entry + * + * Fast path event notification handler, KCQ entry carries context id + * of the connection that has 1 or more pending CQ entries + */ +static void bnx2i_fastpath_notification(struct bnx2i_hba *hba, + struct iscsi_kcqe *new_cqe_kcqe) +{ + struct bnx2i_conn *conn; + u32 iscsi_cid; + + iscsi_cid = new_cqe_kcqe->iscsi_conn_id; + conn = bnx2i_get_conn_from_id(hba, iscsi_cid); + + if (!conn) { + printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid); + return; + } + if (!conn->ep) { + printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid); + return; + } + + bnx2i_process_new_cqes(conn); +} + + +/** + * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE + * @hba: adapter structure pointer + * @update_kcqe: kcqe pointer + * + * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration + */ +static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba, + struct iscsi_kcqe *update_kcqe) +{ + struct bnx2i_conn *conn; + u32 iscsi_cid; + + iscsi_cid = update_kcqe->iscsi_conn_id; + conn = bnx2i_get_conn_from_id(hba, iscsi_cid); + + if (!conn) { + printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid); + return; + } + if (!conn->ep) { + printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid); + return; + } + + if (update_kcqe->completion_status) { + printk(KERN_ALERT "request failed cid %x\n", iscsi_cid); + conn->ep->state = EP_STATE_ULP_UPDATE_FAILED; + } else + conn->ep->state = EP_STATE_ULP_UPDATE_COMPL; + + wake_up_interruptible(&conn->ep->ofld_wait); +} + + +/** + * bnx2i_recovery_que_add_conn - add connection to recovery queue + * @hba: adapter structure pointer + * @bnx2i_conn: iscsi connection + * + * Add connection to recovery queue and schedule adapter eh worker + */ +static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba, + struct bnx2i_conn *bnx2i_conn) +{ + iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data, + ISCSI_ERR_CONN_FAILED); +} + + +/** + * bnx2i_process_tcp_error - process error notification on a given connection + * + * @hba: adapter structure pointer + * @tcp_err: tcp error kcqe pointer + * + * handles tcp level error notifications from FW. 
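+ *
+ * Unlike iSCSI protocol errors, which the firmware may classify as mere
+ * warnings, a TCP-level error is always treated as fatal and the
+ * affected connection is queued for session recovery.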
+ */ +static void bnx2i_process_tcp_error(struct bnx2i_hba *hba, + struct iscsi_kcqe *tcp_err) +{ + struct bnx2i_conn *bnx2i_conn; + u32 iscsi_cid; + + iscsi_cid = tcp_err->iscsi_conn_id; + bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); + + if (!bnx2i_conn) { + printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid); + return; + } + + printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n", + iscsi_cid, tcp_err->completion_status); + bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn); +} + + +/** + * bnx2i_process_iscsi_error - process error notification on a given connection + * @hba: adapter structure pointer + * @iscsi_err: iscsi error kcqe pointer + * + * handles iscsi error notifications from the FW. Firmware based in initial + * handshake classifies iscsi protocol / TCP rfc violation into either + * warning or error indications. If indication is of "Error" type, driver + * will initiate session recovery for that connection/session. For + * "Warning" type indication, driver will put out a system log message + * (there will be only one message for each type for the life of the + * session, this is to avoid un-necessarily overloading the system) + */ +static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba, + struct iscsi_kcqe *iscsi_err) +{ + struct bnx2i_conn *bnx2i_conn; + u32 iscsi_cid; + char warn_notice[] = "iscsi_warning"; + char error_notice[] = "iscsi_error"; + char additional_notice[64]; + char *message; + int need_recovery; + u64 err_mask64; + + iscsi_cid = iscsi_err->iscsi_conn_id; + bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); + if (!bnx2i_conn) { + printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid); + return; + } + + err_mask64 = (0x1ULL << iscsi_err->completion_status); + + if (err_mask64 & iscsi_error_mask) { + need_recovery = 0; + message = warn_notice; + } else { + need_recovery = 1; + message = error_notice; + } + + switch (iscsi_err->completion_status) { + case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR: + strcpy(additional_notice, "hdr digest err"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR: + strcpy(additional_notice, "data digest err"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE: + strcpy(additional_notice, "wrong opcode rcvd"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN: + strcpy(additional_notice, "AHS len > 0 rcvd"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT: + strcpy(additional_notice, "invalid ITT rcvd"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN: + strcpy(additional_notice, "wrong StatSN rcvd"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN: + strcpy(additional_notice, "wrong DataSN rcvd"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T: + strcpy(additional_notice, "pend R2T violation"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0: + strcpy(additional_notice, "ERL0, UO"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1: + strcpy(additional_notice, "ERL0, U1"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2: + strcpy(additional_notice, "ERL0, U2"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3: + strcpy(additional_notice, "ERL0, U3"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4: + strcpy(additional_notice, "ERL0, U4"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5: + strcpy(additional_notice, "ERL0, U5"); + break; + case 
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6: + strcpy(additional_notice, "ERL0, U6"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN: + strcpy(additional_notice, "invalid resi len"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN: + strcpy(additional_notice, "MRDSL violation"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO: + strcpy(additional_notice, "F-bit not set"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV: + strcpy(additional_notice, "invalid TTT"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN: + strcpy(additional_notice, "invalid DataSN"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN: + strcpy(additional_notice, "burst len violation"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF: + strcpy(additional_notice, "buf offset violation"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN: + strcpy(additional_notice, "invalid LUN field"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN: + strcpy(additional_notice, "invalid R2TSN field"); + break; +#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \ + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 + case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0: + strcpy(additional_notice, "invalid cmd len1"); + break; +#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \ + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 + case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1: + strcpy(additional_notice, "invalid cmd len2"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED: + strcpy(additional_notice, + "pend r2t exceeds MaxOutstandingR2T value"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV: + strcpy(additional_notice, "TTT is rsvd"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN: + strcpy(additional_notice, "MBL violation"); + break; +#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \ + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO + case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO: + strcpy(additional_notice, "data seg len != 0"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN: + strcpy(additional_notice, "reject pdu len error"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN: + strcpy(additional_notice, "async pdu len error"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN: + strcpy(additional_notice, "nopin pdu len error"); + break; +#define BNX2_ERR_PEND_R2T_IN_CLEANUP \ + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP + case BNX2_ERR_PEND_R2T_IN_CLEANUP: + strcpy(additional_notice, "pend r2t in cleanup"); + break; + + case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT: + strcpy(additional_notice, "IP fragments rcvd"); + break; + case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS: + strcpy(additional_notice, "IP options error"); + break; + case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG: + strcpy(additional_notice, "urgent flag error"); + break; + default: + printk(KERN_ALERT "iscsi_err - unknown err %x\n", + iscsi_err->completion_status); + } + + if (need_recovery) { + iscsi_conn_printk(KERN_ALERT, + bnx2i_conn->cls_conn->dd_data, + "bnx2i: %s - %s\n", + message, additional_notice); + + iscsi_conn_printk(KERN_ALERT, + bnx2i_conn->cls_conn->dd_data, + "conn_err - hostno %d conn %p, " + "iscsi_cid %x cid %x\n", + bnx2i_conn->hba->shost->host_no, + bnx2i_conn, 
bnx2i_conn->ep->ep_iscsi_cid,
+				  bnx2i_conn->ep->ep_cid);
+		bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
+	} else
+		if (!test_and_set_bit(iscsi_err->completion_status,
+				      (void *) &bnx2i_conn->violation_notified))
+			iscsi_conn_printk(KERN_ALERT,
+					  bnx2i_conn->cls_conn->dd_data,
+					  "bnx2i: %s - %s\n",
+					  message, additional_notice);
+}
+
+
+/**
+ * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
+ * @hba: adapter structure pointer
+ * @conn_destroy: conn destroy kcqe pointer
+ *
+ * handles connection destroy completion request.
+ */
+static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
+					    struct iscsi_kcqe *conn_destroy)
+{
+	struct bnx2i_endpoint *ep;
+
+	ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
+	if (!ep) {
+		printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
+		       "offload request, unexpected completion\n");
+		return;
+	}
+
+	if (hba != ep->hba) {
+		printk(KERN_ALERT "conn destroy - error, hba mismatch\n");
+		return;
+	}
+
+	if (conn_destroy->completion_status) {
+		printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
+		ep->state = EP_STATE_CLEANUP_FAILED;
+	} else
+		ep->state = EP_STATE_CLEANUP_CMPL;
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
+ * @hba: adapter structure pointer
+ * @ofld_kcqe: conn offload kcqe pointer
+ *
+ * handles initial connection offload completion, ep_connect() thread is
+ * woken up to continue with LLP connect process
+ */
+static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
+				    struct iscsi_kcqe *ofld_kcqe)
+{
+	u32 cid_addr;
+	struct bnx2i_endpoint *ep;
+	u32 cid_num;
+
+	ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
+	if (!ep) {
+		printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
+		return;
+	}
+
+	if (hba != ep->hba) {
+		printk(KERN_ALERT "ofld_cmpl: error, hba mismatch\n");
+		return;
+	}
+
+	if (ofld_kcqe->completion_status) {
+		if (ofld_kcqe->completion_status ==
+		    ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
+			printk(KERN_ALERT "bnx2i: unable to allocate"
+			       " iSCSI context resources\n");
+		ep->state = EP_STATE_OFLD_FAILED;
+	} else {
+		ep->state = EP_STATE_OFLD_COMPL;
+		cid_addr = ofld_kcqe->iscsi_conn_context_id;
+		cid_num = bnx2i_get_cid_num(ep);
+		ep->ep_cid = cid_addr;
+		ep->qp.ctx_base = NULL;
+	}
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+/**
+ * bnx2i_indicate_kcqe - generic KCQ event handler/dispatcher
+ * @context: adapter structure pointer
+ * @kcqe: array of pending KCQ entries
+ * @num_cqe: number of KCQ entries in @kcqe
+ *
+ * Dispatches each pending KCQ entry to the handler matching its opcode
+ */
+static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
+				u32 num_cqe)
+{
+	struct bnx2i_hba *hba = context;
+	int i = 0;
+	struct iscsi_kcqe *ikcqe = NULL;
+
+	while (i < num_cqe) {
+		ikcqe = (struct iscsi_kcqe *) kcqe[i++];
+
+		if (ikcqe->op_code ==
+		    ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
+			bnx2i_fastpath_notification(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
+			bnx2i_process_ofld_cmpl(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
+			bnx2i_process_update_conn_cmpl(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
+			if (ikcqe->completion_status !=
+			    ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
+				bnx2i_iscsi_license_error(hba,
+						ikcqe->completion_status);
+			else {
+				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+				bnx2i_get_link_state(hba);
+				printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
+						 "ISCSI_INIT passed\n",
+						 (u8)hba->pcidev->bus->number,
+						 
hba->pci_devno, + (u8)hba->pci_func); + + + } + } else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN) + bnx2i_process_conn_destroy_cmpl(hba, ikcqe); + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR) + bnx2i_process_iscsi_error(hba, ikcqe); + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR) + bnx2i_process_tcp_error(hba, ikcqe); + else + printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", + ikcqe->op_code); + } +} + + +/** + * bnx2i_indicate_netevent - Generic netdev event handler + * @context: adapter structure pointer + * @event: event type + * + * Handles four netdev events, NETDEV_UP, NETDEV_DOWN, + * NETDEV_GOING_DOWN and NETDEV_CHANGE + */ +static void bnx2i_indicate_netevent(void *context, unsigned long event) +{ + struct bnx2i_hba *hba = context; + + switch (event) { + case NETDEV_UP: + if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) + bnx2i_send_fw_iscsi_init_msg(hba); + break; + case NETDEV_DOWN: + clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); + break; + case NETDEV_GOING_DOWN: + set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + iscsi_host_for_each_session(hba->shost, + bnx2i_drop_session); + break; + case NETDEV_CHANGE: + bnx2i_get_link_state(hba); + break; + default: + ; + } +} + + +/** + * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion + * @cm_sk: cnic sock structure pointer + * + * function callback exported via bnx2i - cnic driver interface to + * indicate completion of option-2 TCP connect request. + */ +static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; + + if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state)) + ep->state = EP_STATE_CONNECT_FAILED; + else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags)) + ep->state = EP_STATE_CONNECT_COMPL; + else + ep->state = EP_STATE_CONNECT_FAILED; + + wake_up_interruptible(&ep->ofld_wait); +} + + +/** + * bnx2i_cm_close_cmpl - process tcp conn close completion + * @cm_sk: cnic sock structure pointer + * + * function callback exported via bnx2i - cnic driver interface to + * indicate completion of option-2 graceful TCP connect shutdown + */ +static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; + + ep->state = EP_STATE_DISCONN_COMPL; + wake_up_interruptible(&ep->ofld_wait); +} + + +/** + * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion + * @cm_sk: cnic sock structure pointer + * + * function callback exported via bnx2i - cnic driver interface to + * indicate completion of option-2 abortive TCP connect termination + */ +static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; + + ep->state = EP_STATE_DISCONN_COMPL; + wake_up_interruptible(&ep->ofld_wait); +} + + +/** + * bnx2i_cm_remote_close - process received TCP FIN + * @hba: adapter structure pointer + * @update_kcqe: kcqe pointer + * + * function callback exported via bnx2i - cnic driver interface to indicate + * async TCP events such as FIN + */ +static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; + + ep->state = EP_STATE_TCP_FIN_RCVD; + if (ep->conn) + bnx2i_recovery_que_add_conn(ep->hba, ep->conn); +} + +/** + * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup + * @hba: adapter structure pointer + * 
@update_kcqe: kcqe pointer + * + * function callback exported via bnx2i - cnic driver interface to + * indicate async TCP events (RST) sent by the peer. + */ +static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; + + ep->state = EP_STATE_TCP_RST_RCVD; + if (ep->conn) + bnx2i_recovery_que_add_conn(ep->hba, ep->conn); +} + + +static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type, + char *buf, u16 buflen) +{ + struct bnx2i_hba *hba; + + hba = bnx2i_find_hba_for_cnic(dev); + if (!hba) + return; + + if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport, + msg_type, buf, buflen)) + printk(KERN_ALERT "bnx2i: private nl message send error\n"); + +} + + +/** + * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure + * carrying callback function pointers + * + */ +struct cnic_ulp_ops bnx2i_cnic_cb = { + .cnic_init = bnx2i_ulp_init, + .cnic_exit = bnx2i_ulp_exit, + .cnic_start = bnx2i_start, + .cnic_stop = bnx2i_stop, + .indicate_kcqes = bnx2i_indicate_kcqe, + .indicate_netevent = bnx2i_indicate_netevent, + .cm_connect_complete = bnx2i_cm_connect_cmpl, + .cm_close_complete = bnx2i_cm_close_cmpl, + .cm_abort_complete = bnx2i_cm_abort_cmpl, + .cm_remote_close = bnx2i_cm_remote_close, + .cm_remote_abort = bnx2i_cm_remote_abort, + .iscsi_nl_send_msg = bnx2i_send_nl_mesg, + .owner = THIS_MODULE +}; + + +/** + * bnx2i_map_ep_dbell_regs - map connection doorbell registers + * @ep: bnx2i endpoint + * + * maps connection's SQ and RQ doorbell registers, 5706/5708/5709 hosts these + * register in BAR #0. Whereas in 57710 these register are accessed by + * mapping BAR #1 + */ +int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) +{ + u32 cid_num; + u32 reg_off; + u32 first_l4l5; + u32 ctx_sz; + u32 config2; + resource_size_t reg_base; + + cid_num = bnx2i_get_cid_num(ep); + + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { + reg_base = pci_resource_start(ep->hba->pcidev, + BNX2X_DOORBELL_PCI_BAR); + reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE; + ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); + goto arm_cq; + } + + reg_base = ep->hba->netdev->base_addr; + if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) && + (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) { + config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2); + first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5; + ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3; + if (ctx_sz) + reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE + + PAGE_SIZE * + (((cid_num - first_l4l5) / ctx_sz) + 256); + else + reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num); + } else + /* 5709 device in normal node and 5706/5708 devices */ + reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num); + + ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, + MB_KERNEL_CTX_SIZE); + if (!ep->qp.ctx_base) + return -ENOMEM; + +arm_cq: + bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE); + return 0; +} diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c new file mode 100644 index 000000000000..ae4b2d588fd3 --- /dev/null +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -0,0 +1,438 @@ +/* bnx2i.c: Broadcom NetXtreme II iSCSI driver. + * + * Copyright (c) 2006 - 2009 Broadcom Corporation + * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 
+ * Copyright (c) 2007, 2008 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + */ + +#include "bnx2i.h" + +static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); +static u32 adapter_count; +static int bnx2i_reg_device; + +#define DRV_MODULE_NAME "bnx2i" +#define DRV_MODULE_VERSION "2.0.1d" +#define DRV_MODULE_RELDATE "Mar 25, 2009" + +static char version[] __devinitdata = + "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ + " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + + +MODULE_AUTHOR("Anil Veerabhadrappa "); +MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +static DEFINE_RWLOCK(bnx2i_dev_lock); + +unsigned int event_coal_div = 1; +module_param(event_coal_div, int, 0664); +MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor"); + +unsigned int en_tcp_dack = 1; +module_param(en_tcp_dack, int, 0664); +MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK"); + +unsigned int error_mask1 = 0x00; +module_param(error_mask1, int, 0664); +MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1"); + +unsigned int error_mask2 = 0x00; +module_param(error_mask2, int, 0664); +MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2"); + +unsigned int sq_size; +module_param(sq_size, int, 0664); +MODULE_PARM_DESC(sq_size, "Configure SQ size"); + +unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT; +module_param(rq_size, int, 0664); +MODULE_PARM_DESC(rq_size, "Configure RQ size"); + +u64 iscsi_error_mask = 0x00; + +static void bnx2i_unreg_one_device(struct bnx2i_hba *hba) ; + + +/** + * bnx2i_identify_device - identifies NetXtreme II device type + * @hba: Adapter structure pointer + * + * This function identifies the NX2 device type and sets appropriate + * queue mailbox register access method, 5709 requires driver to + * access MBOX regs using *bin* mode + */ +void bnx2i_identify_device(struct bnx2i_hba *hba) +{ + hba->cnic_dev_type = 0; + if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) || + (hba->pci_did == PCI_DEVICE_ID_NX2_5706S)) + set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type); + else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) || + (hba->pci_did == PCI_DEVICE_ID_NX2_5708S)) + set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type); + else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) || + (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) { + set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type); + hba->mail_queue_access = BNX2I_MQ_BIN_MODE; + } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 || + hba->pci_did == PCI_DEVICE_ID_NX2_57711) + set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type); +} + + +/** + * get_adapter_list_head - returns head of adapter list + */ +struct bnx2i_hba *get_adapter_list_head(void) +{ + struct bnx2i_hba *hba = NULL; + struct bnx2i_hba *tmp_hba; + + if (!adapter_count) + goto hba_not_found; + + read_lock(&bnx2i_dev_lock); + list_for_each_entry(tmp_hba, &adapter_list, link) { + if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) { + hba = tmp_hba; + break; + } + } + read_unlock(&bnx2i_dev_lock); +hba_not_found: + return hba; +} + + +/** + * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance + * @cnic: pointer to cnic device instance + * + */ +struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic) +{ + struct 
bnx2i_hba *hba, *temp; + + read_lock(&bnx2i_dev_lock); + list_for_each_entry_safe(hba, temp, &adapter_list, link) { + if (hba->cnic == cnic) { + read_unlock(&bnx2i_dev_lock); + return hba; + } + } + read_unlock(&bnx2i_dev_lock); + return NULL; +} + + +/** + * bnx2i_start - cnic callback to initialize & start adapter instance + * @handle: transparent handle pointing to adapter structure + * + * This function maps adapter structure to pcidev structure and initiates + * firmware handshake to enable/initialize on chip iscsi components + * This bnx2i - cnic interface api callback is issued after following + * 2 conditions are met - + * a) underlying network interface is up (marked by event 'NETDEV_UP' + * from netdev + * b) bnx2i adapter instance is registered + */ +void bnx2i_start(void *handle) +{ +#define BNX2I_INIT_POLL_TIME (1000 / HZ) + struct bnx2i_hba *hba = handle; + int i = HZ; + + bnx2i_send_fw_iscsi_init_msg(hba); + while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) + msleep(BNX2I_INIT_POLL_TIME); +} + + +/** + * bnx2i_stop - cnic callback to shutdown adapter instance + * @handle: transparent handle pointing to adapter structure + * + * driver checks if adapter is already in shutdown mode, if not start + * the shutdown process + */ +void bnx2i_stop(void *handle) +{ + struct bnx2i_hba *hba = handle; + + /* check if cleanup happened in GOING_DOWN context */ + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); + if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, + &hba->adapter_state)) + iscsi_host_for_each_session(hba->shost, + bnx2i_drop_session); +} + +/** + * bnx2i_register_device - register bnx2i adapter instance with the cnic driver + * @hba: Adapter instance to register + * + * registers bnx2i adapter instance with the cnic driver while holding the + * adapter structure lock + */ +void bnx2i_register_device(struct bnx2i_hba *hba) +{ + if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) || + test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + return; + } + + hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba); + + spin_lock(&hba->lock); + bnx2i_reg_device++; + spin_unlock(&hba->lock); + + set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); +} + + +/** + * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver + * + * registers all bnx2i adapter instances with the cnic driver while holding + * the global resource lock + */ +void bnx2i_reg_dev_all(void) +{ + struct bnx2i_hba *hba, *temp; + + read_lock(&bnx2i_dev_lock); + list_for_each_entry_safe(hba, temp, &adapter_list, link) + bnx2i_register_device(hba); + read_unlock(&bnx2i_dev_lock); +} + + +/** + * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver + * @hba: Adapter instance to unregister + * + * registers bnx2i adapter instance with the cnic driver while holding + * the adapter structure lock + */ +static void bnx2i_unreg_one_device(struct bnx2i_hba *hba) +{ + if (hba->ofld_conns_active || + !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) || + test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) + return; + + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); + + spin_lock(&hba->lock); + bnx2i_reg_device--; + spin_unlock(&hba->lock); + + /* ep_disconnect could come before NETDEV_DOWN, driver won't + * see NETDEV_DOWN as it already unregistered itself. 
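+	 * Clearing adapter_state below therefore performs the reset that
+	 * the missed NETDEV_DOWN handler would otherwise have done.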
+ */ + hba->adapter_state = 0; + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); +} + +/** + * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver + * + * unregisters all bnx2i adapter instances with the cnic driver while holding + * the global resource lock + */ +void bnx2i_unreg_dev_all(void) +{ + struct bnx2i_hba *hba, *temp; + + read_lock(&bnx2i_dev_lock); + list_for_each_entry_safe(hba, temp, &adapter_list, link) + bnx2i_unreg_one_device(hba); + read_unlock(&bnx2i_dev_lock); +} + + +/** + * bnx2i_init_one - initialize an adapter instance and allocate memory resources + * @hba: bnx2i adapter instance + * @cnic: cnic device handle + * + * Global resource lock and host adapter lock is held during critical sections + * below. This routine is called from cnic_register_driver() context and + * work horse thread which does majority of device specific initialization + */ +static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic) +{ + int rc; + + read_lock(&bnx2i_dev_lock); + if (bnx2i_reg_device && + !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba); + if (rc) /* duplicate registration */ + printk(KERN_ERR "bnx2i- dev reg failed\n"); + + spin_lock(&hba->lock); + bnx2i_reg_device++; + hba->age++; + spin_unlock(&hba->lock); + + set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + } + read_unlock(&bnx2i_dev_lock); + + write_lock(&bnx2i_dev_lock); + list_add_tail(&hba->link, &adapter_list); + adapter_count++; + write_unlock(&bnx2i_dev_lock); + return 0; +} + + +/** + * bnx2i_ulp_init - initialize an adapter instance + * @dev: cnic device handle + * + * Called from cnic_register_driver() context to initialize all enumerated + * cnic devices. This routine allocate adapter structure and other + * device specific resources. + */ +void bnx2i_ulp_init(struct cnic_dev *dev) +{ + struct bnx2i_hba *hba; + + /* Allocate a HBA structure for this device */ + hba = bnx2i_alloc_hba(dev); + if (!hba) { + printk(KERN_ERR "bnx2i init: hba initialization failed\n"); + return; + } + + /* Get PCI related information and update hba struct members */ + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + if (bnx2i_init_one(hba, dev)) { + printk(KERN_ERR "bnx2i - hba %p init failed\n", hba); + bnx2i_free_hba(hba); + } else + hba->cnic = dev; +} + + +/** + * bnx2i_ulp_exit - shuts down adapter instance and frees all resources + * @dev: cnic device handle + * + */ +void bnx2i_ulp_exit(struct cnic_dev *dev) +{ + struct bnx2i_hba *hba; + + hba = bnx2i_find_hba_for_cnic(dev); + if (!hba) { + printk(KERN_INFO "bnx2i_ulp_exit: hba not " + "found, dev 0x%p\n", dev); + return; + } + write_lock(&bnx2i_dev_lock); + list_del_init(&hba->link); + adapter_count--; + + if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + + spin_lock(&hba->lock); + bnx2i_reg_device--; + spin_unlock(&hba->lock); + } + write_unlock(&bnx2i_dev_lock); + + bnx2i_free_hba(hba); +} + + +/** + * bnx2i_mod_init - module init entry point + * + * initialize any driver wide global data structures such as endpoint pool, + * tcp port manager/queue, sysfs. 
finally driver will register itself + * with the cnic module + */ +static int __init bnx2i_mod_init(void) +{ + int err; + + printk(KERN_INFO "%s", version); + + if (!is_power_of_2(sq_size)) + sq_size = roundup_pow_of_two(sq_size); + + bnx2i_scsi_xport_template = + iscsi_register_transport(&bnx2i_iscsi_transport); + if (!bnx2i_scsi_xport_template) { + printk(KERN_ERR "Could not register bnx2i transport.\n"); + err = -ENOMEM; + goto out; + } + + err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb); + if (err) { + printk(KERN_ERR "Could not register bnx2i cnic driver.\n"); + goto unreg_xport; + } + + return 0; + +unreg_xport: + iscsi_unregister_transport(&bnx2i_iscsi_transport); +out: + return err; +} + + +/** + * bnx2i_mod_exit - module cleanup/exit entry point + * + * Global resource lock and host adapter lock is held during critical sections + * in this function. Driver will browse through the adapter list, cleans-up + * each instance, unregisters iscsi transport name and finally driver will + * unregister itself with the cnic module + */ +static void __exit bnx2i_mod_exit(void) +{ + struct bnx2i_hba *hba; + + write_lock(&bnx2i_dev_lock); + while (!list_empty(&adapter_list)) { + hba = list_entry(adapter_list.next, struct bnx2i_hba, link); + list_del(&hba->link); + adapter_count--; + + if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + bnx2i_reg_device--; + } + + write_unlock(&bnx2i_dev_lock); + bnx2i_free_hba(hba); + write_lock(&bnx2i_dev_lock); + } + write_unlock(&bnx2i_dev_lock); + + iscsi_unregister_transport(&bnx2i_iscsi_transport); + cnic_unregister_driver(CNIC_ULP_ISCSI); +} + +module_init(bnx2i_mod_init); +module_exit(bnx2i_mod_exit); diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c new file mode 100644 index 000000000000..f7412196f2f8 --- /dev/null +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -0,0 +1,2064 @@ +/* + * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver. + * + * Copyright (c) 2006 - 2009 Broadcom Corporation + * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/libiscsi.h>
+#include "bnx2i.h"
+
+struct scsi_transport_template *bnx2i_scsi_xport_template;
+struct iscsi_transport bnx2i_iscsi_transport;
+static struct scsi_host_template bnx2i_host_template;
+
+/*
+ * Global endpoint resource info
+ */
+static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
+
+
+static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
+{
+	int retval = 0;
+
+	if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
+	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
+	    test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
+		retval = -EPERM;
+	return retval;
+}
+
+/**
+ * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
+ * @cmd: iscsi cmd struct pointer
+ * @buf_off: absolute buffer offset
+ * @start_bd_off: u32 pointer to return the offset within the BD
+ *		indicated by 'start_bd_idx' on which 'buf_off' falls
+ * @start_bd_idx: index of the BD on which 'buf_off' falls
+ *
+ * identifies & marks various bd info for scsi command's imm data,
+ * unsolicited data and the first solicited data seq.
+ */
+static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
+				       u32 *start_bd_off, u32 *start_bd_idx)
+{
+	struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
+	u32 cur_offset = 0;
+	u32 cur_bd_idx = 0;
+
+	if (buf_off) {
+		while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
+			cur_offset += bd_tbl->buffer_length;
+			cur_bd_idx++;
+			bd_tbl++;
+		}
+	}
+
+	*start_bd_off = buf_off - cur_offset;
+	*start_bd_idx = cur_bd_idx;
+}
+
+/**
+ * bnx2i_setup_write_cmd_bd_info - sets up various BD information
+ * @task: transport layer's cmd struct pointer
+ *
+ * identifies & marks various bd info for scsi command's immediate data,
+ * unsolicited data and first solicited data seq which includes BD start
+ * index & BD buf off. This function takes into account iSCSI parameters
+ * such as whether immediate data and unsolicited data are supported on
+ * this connection.
+ */
+static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
+{
+	struct bnx2i_cmd *cmd = task->dd_data;
+	u32 start_bd_offset;
+	u32 start_bd_idx;
+	u32 buffer_offset = 0;
+	u32 cmd_len = cmd->req.total_data_transfer_length;
+
+	/* if ImmediateData is turned off & InitialR2T is turned on,
+	 * there will be no immediate or unsolicited data, just return.
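+	 *
+	 * Worked example (illustrative values only): with imm_count = 4096,
+	 * an unsolicited first burst of 8192 bytes and cmd_len = 65536, the
+	 * code below places ud_buffer_offset at byte 4096 (unsolicited data
+	 * follows immediate data) and sd_buffer_offset at byte 12288 (start
+	 * of the first solicited sequence), each converted into a BD
+	 * index/offset pair by bnx2i_get_write_cmd_bd_idx().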
+ */ + if (!iscsi_task_has_unsol_data(task) && !task->imm_count) + return; + + /* Immediate data */ + buffer_offset += task->imm_count; + if (task->imm_count == cmd_len) + return; + + if (iscsi_task_has_unsol_data(task)) { + bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset, + &start_bd_offset, &start_bd_idx); + cmd->req.ud_buffer_offset = start_bd_offset; + cmd->req.ud_start_bd_index = start_bd_idx; + buffer_offset += task->unsol_r2t.data_length; + } + + if (buffer_offset != cmd_len) { + bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset, + &start_bd_offset, &start_bd_idx); + if ((start_bd_offset > task->conn->session->first_burst) || + (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) { + int i = 0; + + iscsi_conn_printk(KERN_ALERT, task->conn, + "bnx2i- error, buf offset 0x%x " + "bd_valid %d use_sg %d\n", + buffer_offset, cmd->io_tbl.bd_valid, + scsi_sg_count(cmd->scsi_cmd)); + for (i = 0; i < cmd->io_tbl.bd_valid; i++) + iscsi_conn_printk(KERN_ALERT, task->conn, + "bnx2i err, bd[%d]: len %x\n", + i, cmd->io_tbl.bd_tbl[i].\ + buffer_length); + } + cmd->req.sd_buffer_offset = start_bd_offset; + cmd->req.sd_start_bd_index = start_bd_idx; + } +} + + + +/** + * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table + * @hba: adapter instance + * @cmd: iscsi cmd struct pointer + * + * map SG list + */ +static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) +{ + struct scsi_cmnd *sc = cmd->scsi_cmd; + struct iscsi_bd *bd = cmd->io_tbl.bd_tbl; + struct scatterlist *sg; + int byte_count = 0; + int bd_count = 0; + int sg_count; + int sg_len; + u64 addr; + int i; + + BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD); + + sg_count = scsi_dma_map(sc); + + scsi_for_each_sg(sc, sg, sg_count, i) { + sg_len = sg_dma_len(sg); + addr = (u64) sg_dma_address(sg); + bd[bd_count].buffer_addr_lo = addr & 0xffffffff; + bd[bd_count].buffer_addr_hi = addr >> 32; + bd[bd_count].buffer_length = sg_len; + bd[bd_count].flags = 0; + if (bd_count == 0) + bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN; + + byte_count += sg_len; + bd_count++; + } + + if (bd_count) + bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN; + + BUG_ON(byte_count != scsi_bufflen(sc)); + return bd_count; +} + +/** + * bnx2i_iscsi_map_sg_list - maps SG list + * @cmd: iscsi cmd struct pointer + * + * creates BD list table for the command + */ +static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd) +{ + int bd_count; + + bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd); + if (!bd_count) { + struct iscsi_bd *bd = cmd->io_tbl.bd_tbl; + + bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0; + bd[0].buffer_length = bd[0].flags = 0; + } + cmd->io_tbl.bd_valid = bd_count; +} + + +/** + * bnx2i_iscsi_unmap_sg_list - unmaps SG list + * @cmd: iscsi cmd struct pointer + * + * unmap IO buffers and invalidate the BD table + */ +void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd) +{ + struct scsi_cmnd *sc = cmd->scsi_cmd; + + if (cmd->io_tbl.bd_valid && sc) { + scsi_dma_unmap(sc); + cmd->io_tbl.bd_valid = 0; + } +} + +static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd) +{ + memset(&cmd->req, 0x00, sizeof(cmd->req)); + cmd->req.op_code = 0xFF; + cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma; + cmd->req.bd_list_addr_hi = + (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32); + +} + + +/** + * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid' + * @hba: pointer to adapter instance + * @conn: pointer to iscsi connection + * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1) + * + * update iscsi cid table entry 
with connection pointer. This enables + * driver to quickly get hold of connection structure pointer in + * completion/interrupt thread using iscsi context ID + */ +static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba, + struct bnx2i_conn *bnx2i_conn, + u32 iscsi_cid) +{ + if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) { + iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, + "conn bind - entry #%d not free\n", iscsi_cid); + return -EBUSY; + } + + hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn; + return 0; +} + + +/** + * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr + * @hba: pointer to adapter instance + * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1) + */ +struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, + u16 iscsi_cid) +{ + if (!hba->cid_que.conn_cid_tbl) { + printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n"); + return NULL; + + } else if (iscsi_cid >= hba->max_active_conns) { + printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid); + return NULL; + } + return hba->cid_que.conn_cid_tbl[iscsi_cid]; +} + + +/** + * bnx2i_alloc_iscsi_cid - allocates a iscsi_cid from free pool + * @hba: pointer to adapter instance + */ +static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba) +{ + int idx; + + if (!hba->cid_que.cid_free_cnt) + return -1; + + idx = hba->cid_que.cid_q_cons_idx; + hba->cid_que.cid_q_cons_idx++; + if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx) + hba->cid_que.cid_q_cons_idx = 0; + + hba->cid_que.cid_free_cnt--; + return hba->cid_que.cid_que[idx]; +} + + +/** + * bnx2i_free_iscsi_cid - returns tcp port to free list + * @hba: pointer to adapter instance + * @iscsi_cid: iscsi context ID to free + */ +static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid) +{ + int idx; + + if (iscsi_cid == (u16) -1) + return; + + hba->cid_que.cid_free_cnt++; + + idx = hba->cid_que.cid_q_prod_idx; + hba->cid_que.cid_que[idx] = iscsi_cid; + hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL; + hba->cid_que.cid_q_prod_idx++; + if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx) + hba->cid_que.cid_q_prod_idx = 0; +} + + +/** + * bnx2i_setup_free_cid_que - sets up free iscsi cid queue + * @hba: pointer to adapter instance + * + * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table, + * and initialize table attributes + */ +static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba) +{ + int mem_size; + int i; + + mem_size = hba->max_active_conns * sizeof(u32); + mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL); + if (!hba->cid_que.cid_que_base) + return -ENOMEM; + + mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *); + mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL); + if (!hba->cid_que.conn_cid_tbl) { + kfree(hba->cid_que.cid_que_base); + hba->cid_que.cid_que_base = NULL; + return -ENOMEM; + } + + hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base; + hba->cid_que.cid_q_prod_idx = 0; + hba->cid_que.cid_q_cons_idx = 0; + hba->cid_que.cid_q_max_idx = hba->max_active_conns; + hba->cid_que.cid_free_cnt = hba->max_active_conns; + + for (i = 0; i < hba->max_active_conns; i++) { + hba->cid_que.cid_que[i] = i; + hba->cid_que.conn_cid_tbl[i] = NULL; + } + return 0; +} + + +/** + * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources + * @hba: pointer to adapter instance + */ +static void bnx2i_release_free_cid_que(struct 
bnx2i_hba *hba) +{ + kfree(hba->cid_que.cid_que_base); + hba->cid_que.cid_que_base = NULL; + + kfree(hba->cid_que.conn_cid_tbl); + hba->cid_que.conn_cid_tbl = NULL; +} + + +/** + * bnx2i_alloc_ep - allocates ep structure from global pool + * @hba: pointer to adapter instance + * + * routine allocates a free endpoint structure from global pool and + * a tcp port to be used for this connection. Global resource lock, + * 'bnx2i_resc_lock' is held while accessing shared global data structures + */ +static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba) +{ + struct iscsi_endpoint *ep; + struct bnx2i_endpoint *bnx2i_ep; + + ep = iscsi_create_endpoint(sizeof(*bnx2i_ep)); + if (!ep) { + printk(KERN_ERR "bnx2i: Could not allocate ep\n"); + return NULL; + } + + bnx2i_ep = ep->dd_data; + INIT_LIST_HEAD(&bnx2i_ep->link); + bnx2i_ep->state = EP_STATE_IDLE; + bnx2i_ep->hba = hba; + bnx2i_ep->hba_age = hba->age; + hba->ofld_conns_active++; + init_waitqueue_head(&bnx2i_ep->ofld_wait); + return ep; +} + + +/** + * bnx2i_free_ep - free endpoint + * @ep: pointer to iscsi endpoint structure + */ +static void bnx2i_free_ep(struct iscsi_endpoint *ep) +{ + struct bnx2i_endpoint *bnx2i_ep = ep->dd_data; + unsigned long flags; + + spin_lock_irqsave(&bnx2i_resc_lock, flags); + bnx2i_ep->state = EP_STATE_IDLE; + bnx2i_ep->hba->ofld_conns_active--; + + bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid); + if (bnx2i_ep->conn) { + bnx2i_ep->conn->ep = NULL; + bnx2i_ep->conn = NULL; + } + + bnx2i_ep->hba = NULL; + spin_unlock_irqrestore(&bnx2i_resc_lock, flags); + iscsi_destroy_endpoint(ep); +} + + +/** + * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command + * @hba: adapter instance pointer + * @session: iscsi session pointer + * @cmd: iscsi command structure + */ +static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session, + struct bnx2i_cmd *cmd) +{ + struct io_bdt *io = &cmd->io_tbl; + struct iscsi_bd *bd; + + io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, + ISCSI_MAX_BDS_PER_CMD * sizeof(*bd), + &io->bd_tbl_dma, GFP_KERNEL); + if (!io->bd_tbl) { + iscsi_session_printk(KERN_ERR, session, "Could not " + "allocate bdt.\n"); + return -ENOMEM; + } + io->bd_valid = 0; + return 0; +} + +/** + * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table + * @hba: adapter instance pointer + * @session: iscsi session pointer + * @cmd: iscsi command structure + */ +static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba, + struct iscsi_session *session) +{ + int i; + + for (i = 0; i < session->cmds_max; i++) { + struct iscsi_task *task = session->cmds[i]; + struct bnx2i_cmd *cmd = task->dd_data; + + if (cmd->io_tbl.bd_tbl) + dma_free_coherent(&hba->pcidev->dev, + ISCSI_MAX_BDS_PER_CMD * + sizeof(struct iscsi_bd), + cmd->io_tbl.bd_tbl, + cmd->io_tbl.bd_tbl_dma); + } + +} + + +/** + * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session + * @hba: adapter instance pointer + * @session: iscsi session pointer + */ +static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba, + struct iscsi_session *session) +{ + int i; + + for (i = 0; i < session->cmds_max; i++) { + struct iscsi_task *task = session->cmds[i]; + struct bnx2i_cmd *cmd = task->dd_data; + + /* Anil */ + task->hdr = &cmd->hdr; + task->hdr_max = sizeof(struct iscsi_hdr); + + if (bnx2i_alloc_bdt(hba, session, cmd)) + goto free_bdts; + } + + return 0; + +free_bdts: + bnx2i_destroy_cmd_pool(hba, session); + return -ENOMEM; +} + + +/** + * bnx2i_setup_mp_bdt - allocate BD table 
resources
+ * @hba: pointer to adapter structure
+ *
+ * Allocate memory for dummy buffer and associated BD
+ * table to be used by middle path (MP) requests
+ */
+static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
+{
+	int rc = 0;
+	struct iscsi_bd *mp_bdt;
+	u64 addr;
+
+	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					    &hba->mp_bd_dma, GFP_KERNEL);
+	if (!hba->mp_bd_tbl) {
+		printk(KERN_ERR "unable to allocate Middle Path BDT\n");
+		rc = -1;
+		goto out;
+	}
+
+	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					       &hba->dummy_buf_dma, GFP_KERNEL);
+	if (!hba->dummy_buffer) {
+		printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->mp_bd_tbl, hba->mp_bd_dma);
+		hba->mp_bd_tbl = NULL;
+		rc = -1;
+		goto out;
+	}
+
+	mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
+	addr = (unsigned long) hba->dummy_buf_dma;
+	mp_bdt->buffer_addr_lo = addr & 0xffffffff;
+	mp_bdt->buffer_addr_hi = addr >> 32;
+	mp_bdt->buffer_length = PAGE_SIZE;
+	mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+			ISCSI_BD_FIRST_IN_BD_CHAIN;
+out:
+	return rc;
+}
+
+
+/**
+ * bnx2i_free_mp_bdt - releases middle path (MP) dummy buffer and BD table
+ * @hba: pointer to adapter instance
+ *
+ * free MP dummy buffer and associated BD table
+ */
+static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
+{
+	if (hba->mp_bd_tbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->mp_bd_tbl, hba->mp_bd_dma);
+		hba->mp_bd_tbl = NULL;
+	}
+	if (hba->dummy_buffer) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->dummy_buffer, hba->dummy_buf_dma);
+		hba->dummy_buffer = NULL;
+	}
+	return;
+}
+
+/**
+ * bnx2i_drop_session - notifies iscsid of connection error.
+ * @cls_session: iscsi cls session pointer
+ *
+ * This notifies iscsid that there is an error, so it can initiate
+ * recovery.
+ *
+ * This relies on the caller using the iscsi class iterator so the object
+ * is refcounted and does not disappear from under us.
+ */
+void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
+{
+	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
+/**
+ * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
+				     struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_add_tail(&ep->link, &hba->ep_destroy_list);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+	return 0;
+}
+
+/**
+ * bnx2i_ep_destroy_list_del - remove an entry from EP destroy list
+ *
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
+				     struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_del_init(&ep->link);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+
+	return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
+				  struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_add_tail(&ep->link, &hba->ep_ofld_list);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+	return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_del - remove an entry from ep offload pending list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
+				  struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_del_init(&ep->link);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+	return 0;
+}
+
+
+/**
+ * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
+ *
+ * @hba: pointer to adapter instance
+ * @iscsi_cid: iscsi context ID to find
+ *
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+	struct list_head *list;
+	struct list_head *tmp;
+	struct bnx2i_endpoint *ep;
+
+	read_lock_bh(&hba->ep_rdwr_lock);
+	list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
+		ep = (struct bnx2i_endpoint *)list;
+
+		if (ep->ep_iscsi_cid == iscsi_cid)
+			break;
+		ep = NULL;
+	}
+	read_unlock_bh(&hba->ep_rdwr_lock);
+
+	if (!ep)
+		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+	return ep;
+}
+
+
+/**
+ * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
+ * @hba: pointer to adapter instance
+ * @iscsi_cid: iscsi context ID to find
+ *
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+	struct list_head *list;
+	struct list_head *tmp;
+	struct bnx2i_endpoint *ep;
+
+	read_lock_bh(&hba->ep_rdwr_lock);
+	list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
+		ep = (struct bnx2i_endpoint *)list;
+
+		if (ep->ep_iscsi_cid == iscsi_cid)
+			break;
+		ep = NULL;
+	}
+	read_unlock_bh(&hba->ep_rdwr_lock);
+
+	if (!ep)
+		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+
+	return ep;
+}
+
+/**
+ * bnx2i_setup_host_queue_size - assigns shost->can_queue param
+ * @hba: pointer to adapter instance
+ * @shost: scsi host pointer
+ *
+ * Initializes 'can_queue' parameter based on how many outstanding commands
+ * the device can handle.
+ * Each device (5708/5709/57710) has different capabilities
+ */
+static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
+					struct Scsi_Host *shost)
+{
+	if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+	else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
+	else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
+	else
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+}
+
+
+/**
+ * bnx2i_alloc_hba - allocate and init adapter instance
+ * @cnic: cnic device pointer
+ *
+ * allocate & initialize adapter structure and call other
+ * support routines to do per adapter initialization
+ */
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
+{
+	struct Scsi_Host *shost;
+	struct bnx2i_hba *hba;
+
+	shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
+	if (!shost)
+		return NULL;
+	shost->dma_boundary = cnic->pcidev->dma_mask;
+	shost->transportt = bnx2i_scsi_xport_template;
+	shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
+	shost->max_channel = 0;
+	shost->max_lun = 512;
+	shost->max_cmd_len = 16;
+
+	hba = iscsi_host_priv(shost);
+	hba->shost = shost;
+	hba->netdev = cnic->netdev;
+	/* Get PCI related information and update hba struct members */
+	hba->pcidev = cnic->pcidev;
+	pci_dev_get(hba->pcidev);
+	hba->pci_did = hba->pcidev->device;
+	hba->pci_vid = hba->pcidev->vendor;
+	hba->pci_sdid = hba->pcidev->subsystem_device;
+	hba->pci_svid = hba->pcidev->subsystem_vendor;
+	hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
+	hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
+
+	bnx2i_identify_device(hba);
+	bnx2i_setup_host_queue_size(hba, shost);
+
+	if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
+		hba->regview = ioremap_nocache(hba->netdev->base_addr,
+					       BNX2_MQ_CONFIG2);
+		if (!hba->regview)
+			goto ioreg_map_err;
+	} else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+		hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+		if (!hba->regview)
+			goto ioreg_map_err;
+	}
+
+	if (bnx2i_setup_mp_bdt(hba))
+		goto mp_bdt_mem_err;
+
+	INIT_LIST_HEAD(&hba->ep_ofld_list);
+	INIT_LIST_HEAD(&hba->ep_destroy_list);
+	rwlock_init(&hba->ep_rdwr_lock);
+
+	hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
+
+	/* different values for 5708/5709/57710 */
+	hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
+
+	if (bnx2i_setup_free_cid_que(hba))
+		goto cid_que_err;
+
+	/* SQ/RQ/CQ size can be changed via sysfs interface */
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+		if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
+			hba->max_sqes = sq_size;
+		else
+			hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
+	} else {	/* 5706/5708/5709 */
+		if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
+			hba->max_sqes = sq_size;
+		else
+			hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
+	}
+
+	hba->max_rqes = rq_size;
+	hba->max_cqes = hba->max_sqes + rq_size;
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+		if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
+			hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
+	} else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
+		hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
+
+	hba->num_ccell = hba->max_sqes / 2;
+
+	spin_lock_init(&hba->lock);
+	mutex_init(&hba->net_dev_lock);
+
+	if (iscsi_host_add(shost, &hba->pcidev->dev))
+		goto free_dump_mem;
+	return hba;
+
+free_dump_mem:
+	bnx2i_release_free_cid_que(hba);
+cid_que_err:
+	bnx2i_free_mp_bdt(hba);
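+	/*
+	 * Error unwind: resources are released in the reverse order of
+	 * allocation - cid queue, middle path BD table, register mapping,
+	 * then the PCI device and SCSI host references taken above.
+	 */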
+mp_bdt_mem_err:
+	if (hba->regview) {
+		iounmap(hba->regview);
+		hba->regview = NULL;
+	}
+ioreg_map_err:
+	pci_dev_put(hba->pcidev);
+	scsi_host_put(shost);
+	return NULL;
+}
+
+/**
+ * bnx2i_free_hba - releases hba structure and resources held by the adapter
+ * @hba: pointer to adapter instance
+ *
+ * free adapter structure and call various cleanup routines.
+ */
+void bnx2i_free_hba(struct bnx2i_hba *hba)
+{
+	struct Scsi_Host *shost = hba->shost;
+
+	iscsi_host_remove(shost);
+	INIT_LIST_HEAD(&hba->ep_ofld_list);
+	INIT_LIST_HEAD(&hba->ep_destroy_list);
+	pci_dev_put(hba->pcidev);
+
+	if (hba->regview) {
+		iounmap(hba->regview);
+		hba->regview = NULL;
+	}
+	bnx2i_free_mp_bdt(hba);
+	bnx2i_release_free_cid_que(hba);
+	iscsi_host_free(shost);
+}
+
+/**
+ * bnx2i_conn_free_login_resources - free DMA resources used for login process
+ * @hba: pointer to adapter instance
+ * @bnx2i_conn: iscsi connection pointer
+ *
+ * Login related resources, mostly BDT & payload DMA memory is freed
+ */
+static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
+					    struct bnx2i_conn *bnx2i_conn)
+{
+	if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  bnx2i_conn->gen_pdu.resp_bd_tbl,
+				  bnx2i_conn->gen_pdu.resp_bd_dma);
+		bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
+	}
+
+	if (bnx2i_conn->gen_pdu.req_bd_tbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  bnx2i_conn->gen_pdu.req_bd_tbl,
+				  bnx2i_conn->gen_pdu.req_bd_dma);
+		bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
+	}
+
+	if (bnx2i_conn->gen_pdu.resp_buf) {
+		dma_free_coherent(&hba->pcidev->dev,
+				  ISCSI_DEF_MAX_RECV_SEG_LEN,
+				  bnx2i_conn->gen_pdu.resp_buf,
+				  bnx2i_conn->gen_pdu.resp_dma_addr);
+		bnx2i_conn->gen_pdu.resp_buf = NULL;
+	}
+
+	if (bnx2i_conn->gen_pdu.req_buf) {
+		dma_free_coherent(&hba->pcidev->dev,
+				  ISCSI_DEF_MAX_RECV_SEG_LEN,
+				  bnx2i_conn->gen_pdu.req_buf,
+				  bnx2i_conn->gen_pdu.req_dma_addr);
+		bnx2i_conn->gen_pdu.req_buf = NULL;
+	}
+}
+
+/**
+ * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
+ * @hba: pointer to adapter instance
+ * @bnx2i_conn: iscsi connection pointer
+ *
+ * Mgmt task DMA resources are allocated in this routine.
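+ * (a request/response payload buffer pair plus one BD table page each;
+ * all of it is freed again in bnx2i_conn_free_login_resources above)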
+ */ +static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, + struct bnx2i_conn *bnx2i_conn) +{ + /* Allocate memory for login request/response buffers */ + bnx2i_conn->gen_pdu.req_buf = + dma_alloc_coherent(&hba->pcidev->dev, + ISCSI_DEF_MAX_RECV_SEG_LEN, + &bnx2i_conn->gen_pdu.req_dma_addr, + GFP_KERNEL); + if (bnx2i_conn->gen_pdu.req_buf == NULL) + goto login_req_buf_failure; + + bnx2i_conn->gen_pdu.req_buf_size = 0; + bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf; + + bnx2i_conn->gen_pdu.resp_buf = + dma_alloc_coherent(&hba->pcidev->dev, + ISCSI_DEF_MAX_RECV_SEG_LEN, + &bnx2i_conn->gen_pdu.resp_dma_addr, + GFP_KERNEL); + if (bnx2i_conn->gen_pdu.resp_buf == NULL) + goto login_resp_buf_failure; + + bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN; + bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf; + + bnx2i_conn->gen_pdu.req_bd_tbl = + dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL); + if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL) + goto login_req_bd_tbl_failure; + + bnx2i_conn->gen_pdu.resp_bd_tbl = + dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + &bnx2i_conn->gen_pdu.resp_bd_dma, + GFP_KERNEL); + if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) + goto login_resp_bd_tbl_failure; + + return 0; + +login_resp_bd_tbl_failure: + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + bnx2i_conn->gen_pdu.req_bd_tbl, + bnx2i_conn->gen_pdu.req_bd_dma); + bnx2i_conn->gen_pdu.req_bd_tbl = NULL; + +login_req_bd_tbl_failure: + dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, + bnx2i_conn->gen_pdu.resp_buf, + bnx2i_conn->gen_pdu.resp_dma_addr); + bnx2i_conn->gen_pdu.resp_buf = NULL; +login_resp_buf_failure: + dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, + bnx2i_conn->gen_pdu.req_buf, + bnx2i_conn->gen_pdu.req_dma_addr); + bnx2i_conn->gen_pdu.req_buf = NULL; +login_req_buf_failure: + iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data, + "login resource alloc failed!!\n"); + return -ENOMEM; + +} + + +/** + * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table. + * @bnx2i_conn: iscsi connection pointer + * + * Allocates buffers and BD tables before shipping requests to cnic + * for PDUs prepared by 'iscsid' daemon + */ +static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn) +{ + struct iscsi_bd *bd_tbl; + + bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl; + + bd_tbl->buffer_addr_hi = + (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32); + bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr; + bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr - + bnx2i_conn->gen_pdu.req_buf; + bd_tbl->reserved0 = 0; + bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN | + ISCSI_BD_FIRST_IN_BD_CHAIN; + + bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl; + bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32; + bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr; + bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN; + bd_tbl->reserved0 = 0; + bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN | + ISCSI_BD_FIRST_IN_BD_CHAIN; +} + + +/** + * bnx2i_iscsi_send_generic_request - called to send mgmt tasks. + * @task: transport layer task pointer + * + * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login, + * Nop-out and Logout requests flow through this path. 
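+ * TMF requests are routed here as well (see the ISCSI_OP_SCSI_TMFUNC
+ * case below).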
+ */ +static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task) +{ + struct bnx2i_cmd *cmd = task->dd_data; + struct bnx2i_conn *bnx2i_conn = cmd->conn; + int rc = 0; + char *buf; + int data_len; + + bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn); + switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { + case ISCSI_OP_LOGIN: + bnx2i_send_iscsi_login(bnx2i_conn, task); + break; + case ISCSI_OP_NOOP_OUT: + data_len = bnx2i_conn->gen_pdu.req_buf_size; + buf = bnx2i_conn->gen_pdu.req_buf; + if (data_len) + rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, + RESERVED_ITT, + buf, data_len, 1); + else + rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, + RESERVED_ITT, + NULL, 0, 1); + break; + case ISCSI_OP_LOGOUT: + rc = bnx2i_send_iscsi_logout(bnx2i_conn, task); + break; + case ISCSI_OP_SCSI_TMFUNC: + rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task); + break; + default: + iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, + "send_gen: unsupported op 0x%x\n", + task->hdr->opcode); + } + return rc; +} + + +/********************************************************************** + * SCSI-ML Interface + **********************************************************************/ + +/** + * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe + * @sc: SCSI-ML command pointer + * @cmd: iscsi cmd pointer + */ +static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd) +{ + u32 dword; + int lpcnt; + u8 *srcp; + u32 *dstp; + u32 scsi_lun[2]; + + int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun); + cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]); + cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]); + + lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword); + srcp = (u8 *) sc->cmnd; + dstp = (u32 *) cmd->req.cdb; + while (lpcnt--) { + memcpy(&dword, (const void *) srcp, 4); + *dstp = cpu_to_be32(dword); + srcp += 4; + dstp++; + } + if (sc->cmd_len & 0x3) { + dword = (u32) srcp[0] | ((u32) srcp[1] << 8); + *dstp = cpu_to_be32(dword); + } +} + +static void bnx2i_cleanup_task(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + struct bnx2i_conn *bnx2i_conn = conn->dd_data; + struct bnx2i_hba *hba = bnx2i_conn->hba; + + /* + * mgmt task or cmd was never sent to us to transmit. 
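+	 * (task->sc is NULL for mgmt tasks, and ISCSI_TASK_PENDING means
+	 * the request never reached the hardware, so there is no SG list
+	 * to unmap)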
+ */ + if (!task->sc || task->state == ISCSI_TASK_PENDING) + return; + /* + * need to clean-up task context to claim dma buffers + */ + if (task->state == ISCSI_TASK_ABRT_TMF) { + bnx2i_send_cmd_cleanup_req(hba, task->dd_data); + + spin_unlock_bh(&conn->session->lock); + wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl, + msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT)); + spin_lock_bh(&conn->session->lock); + } + bnx2i_iscsi_unmap_sg_list(task->dd_data); +} + +/** + * bnx2i_mtask_xmit - transmit mtask to chip for further processing + * @conn: transport layer conn structure pointer + * @task: transport layer command structure pointer + */ +static int +bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) +{ + struct bnx2i_conn *bnx2i_conn = conn->dd_data; + struct bnx2i_cmd *cmd = task->dd_data; + + memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN); + + bnx2i_setup_cmd_wqe_template(cmd); + bnx2i_conn->gen_pdu.req_buf_size = task->data_count; + if (task->data_count) { + memcpy(bnx2i_conn->gen_pdu.req_buf, task->data, + task->data_count); + bnx2i_conn->gen_pdu.req_wr_ptr = + bnx2i_conn->gen_pdu.req_buf + task->data_count; + } + cmd->conn = conn->dd_data; + cmd->scsi_cmd = NULL; + return bnx2i_iscsi_send_generic_request(task); +} + +/** + * bnx2i_task_xmit - transmit iscsi command to chip for further processing + * @task: transport layer command structure pointer + * + * maps SG buffers and send request to chip/firmware in the form of SQ WQE + */ +static int bnx2i_task_xmit(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + struct iscsi_session *session = conn->session; + struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); + struct bnx2i_hba *hba = iscsi_host_priv(shost); + struct bnx2i_conn *bnx2i_conn = conn->dd_data; + struct scsi_cmnd *sc = task->sc; + struct bnx2i_cmd *cmd = task->dd_data; + struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr; + + if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state)) + return -ENOTCONN; + + if (!bnx2i_conn->is_bound) + return -ENOTCONN; + + /* + * If there is no scsi_cmnd this must be a mgmt task + */ + if (!sc) + return bnx2i_mtask_xmit(conn, task); + + bnx2i_setup_cmd_wqe_template(cmd); + cmd->req.op_code = ISCSI_OP_SCSI_CMD; + cmd->conn = bnx2i_conn; + cmd->scsi_cmd = sc; + cmd->req.total_data_transfer_length = scsi_bufflen(sc); + cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn); + + bnx2i_iscsi_map_sg_list(cmd); + bnx2i_cpy_scsi_cdb(sc, cmd); + + cmd->req.op_attr = ISCSI_ATTR_SIMPLE; + if (sc->sc_data_direction == DMA_TO_DEVICE) { + cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE; + cmd->req.itt = task->itt | + (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); + bnx2i_setup_write_cmd_bd_info(task); + } else { + if (scsi_bufflen(sc)) + cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ; + cmd->req.itt = task->itt | + (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); + } + + cmd->req.num_bds = cmd->io_tbl.bd_valid; + if (!cmd->io_tbl.bd_valid) { + cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma; + cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32); + cmd->req.num_bds = 1; + } + + bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd); + return 0; +} + +/** + * bnx2i_session_create - create a new iscsi session + * @cmds_max: max commands supported + * @qdepth: scsi queue depth to support + * @initial_cmdsn: initial iscsi CMDSN to be used for this session + * + * Creates a new iSCSI session instance on given device. 
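+ * @ep is the endpoint created by a preceding ep_connect() call, and
+ * cmds_max is clamped to the adapter's SQ limits before the session
+ * is set up.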
+ */ +static struct iscsi_cls_session * +bnx2i_session_create(struct iscsi_endpoint *ep, + uint16_t cmds_max, uint16_t qdepth, + uint32_t initial_cmdsn) +{ + struct Scsi_Host *shost; + struct iscsi_cls_session *cls_session; + struct bnx2i_hba *hba; + struct bnx2i_endpoint *bnx2i_ep; + + if (!ep) { + printk(KERN_ERR "bnx2i: missing ep.\n"); + return NULL; + } + + bnx2i_ep = ep->dd_data; + shost = bnx2i_ep->hba->shost; + hba = iscsi_host_priv(shost); + if (bnx2i_adapter_ready(hba)) + return NULL; + + /* + * user can override hw limit as long as it is within + * the min/max. + */ + if (cmds_max > hba->max_sqes) + cmds_max = hba->max_sqes; + else if (cmds_max < BNX2I_SQ_WQES_MIN) + cmds_max = BNX2I_SQ_WQES_MIN; + + cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost, + cmds_max, sizeof(struct bnx2i_cmd), + initial_cmdsn, ISCSI_MAX_TARGET); + if (!cls_session) + return NULL; + + if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data)) + goto session_teardown; + return cls_session; + +session_teardown: + iscsi_session_teardown(cls_session); + return NULL; +} + + +/** + * bnx2i_session_destroy - destroys iscsi session + * @cls_session: pointer to iscsi cls session + * + * Destroys previously created iSCSI session instance and releases + * all resources held by it + */ +static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *session = cls_session->dd_data; + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct bnx2i_hba *hba = iscsi_host_priv(shost); + + bnx2i_destroy_cmd_pool(hba, session); + iscsi_session_teardown(cls_session); +} + + +/** + * bnx2i_conn_create - create iscsi connection instance + * @cls_session: pointer to iscsi cls session + * @cid: iscsi cid as per rfc (not NX2's CID terminology) + * + * Creates a new iSCSI connection instance for a given session + */ +static struct iscsi_cls_conn * +bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid) +{ + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct bnx2i_hba *hba = iscsi_host_priv(shost); + struct bnx2i_conn *bnx2i_conn; + struct iscsi_cls_conn *cls_conn; + struct iscsi_conn *conn; + + cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn), + cid); + if (!cls_conn) + return NULL; + conn = cls_conn->dd_data; + + bnx2i_conn = conn->dd_data; + bnx2i_conn->cls_conn = cls_conn; + bnx2i_conn->hba = hba; + /* 'ep' ptr will be assigned in bind() call */ + bnx2i_conn->ep = NULL; + init_completion(&bnx2i_conn->cmd_cleanup_cmpl); + + if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) { + iscsi_conn_printk(KERN_ALERT, conn, + "conn_new: login resc alloc failed!!\n"); + goto free_conn; + } + + return cls_conn; + +free_conn: + iscsi_conn_teardown(cls_conn); + return NULL; +} + +/** + * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together + * @cls_session: pointer to iscsi cls session + * @cls_conn: pointer to iscsi cls conn + * @transport_fd: 64-bit EP handle + * @is_leading: leading connection on this session? + * + * Binds together iSCSI session instance, iSCSI connection instance + * and the TCP connection. 
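+ * @transport_fd is the 64-bit handle that ep_connect() returned to
+ * userspace and is mapped back to the endpoint via
+ * iscsi_lookup_endpoint().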
+ * This routine returns an error code if the TCP connection does not
+ * belong to the device the iSCSI session/connection is bound to.
+ */
+static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
+			   struct iscsi_cls_conn *cls_conn,
+			   uint64_t transport_fd, int is_leading)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	struct bnx2i_endpoint *bnx2i_ep;
+	struct iscsi_endpoint *ep;
+	int ret_code;
+
+	ep = iscsi_lookup_endpoint(transport_fd);
+	if (!ep)
+		return -EINVAL;
+
+	bnx2i_ep = ep->dd_data;
+	if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+	    (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
+		/* Peer disconnect via FIN or RST */
+		return -EINVAL;
+
+	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+		return -EINVAL;
+
+	if (bnx2i_ep->hba != hba) {
+		/* Error - TCP connection does not belong to this device
+		 */
+		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
+				  "conn bind, ep=0x%p (%s) does not",
+				  bnx2i_ep, bnx2i_ep->hba->netdev->name);
+		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
+				  "belong to hba (%s)\n",
+				  hba->netdev->name);
+		return -EEXIST;
+	}
+
+	bnx2i_ep->conn = bnx2i_conn;
+	bnx2i_conn->ep = bnx2i_ep;
+	bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
+	bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
+	bnx2i_conn->is_bound = 1;
+
+	ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
+						bnx2i_ep->ep_iscsi_cid);
+
+	/* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
+	 * driver needs to explicitly replenish RQ index during setup.
+	 */
+	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+		bnx2i_put_rq_buf(bnx2i_conn, 0);
+
+	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+	return ret_code;
+}
+
+
+/**
+ * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
+ * @cls_conn: pointer to iscsi cls conn
+ *
+ * Destroy an iSCSI connection instance and release memory resources held by
+ * this connection
+ */
+static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct Scsi_Host *shost;
+	struct bnx2i_hba *hba;
+
+	shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+	hba = iscsi_host_priv(shost);
+
+	bnx2i_conn_free_login_resources(hba, bnx2i_conn);
+	iscsi_conn_teardown(cls_conn);
+}
+
+
+/**
+ * bnx2i_conn_get_param - return iscsi connection parameter to caller
+ * @cls_conn: pointer to iscsi cls conn
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ *
+ * returns iSCSI connection parameters
+ */
+static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
+				enum iscsi_param param, char *buf)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	int len = 0;
+
+	switch (param) {
+	case ISCSI_PARAM_CONN_PORT:
+		if (bnx2i_conn->ep)
+			len = sprintf(buf, "%hu\n",
+				      bnx2i_conn->ep->cm_sk->dst_port);
+		break;
+	case ISCSI_PARAM_CONN_ADDRESS:
+		if (bnx2i_conn->ep)
+			len = sprintf(buf, NIPQUAD_FMT "\n",
+				      NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
+		break;
+	default:
+		return iscsi_conn_get_param(cls_conn, param, buf);
+	}
+
+	return len;
+}
+
+/**
+ * bnx2i_host_get_param - returns host (adapter) related parameters
+ * @shost: scsi host pointer
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ */
+static int bnx2i_host_get_param(struct Scsi_Host *shost,
+				enum iscsi_host_param param, char *buf)
+{
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	int len = 0;
+
+	switch (param) {
+	case ISCSI_HOST_PARAM_HWADDRESS:
+		len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
+		break;
+	case ISCSI_HOST_PARAM_NETDEV_NAME:
+		len = sprintf(buf, "%s\n", hba->netdev->name);
+		break;
+	default:
+		return iscsi_host_get_param(shost, param, buf);
+	}
+	return len;
+}
+
+/**
+ * bnx2i_conn_start - completes iscsi connection migration to FFP
+ * @cls_conn: pointer to iscsi cls conn
+ *
+ * last call in FFP migration to handover iscsi conn to the driver
+ */
+static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+
+	bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
+	bnx2i_update_iscsi_conn(conn);
+
+	/*
+	 * this should normally not sleep for a long time so it should
+	 * not disrupt the caller.
+	 */
+	bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
+	bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
+	add_timer(&bnx2i_conn->ep->ofld_timer);
+	/* update iSCSI context for this conn, wait for CNIC to complete */
+	wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
+			bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&bnx2i_conn->ep->ofld_timer);
+
+	iscsi_conn_start(cls_conn);
+	return 0;
+}
+
+
+/**
+ * bnx2i_conn_get_stats - returns iSCSI stats
+ * @cls_conn: pointer to iscsi cls conn
+ * @stats: pointer to iscsi statistic struct
+ */
+static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+				 struct iscsi_stats *stats)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+
+	stats->txdata_octets = conn->txdata_octets;
+	stats->rxdata_octets = conn->rxdata_octets;
+	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+	stats->dataout_pdus = conn->dataout_pdus_cnt;
+	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+	stats->datain_pdus = conn->datain_pdus_cnt;
+	stats->r2t_pdus = conn->r2t_pdus_cnt;
+	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+	/* export eh_abort_cnt as the lone custom statistic */
+	strcpy(stats->custom[0].desc, "eh_abort_cnt");
+	stats->custom[0].value = conn->eh_abort_cnt;
+	stats->custom_length = 1;
+	stats->digest_err = 0;
+	stats->timeout_err = 0;
+}
+
+
+/**
+ * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices
+ * @dst_addr: target IP address
+ *
+ * check if route resolves to BNX2 device
+ */
+static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
+{
+	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+	struct bnx2i_hba *hba;
+	struct cnic_dev *cnic = NULL;
+
+	bnx2i_reg_dev_all();
+
+	hba = get_adapter_list_head();
+	if (hba && hba->cnic)
+		cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
+	if (!cnic) {
+		printk(KERN_ALERT "bnx2i: no route, "
+				  "can't connect using cnic\n");
+		goto no_nx2_route;
+	}
+	hba = bnx2i_find_hba_for_cnic(cnic);
+	if (!hba)
+		goto no_nx2_route;
+
+	if (bnx2i_adapter_ready(hba)) {
+		printk(KERN_ALERT "bnx2i: check route, hba not ready\n");
+		goto no_nx2_route;
+	}
+	if (hba->netdev->mtu > hba->mtu_supported) {
+		printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
+		       hba->netdev->name, hba->netdev->mtu);
+		printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
+		       hba->mtu_supported);
+		goto no_nx2_route;
+	}
+	return hba;
+no_nx2_route:
+	return NULL;
+}
+
+
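The endpoint routines that follow (bnx2i_ep_connect, bnx2i_ep_poll and
bnx2i_ep_disconnect) are invoked through the bnx2i_iscsi_transport ops at
the end of this file on behalf of the iscsid daemon. A minimal sketch of
the calling pattern they are written against; this is illustrative only,
not part of the driver, and the helper name demo_connect() and the
3000 ms timeout are assumptions made for the example:

	static struct iscsi_endpoint *demo_connect(struct Scsi_Host *shost,
						   struct sockaddr *dst)
	{
		struct iscsi_endpoint *ep;
		int rc;

		/* resolve the route, offload the connection and start the
		 * TCP 3-way handshake (1 == non-blocking mode) */
		ep = bnx2i_ep_connect(shost, dst, 1);
		if (IS_ERR(ep))
			return NULL;

		/* wait up to 3 seconds for the handshake to finish;
		 * ep_poll returns 1 once connected, negative on failure */
		rc = bnx2i_ep_poll(ep, 3000);
		if (rc != 1) {
			/* tears down the cm_sock and on-chip context */
			bnx2i_ep_disconnect(ep);
			return NULL;
		}
		return ep;
	}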
+/**
+ * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
+ * @hba: pointer to adapter instance
+ * @ep: endpoint (transport identifier) structure
+ *
+ * destroys cm_sock structure and on chip iscsi context
+ */
+static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
+				struct bnx2i_endpoint *ep)
+{
+	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
+		hba->cnic->cm_destroy(ep->cm_sk);
+
+	if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
+		ep->state = EP_STATE_DISCONN_COMPL;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
+	    ep->state == EP_STATE_DISCONN_TIMEDOUT) {
+		printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
+				  " NW/PCIe trace, driver msgs to developers"
+				  " for analysis\n");
+		return 1;
+	}
+
+	ep->state = EP_STATE_CLEANUP_START;
+	init_timer(&ep->ofld_timer);
+	ep->ofld_timer.expires = 10*HZ + jiffies;
+	ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	ep->ofld_timer.data = (unsigned long) ep;
+	add_timer(&ep->ofld_timer);
+
+	bnx2i_ep_destroy_list_add(hba, ep);
+
+	/* destroy iSCSI context, wait for it to complete */
+	bnx2i_send_conn_destroy(hba, ep);
+	wait_event_interruptible(ep->ofld_wait,
+				 (ep->state != EP_STATE_CLEANUP_START));
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&ep->ofld_timer);
+
+	bnx2i_ep_destroy_list_del(hba, ep);
+
+	if (ep->state != EP_STATE_CLEANUP_CMPL)
+		/* should never happen */
+		printk(KERN_ALERT "bnx2i - conn destroy failed\n");
+
+	return 0;
+}
+
+
+/**
+ * bnx2i_ep_connect - establish TCP connection to target portal
+ * @shost: scsi host
+ * @dst_addr: target IP address
+ * @non_blocking: blocking or non-blocking call
+ *
+ * this routine initiates the TCP/IP connection by invoking Option-2 i/f
+ * with l5_core and the CNIC.
This is a multi-step process of resolving + * route to target, create a iscsi connection context, handshaking with + * CNIC module to create/initialize the socket struct and finally + * sending down option-2 request to complete TCP 3-way handshake + */ +static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, + struct sockaddr *dst_addr, + int non_blocking) +{ + u32 iscsi_cid = BNX2I_CID_RESERVED; + struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr; + struct sockaddr_in6 *desti6; + struct bnx2i_endpoint *bnx2i_ep; + struct bnx2i_hba *hba; + struct cnic_dev *cnic; + struct cnic_sockaddr saddr; + struct iscsi_endpoint *ep; + int rc = 0; + + if (shost) + /* driver is given scsi host to work with */ + hba = iscsi_host_priv(shost); + else + /* + * check if the given destination can be reached through + * a iscsi capable NetXtreme2 device + */ + hba = bnx2i_check_route(dst_addr); + if (!hba) { + rc = -ENOMEM; + goto check_busy; + } + + cnic = hba->cnic; + ep = bnx2i_alloc_ep(hba); + if (!ep) { + rc = -ENOMEM; + goto check_busy; + } + bnx2i_ep = ep->dd_data; + + mutex_lock(&hba->net_dev_lock); + if (bnx2i_adapter_ready(hba)) { + rc = -EPERM; + goto net_if_down; + } + + bnx2i_ep->state = EP_STATE_IDLE; + bnx2i_ep->ep_iscsi_cid = (u16) -1; + bnx2i_ep->num_active_cmds = 0; + iscsi_cid = bnx2i_alloc_iscsi_cid(hba); + if (iscsi_cid == -1) { + printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n"); + rc = -ENOMEM; + goto iscsi_cid_err; + } + bnx2i_ep->hba_age = hba->age; + + rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep); + if (rc != 0) { + printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n"); + rc = -ENOMEM; + goto qp_resc_err; + } + + bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid; + bnx2i_ep->state = EP_STATE_OFLD_START; + bnx2i_ep_ofld_list_add(hba, bnx2i_ep); + + init_timer(&bnx2i_ep->ofld_timer); + bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies; + bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer; + bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; + add_timer(&bnx2i_ep->ofld_timer); + + bnx2i_send_conn_ofld_req(hba, bnx2i_ep); + + /* Wait for CNIC hardware to setup conn context and return 'cid' */ + wait_event_interruptible(bnx2i_ep->ofld_wait, + bnx2i_ep->state != EP_STATE_OFLD_START); + + if (signal_pending(current)) + flush_signals(current); + del_timer_sync(&bnx2i_ep->ofld_timer); + + bnx2i_ep_ofld_list_del(hba, bnx2i_ep); + + if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) { + rc = -ENOSPC; + goto conn_failed; + } + + rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid, + iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep); + if (rc) { + rc = -EINVAL; + goto conn_failed; + } + + bnx2i_ep->cm_sk->rcv_buf = 256 * 1024; + bnx2i_ep->cm_sk->snd_buf = 256 * 1024; + clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags); + + memset(&saddr, 0, sizeof(saddr)); + if (dst_addr->sa_family == AF_INET) { + desti = (struct sockaddr_in *) dst_addr; + saddr.remote.v4 = *desti; + saddr.local.v4.sin_family = desti->sin_family; + } else if (dst_addr->sa_family == AF_INET6) { + desti6 = (struct sockaddr_in6 *) dst_addr; + saddr.remote.v6 = *desti6; + saddr.local.v6.sin6_family = desti6->sin6_family; + } + + bnx2i_ep->timestamp = jiffies; + bnx2i_ep->state = EP_STATE_CONNECT_START; + if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + rc = -EINVAL; + goto conn_failed; + } else + rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr); + + if (rc) + goto release_ep; + + if (bnx2i_map_ep_dbell_regs(bnx2i_ep)) + goto release_ep; + mutex_unlock(&hba->net_dev_lock); + return ep; + 
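+	/* error unwind: undo the offload request, QP resources, endpoint
+	 * and cnic registration in the reverse order of the setup above */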
+release_ep:
+	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
+		mutex_unlock(&hba->net_dev_lock);
+		return ERR_PTR(rc);
+	}
+conn_failed:
+net_if_down:
+iscsi_cid_err:
+	bnx2i_free_qp_resc(hba, bnx2i_ep);
+qp_resc_err:
+	bnx2i_free_ep(ep);
+	mutex_unlock(&hba->net_dev_lock);
+check_busy:
+	bnx2i_unreg_dev_all();
+	return ERR_PTR(rc);
+}
+
+
+/**
+ * bnx2i_ep_poll - polls for TCP connection establishment
+ * @ep: TCP connection (endpoint) handle
+ * @timeout_ms: timeout value in milliseconds
+ *
+ * polls for TCP connect request to complete
+ */
+static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+	struct bnx2i_endpoint *bnx2i_ep;
+	int rc = 0;
+
+	bnx2i_ep = ep->dd_data;
+	if ((bnx2i_ep->state == EP_STATE_IDLE) ||
+	    (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
+	    (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+		return -1;
+	if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
+		return 1;
+
+	rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
+					      ((bnx2i_ep->state ==
+						EP_STATE_OFLD_FAILED) ||
+					       (bnx2i_ep->state ==
+						EP_STATE_CONNECT_FAILED) ||
+					       (bnx2i_ep->state ==
+						EP_STATE_CONNECT_COMPL)),
+					      msecs_to_jiffies(timeout_ms));
+	if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+		rc = -1;
+
+	if (rc > 0)
+		return 1;
+	else if (!rc)
+		return 0;	/* timeout */
+	else
+		return rc;
+}
+
+
+/**
+ * bnx2i_ep_tcp_conn_active - check EP state transition
+ * @bnx2i_ep: endpoint pointer
+ *
+ * check if underlying TCP connection is active
+ */
+static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
+{
+	int ret;
+	int cnic_dev_10g = 0;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+		cnic_dev_10g = 1;
+
+	switch (bnx2i_ep->state) {
+	case EP_STATE_CONNECT_START:
+	case EP_STATE_CLEANUP_FAILED:
+	case EP_STATE_OFLD_FAILED:
+	case EP_STATE_DISCONN_TIMEDOUT:
+		ret = 0;
+		break;
+	case EP_STATE_CONNECT_COMPL:
+	case EP_STATE_ULP_UPDATE_START:
+	case EP_STATE_ULP_UPDATE_COMPL:
+	case EP_STATE_TCP_FIN_RCVD:
+	case EP_STATE_ULP_UPDATE_FAILED:
+		ret = 1;
+		break;
+	case EP_STATE_TCP_RST_RCVD:
+		ret = 0;
+		break;
+	case EP_STATE_CONNECT_FAILED:
+		if (cnic_dev_10g)
+			ret = 1;
+		else
+			ret = 0;
+		break;
+	default:
+		ret = 0;
+	}
+
+	return ret;
+}
+
+
+/**
+ * bnx2i_ep_disconnect - executes TCP connection teardown process
+ * @ep: TCP connection (endpoint) handle
+ *
+ * executes TCP connection teardown process
+ */
+static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
+{
+	struct bnx2i_endpoint *bnx2i_ep;
+	struct bnx2i_conn *bnx2i_conn = NULL;
+	struct iscsi_session *session = NULL;
+	struct iscsi_conn *conn;
+	struct cnic_dev *cnic;
+	struct bnx2i_hba *hba;
+
+	bnx2i_ep = ep->dd_data;
+
+	/* driver should not attempt connection cleanup until TCP_CONNECT
+	 * completes either successfully or fails.
+	 * The connect timeout is about 9 secs, so wait up to 12 secs
+	 * for it to complete
+	 */
+	while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
+		!time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
+		msleep(250);
+
+	if (bnx2i_ep->conn) {
+		bnx2i_conn = bnx2i_ep->conn;
+		conn = bnx2i_conn->cls_conn->dd_data;
+		session = conn->session;
+
+		spin_lock_bh(&session->lock);
+		bnx2i_conn->is_bound = 0;
+		spin_unlock_bh(&session->lock);
+	}
+
+	hba = bnx2i_ep->hba;
+	if (bnx2i_ep->state == EP_STATE_IDLE)
+		goto return_bnx2i_ep;
+	cnic = hba->cnic;
+
+	mutex_lock(&hba->net_dev_lock);
+
+	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+		goto free_resc;
+	if (bnx2i_ep->hba_age != hba->age)
+		goto free_resc;
+
+	if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
+		goto destroy_conn;
+
+	bnx2i_ep->state = EP_STATE_DISCONN_START;
+
+	init_timer(&bnx2i_ep->ofld_timer);
+	bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies;
+	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
+	add_timer(&bnx2i_ep->ofld_timer);
+
+	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		int close = 0;
+
+		if (session) {
+			spin_lock_bh(&session->lock);
+			if (session->state == ISCSI_STATE_LOGGING_OUT)
+				close = 1;
+			spin_unlock_bh(&session->lock);
+		}
+		if (close)
+			cnic->cm_close(bnx2i_ep->cm_sk);
+		else
+			cnic->cm_abort(bnx2i_ep->cm_sk);
+	} else
+		goto free_resc;
+
+	/* wait for option-2 conn teardown */
+	wait_event_interruptible(bnx2i_ep->ofld_wait,
+				 bnx2i_ep->state != EP_STATE_DISCONN_START);
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&bnx2i_ep->ofld_timer);
+
+destroy_conn:
+	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
+		mutex_unlock(&hba->net_dev_lock);
+		return;
+	}
+free_resc:
+	mutex_unlock(&hba->net_dev_lock);
+	bnx2i_free_qp_resc(hba, bnx2i_ep);
+return_bnx2i_ep:
+	if (bnx2i_conn)
+		bnx2i_conn->ep = NULL;
+
+	bnx2i_free_ep(ep);
+
+	if (!hba->ofld_conns_active)
+		bnx2i_unreg_dev_all();
+}
+
+
+/**
+ * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
+ * @shost: scsi host pointer
+ * @params: pointer to buffer containing iscsi path message
+ */
+static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
+{
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	char *buf = (char *) params;
+	u16 len = sizeof(*params);
+
+	/* handled by cnic driver */
+	hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
+				     len);
+
+	return 0;
+}
+
+
+/*
+ * 'scsi_host_template' structure and 'iscsi_transport' structure template
+ * used while registering with the scsi host and iSCSI transport module.
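+ * (bnx2i_host_template immediately below, bnx2i_iscsi_transport after it)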
+ */ +static struct scsi_host_template bnx2i_host_template = { + .module = THIS_MODULE, + .name = "Broadcom Offload iSCSI Initiator", + .proc_name = "bnx2i", + .queuecommand = iscsi_queuecommand, + .eh_abort_handler = iscsi_eh_abort, + .eh_device_reset_handler = iscsi_eh_device_reset, + .eh_target_reset_handler = iscsi_eh_target_reset, + .can_queue = 1024, + .max_sectors = 127, + .cmd_per_lun = 32, + .this_id = -1, + .use_clustering = ENABLE_CLUSTERING, + .sg_tablesize = ISCSI_MAX_BDS_PER_CMD, + .shost_attrs = bnx2i_dev_attributes, +}; + +struct iscsi_transport bnx2i_iscsi_transport = { + .owner = THIS_MODULE, + .name = "bnx2i", + .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | + CAP_MULTI_R2T | CAP_DATADGST | + CAP_DATA_PATH_OFFLOAD, + .param_mask = ISCSI_MAX_RECV_DLENGTH | + ISCSI_MAX_XMIT_DLENGTH | + ISCSI_HDRDGST_EN | + ISCSI_DATADGST_EN | + ISCSI_INITIAL_R2T_EN | + ISCSI_MAX_R2T | + ISCSI_IMM_DATA_EN | + ISCSI_FIRST_BURST | + ISCSI_MAX_BURST | + ISCSI_PDU_INORDER_EN | + ISCSI_DATASEQ_INORDER_EN | + ISCSI_ERL | + ISCSI_CONN_PORT | + ISCSI_CONN_ADDRESS | + ISCSI_EXP_STATSN | + ISCSI_PERSISTENT_PORT | + ISCSI_PERSISTENT_ADDRESS | + ISCSI_TARGET_NAME | ISCSI_TPGT | + ISCSI_USERNAME | ISCSI_PASSWORD | + ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | + ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | + ISCSI_LU_RESET_TMO | + ISCSI_PING_TMO | ISCSI_RECV_TMO | + ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, + .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME, + .create_session = bnx2i_session_create, + .destroy_session = bnx2i_session_destroy, + .create_conn = bnx2i_conn_create, + .bind_conn = bnx2i_conn_bind, + .destroy_conn = bnx2i_conn_destroy, + .set_param = iscsi_set_param, + .get_conn_param = bnx2i_conn_get_param, + .get_session_param = iscsi_session_get_param, + .get_host_param = bnx2i_host_get_param, + .start_conn = bnx2i_conn_start, + .stop_conn = iscsi_conn_stop, + .send_pdu = iscsi_conn_send_pdu, + .xmit_task = bnx2i_task_xmit, + .get_stats = bnx2i_conn_get_stats, + /* TCP connect - disconnect - option-2 interface calls */ + .ep_connect = bnx2i_ep_connect, + .ep_poll = bnx2i_ep_poll, + .ep_disconnect = bnx2i_ep_disconnect, + .set_path = bnx2i_nl_set_path, + /* Error recovery timeout call */ + .session_recovery_timedout = iscsi_session_recovery_timedout, + .cleanup_task = bnx2i_cleanup_task, +}; diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c new file mode 100644 index 000000000000..96426b751eb2 --- /dev/null +++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c @@ -0,0 +1,142 @@ +/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver. + * + * Copyright (c) 2004 - 2009 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
+
+/**
+ * bnx2i_dev_to_hba - maps dev pointer to adapter struct
+ * @dev: device pointer
+ *
+ * Map device to hba structure
+ */
+static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	return iscsi_host_priv(shost);
+}
+
+
+/**
+ * bnx2i_show_sq_info - returns currently configured send queue (SQ) size
+ * @dev: device pointer
+ * @buf: buffer to return current SQ size parameter
+ *
+ * Returns the current SQ size parameter; this parameter determines the
+ * number of outstanding iSCSI commands supported on a connection
+ */
+static ssize_t bnx2i_show_sq_info(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+	return sprintf(buf, "0x%x\n", hba->max_sqes);
+}
+
+
+/**
+ * bnx2i_set_sq_info - update send queue (SQ) size parameter
+ * @dev: device pointer
+ * @buf: buffer containing the new SQ size
+ * @count: parameter buffer size
+ *
+ * Interface for user to change the send queue size allocated for each conn.
+ * Must be within SQ limits and a power of 2. For the latter this is needed
+ * because of how libiscsi preallocates tasks.
+ */
+static ssize_t bnx2i_set_sq_info(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+	u32 val;
+	int max_sq_size;
+
+	if (hba->ofld_conns_active)
+		goto skip_config;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+		max_sq_size = BNX2I_5770X_SQ_WQES_MAX;
+	else
+		max_sq_size = BNX2I_570X_SQ_WQES_MAX;
+
+	if (sscanf(buf, " 0x%x ", &val) > 0) {
+		if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) &&
+		    (is_power_of_2(val)))
+			hba->max_sqes = val;
+	}
+
+	return count;
+
+skip_config:
+	printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n");
+	return 0;
+}
+
+
+/**
+ * bnx2i_show_ccell_info - returns command cell (HQ) size
+ * @dev: device pointer
+ * @buf: buffer to return current CCELL count
+ *
+ * returns per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_show_ccell_info(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+	return sprintf(buf, "0x%x\n", hba->num_ccell);
+}
+
+
+/**
+ * bnx2i_set_ccell_info - sets command cell (HQ) size
+ * @dev: device pointer
+ * @buf: buffer containing the new CCELL count
+ * @count: parameter buffer size
+ *
+ * updates per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_set_ccell_info(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	u32 val;
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+	if (hba->ofld_conns_active)
+		goto skip_config;
+
+	if (sscanf(buf, " 0x%x ", &val) > 0) {
+		if ((val >= BNX2I_CCELLS_MIN) &&
+		    (val <= BNX2I_CCELLS_MAX)) {
+			hba->num_ccell = val;
+		}
+	}
+
+	return count;
+
+skip_config:
+	printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n");
+	return 0;
+}
+
+
+static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
+		   bnx2i_show_sq_info, bnx2i_set_sq_info);
+static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
+		   bnx2i_show_ccell_info, bnx2i_set_ccell_info);
+
+struct device_attribute *bnx2i_dev_attributes[] = {
+	&dev_attr_sq_size,
+	&dev_attr_num_ccell,
+	NULL
+};
--
cgit v1.2.3


From 238ddbb98c327a7392ced5ae65216c55969749ea Mon Sep 17 00:00:00 2001
From: Alan Cox
Date: Tue, 9 Jun 2009 13:44:02 +0100
Subject: [SCSI] gdth: fix overlapping snprintf users Closes-bug: http://bugzilla.kernel.org/show_bug.cgi?id=13438 Closes-bug: http://bugzilla.kernel.org/show_bug.cgi?id=13437 Signed-off-by: Alan Cox Signed-off-by: James Bottomley --- drivers/scsi/gdth_proc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c index 59349a316e13..1258da34fbc2 100644 --- a/drivers/scsi/gdth_proc.c +++ b/drivers/scsi/gdth_proc.c @@ -152,6 +152,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, struct Scsi_Host *host, gdth_ha_str *ha) { int size = 0,len = 0; + int hlen; off_t begin = 0,pos = 0; int id, i, j, k, sec, flag; int no_mdrv = 0, drv_no, is_mirr; @@ -192,11 +193,11 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, if (reserve_list[0] == 0xff) strcpy(hrec, "--"); else { - sprintf(hrec, "%d", reserve_list[0]); + hlen = sprintf(hrec, "%d", reserve_list[0]); for (i = 1; i < MAX_RES_ARGS; i++) { if (reserve_list[i] == 0xff) break; - sprintf(hrec,"%s,%d", hrec, reserve_list[i]); + hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]); } } size = sprintf(buffer+len, -- cgit v1.2.3 From d765898970f35acef960581f678b9da9d5c779fa Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 5 Jun 2009 14:41:29 +0000 Subject: drm/i915: enable MCHBAR if needed Using the new PNP resource checking code, this patch allows the i915 driver to allocate MCHBAR space if needed and use the BAR to determine current memory settings. [apw@canonical.com: moved to the new generic PNP resource interface] Signed-off-by: Jesse Barnes Signed-off-by: Andy Whitcroft Signed-off-by: Eric Anholt failure to update-index after git-am --reject to hand-apply Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_drv.h | 2 + drivers/gpu/drm/i915/i915_gem_tiling.c | 145 +++++++++++++++++++++++++++++++++ 2 files changed, 147 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index db81f5513daa..6a471458d61a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -150,6 +150,8 @@ typedef struct drm_i915_private { drm_local_map_t hws_map; struct drm_gem_object *hws_obj; + struct resource mch_res; + unsigned int cpp; int back_offset; int front_offset; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 07d976bf4931..9a05cadaa4ad 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -25,6 +25,8 @@ * */ +#include +#include #include "linux/string.h" #include "linux/bitops.h" #include "drmP.h" @@ -81,6 +83,143 @@ * to match what the GPU expects. */ +#define MCHBAR_I915 0x44 +#define MCHBAR_I965 0x48 +#define MCHBAR_SIZE (4*4096) + +#define DEVEN_REG 0x54 +#define DEVEN_MCHBAR_EN (1 << 28) + +/* Allocate space for the MCH regs if needed, return nonzero on error */ +static int +intel_alloc_mchbar_resource(struct drm_device *dev) +{ + struct pci_dev *bridge_dev; + drm_i915_private_t *dev_priv = dev->dev_private; + int reg = IS_I965G(dev) ? 
MCHBAR_I965 : MCHBAR_I915; + u32 temp_lo, temp_hi = 0; + u64 mchbar_addr; + int ret = 0; + + bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); + if (!bridge_dev) { + DRM_DEBUG("no bridge dev?!\n"); + ret = -ENODEV; + goto out; + } + + if (IS_I965G(dev)) + pci_read_config_dword(bridge_dev, reg + 4, &temp_hi); + pci_read_config_dword(bridge_dev, reg, &temp_lo); + mchbar_addr = ((u64)temp_hi << 32) | temp_lo; + + /* If ACPI doesn't have it, assume we need to allocate it ourselves */ + if (mchbar_addr && + pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { + ret = 0; + goto out_put; + } + + /* Get some space for it */ + ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res, + MCHBAR_SIZE, MCHBAR_SIZE, + PCIBIOS_MIN_MEM, + 0, pcibios_align_resource, + bridge_dev); + if (ret) { + DRM_DEBUG("failed bus alloc: %d\n", ret); + dev_priv->mch_res.start = 0; + goto out_put; + } + + if (IS_I965G(dev)) + pci_write_config_dword(bridge_dev, reg + 4, + upper_32_bits(dev_priv->mch_res.start)); + + pci_write_config_dword(bridge_dev, reg, + lower_32_bits(dev_priv->mch_res.start)); +out_put: + pci_dev_put(bridge_dev); +out: + return ret; +} + +/* Setup MCHBAR if possible, return true if we should disable it again */ +static bool +intel_setup_mchbar(struct drm_device *dev) +{ + struct pci_dev *bridge_dev; + int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; + u32 temp; + bool need_disable = false, enabled; + + bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); + if (!bridge_dev) { + DRM_DEBUG("no bridge dev?!\n"); + goto out; + } + + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_read_config_dword(bridge_dev, DEVEN_REG, &temp); + enabled = !!(temp & DEVEN_MCHBAR_EN); + } else { + pci_read_config_dword(bridge_dev, mchbar_reg, &temp); + enabled = temp & 1; + } + + /* If it's already enabled, don't have to do anything */ + if (enabled) + goto out_put; + + if (intel_alloc_mchbar_resource(dev)) + goto out_put; + + need_disable = true; + + /* Space is allocated or reserved, so enable it. */ + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_write_config_dword(bridge_dev, DEVEN_REG, + temp | DEVEN_MCHBAR_EN); + } else { + pci_read_config_dword(bridge_dev, mchbar_reg, &temp); + pci_write_config_dword(bridge_dev, mchbar_reg, temp | 1); + } +out_put: + pci_dev_put(bridge_dev); +out: + return need_disable; +} + +static void +intel_teardown_mchbar(struct drm_device *dev, bool disable) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct pci_dev *bridge_dev; + int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; + u32 temp; + + bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); + if (!bridge_dev) { + DRM_DEBUG("no bridge dev?!\n"); + return; + } + + if (disable) { + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_read_config_dword(bridge_dev, DEVEN_REG, &temp); + temp &= ~DEVEN_MCHBAR_EN; + pci_write_config_dword(bridge_dev, DEVEN_REG, temp); + } else { + pci_read_config_dword(bridge_dev, mchbar_reg, &temp); + temp &= ~1; + pci_write_config_dword(bridge_dev, mchbar_reg, temp); + } + } + + if (dev_priv->mch_res.start) + release_resource(&dev_priv->mch_res); +} + /** * Detects bit 6 swizzling of address lookup between IGD access and CPU * access through main memory. 
@@ -91,6 +230,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + bool need_disable; if (!IS_I9XX(dev)) { /* As far as we know, the 865 doesn't have these bit 6 @@ -101,6 +241,9 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) } else if (IS_MOBILE(dev)) { uint32_t dcc; + /* Try to make sure MCHBAR is enabled before poking at it */ + need_disable = intel_setup_mchbar(dev); + /* On mobile 9xx chipsets, channel interleave by the CPU is * determined by DCC. For single-channel, neither the CPU * nor the GPU do swizzling. For dual channel interleaved, @@ -140,6 +283,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; } + + intel_teardown_mchbar(dev, need_disable); } else { /* The 965, G33, and newer, have a very flexible memory * configuration. It will enable dual-channel mode -- cgit v1.2.3 From 036a4a7d9272582fc7370359515d807393e2f728 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Mon, 8 Jun 2009 14:40:19 +0800 Subject: drm/i915: handle interrupt on new chipset Update interrupt handling methods for IGDNG with new registers for display and graphics interrupt functions. As we won't use irq-based vblank sync in dri2, so display interrupt on new chip will be used for hotplug only in future. Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 5 ++ drivers/gpu/drm/i915/i915_gem.c | 5 +- drivers/gpu/drm/i915/i915_irq.c | 186 ++++++++++++++++++++++++++++++++++++++-- 4 files changed, 190 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 68e882cd9651..6bc716d13a52 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1162,7 +1162,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - if (IS_G4X(dev)) { + if (IS_G4X(dev) || IS_IGDNG(dev)) { dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->driver->get_vblank_counter = gm45_get_vblank_counter; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6a471458d61a..8ef6bcec211b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -167,6 +167,11 @@ typedef struct drm_i915_private { /** Cached value of IMR to avoid reads in updating the bitfield */ u32 irq_mask_reg; u32 pipestat[2]; + /** splitted irq regs for graphics and display engine on IGDNG, + irq_mask_reg is still used for display irq. 
*/ + u32 gt_irq_mask_reg; + u32 gt_irq_enable_reg; + u32 de_irq_enable_reg; u32 hotplug_supported_mask; struct work_struct hotplug_work; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3fbd8a0c40d1..38e0f8301a14 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1714,7 +1714,10 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) BUG_ON(seqno == 0); if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { - ier = I915_READ(IER); + if (IS_IGDNG(dev)) + ier = I915_READ(DEIER) | I915_READ(GTIER); + else + ier = I915_READ(IER); if (!ier) { DRM_ERROR("something (likely vbetool) disabled " "interrupts, re-enabling\n"); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 701d6809deb7..b86b7b7130c6 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -57,6 +57,47 @@ #define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ DRM_I915_VBLANK_PIPE_B) +void +igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) +{ + if ((dev_priv->gt_irq_mask_reg & mask) != 0) { + dev_priv->gt_irq_mask_reg &= ~mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); + (void) I915_READ(GTIMR); + } +} + +static inline void +igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) +{ + if ((dev_priv->gt_irq_mask_reg & mask) != mask) { + dev_priv->gt_irq_mask_reg |= mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); + (void) I915_READ(GTIMR); + } +} + +/* For display hotplug interrupt */ +void +igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) +{ + if ((dev_priv->irq_mask_reg & mask) != 0) { + dev_priv->irq_mask_reg &= ~mask; + I915_WRITE(DEIMR, dev_priv->irq_mask_reg); + (void) I915_READ(DEIMR); + } +} + +static inline void +igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) +{ + if ((dev_priv->irq_mask_reg & mask) != mask) { + dev_priv->irq_mask_reg |= mask; + I915_WRITE(DEIMR, dev_priv->irq_mask_reg); + (void) I915_READ(DEIMR); + } +} + void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) { @@ -196,6 +237,47 @@ static void i915_hotplug_work_func(struct work_struct *work) drm_sysfs_hotplug_event(dev); } +irqreturn_t igdng_irq_handler(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int ret = IRQ_NONE; + u32 de_iir, gt_iir; + u32 new_de_iir, new_gt_iir; + struct drm_i915_master_private *master_priv; + + de_iir = I915_READ(DEIIR); + gt_iir = I915_READ(GTIIR); + + for (;;) { + if (de_iir == 0 && gt_iir == 0) + break; + + ret = IRQ_HANDLED; + + I915_WRITE(DEIIR, de_iir); + new_de_iir = I915_READ(DEIIR); + I915_WRITE(GTIIR, gt_iir); + new_gt_iir = I915_READ(GTIIR); + + if (dev->primary->master) { + master_priv = dev->primary->master->driver_priv; + if (master_priv->sarea_priv) + master_priv->sarea_priv->last_dispatch = + READ_BREADCRUMB(dev_priv); + } + + if (gt_iir & GT_USER_INTERRUPT) { + dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); + DRM_WAKEUP(&dev_priv->irq_queue); + } + + de_iir = new_de_iir; + gt_iir = new_gt_iir; + } + + return ret; +} + irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; @@ -212,6 +294,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) atomic_inc(&dev_priv->irq_received); + if (IS_IGDNG(dev)) + return igdng_irq_handler(dev); + iir = I915_READ(IIR); if (IS_I965G(dev)) { @@ -349,8 +434,12 @@ void i915_user_irq_get(struct drm_device *dev) unsigned long irqflags; 
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) - i915_enable_irq(dev_priv, I915_USER_INTERRUPT); + if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { + if (IS_IGDNG(dev)) + igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); + else + i915_enable_irq(dev_priv, I915_USER_INTERRUPT); + } spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } @@ -361,8 +450,12 @@ void i915_user_irq_put(struct drm_device *dev) spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); - if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) - i915_disable_irq(dev_priv, I915_USER_INTERRUPT); + if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { + if (IS_IGDNG(dev)) + igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); + else + i915_disable_irq(dev_priv, I915_USER_INTERRUPT); + } spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } @@ -455,6 +548,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) if (!(pipeconf & PIPEACONF_ENABLE)) return -EINVAL; + if (IS_IGDNG(dev)) + return 0; + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); if (IS_I965G(dev)) i915_enable_pipestat(dev_priv, pipe, @@ -474,6 +570,9 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; + if (IS_IGDNG(dev)) + return; + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE | @@ -547,12 +646,65 @@ int i915_vblank_swap(struct drm_device *dev, void *data, /* drm_dma.h hooks */ +static void igdng_irq_preinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + I915_WRITE(HWSTAM, 0xeffe); + + /* XXX hotplug from PCH */ + + I915_WRITE(DEIMR, 0xffffffff); + I915_WRITE(DEIER, 0x0); + (void) I915_READ(DEIER); + + /* and GT */ + I915_WRITE(GTIMR, 0xffffffff); + I915_WRITE(GTIER, 0x0); + (void) I915_READ(GTIER); +} + +static int igdng_irq_postinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + /* enable kind of interrupts always enabled */ + u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */; + u32 render_mask = GT_USER_INTERRUPT; + + dev_priv->irq_mask_reg = ~display_mask; + dev_priv->de_irq_enable_reg = display_mask; + + /* should always can generate irq */ + I915_WRITE(DEIIR, I915_READ(DEIIR)); + I915_WRITE(DEIMR, dev_priv->irq_mask_reg); + I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); + (void) I915_READ(DEIER); + + /* user interrupt should be enabled, but masked initial */ + dev_priv->gt_irq_mask_reg = 0xffffffff; + dev_priv->gt_irq_enable_reg = render_mask; + + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); + I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); + (void) I915_READ(GTIER); + + return 0; +} + void i915_driver_irq_preinstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; atomic_set(&dev_priv->irq_received, 0); + INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); + + if (IS_IGDNG(dev)) { + igdng_irq_preinstall(dev); + return; + } + if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); @@ -564,7 +716,6 @@ void i915_driver_irq_preinstall(struct drm_device * dev) I915_WRITE(IMR, 0xffffffff); 
I915_WRITE(IER, 0x0); (void) I915_READ(IER); - INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); } int i915_driver_irq_postinstall(struct drm_device *dev) @@ -572,8 +723,13 @@ int i915_driver_irq_postinstall(struct drm_device *dev) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; + DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); + dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; + if (IS_IGDNG(dev)) + return igdng_irq_postinstall(dev); + /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; @@ -613,11 +769,24 @@ int i915_driver_irq_postinstall(struct drm_device *dev) (void) I915_READ(IER); opregion_enable_asle(dev); - DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); return 0; } +static void igdng_irq_uninstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + I915_WRITE(HWSTAM, 0xffffffff); + + I915_WRITE(DEIMR, 0xffffffff); + I915_WRITE(DEIER, 0x0); + I915_WRITE(DEIIR, I915_READ(DEIIR)); + + I915_WRITE(GTIMR, 0xffffffff); + I915_WRITE(GTIER, 0x0); + I915_WRITE(GTIIR, I915_READ(GTIIR)); +} + void i915_driver_irq_uninstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -627,6 +796,11 @@ void i915_driver_irq_uninstall(struct drm_device * dev) dev_priv->vblank_pipe = 0; + if (IS_IGDNG(dev)) { + igdng_irq_uninstall(dev); + return; + } + if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); -- cgit v1.2.3 From fa0864b26b4bfa1dd4bb78eeffbc1f398cb56425 Mon Sep 17 00:00:00 2001 From: Michael Cousin Date: Fri, 5 Jun 2009 21:16:22 +0200 Subject: drm/i915: Skip lvds with Aopen i945GTt-VFA Signed-off-by: Michael Cousin Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_lvds.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index e4ca6a3cdbbc..a7ae9f46aa9a 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -493,6 +493,13 @@ static const struct dmi_system_id __initdata intel_no_lvds[] = { DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Aopen i945GTt-VFA", + .matches = { + DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), + }, + }, { } /* terminating entry */ }; -- cgit v1.2.3 From 83d60795157c83389e6aaa0532d5e19afa976a24 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 6 Jun 2009 09:45:57 +0100 Subject: drm/i915: Sanity check execbuffer arguments before touching state. By sending a broken execbuffer (its length was not suitably aligned) I triggered an operation upon a freed object. The invalid alignment was discovered after updating the write_domain on the object but before the object was placed on the active queue. So during the unwind process following the error, the now freed object attempts to flush its non-existent, but outstanding, GPU writes causing this use-after-free. [drm:i915_dispatch_gem_execbuffer] *ERROR* alignment [drm:i915_gem_execbuffer] *ERROR* dispatch failed -22 WARNING: at lib/kref.c:43 warn_slowpath_null+0x10/0x15() Modules linked in: Pid: 4552, comm: lt-csi-drm Not tainted 2.6.30-rc6 #423 Call Trace: [] warn_slowpath_fmt+0x57/0x6d [] ? get_pageblock_migratetype+0x18/0x1e [] ? free_hot_page+0xa/0xc [] ? __free_pages+0x16/0x1f [] ? 
shmem_truncate_range+0x63e/0x656 [] ? slob_page_alloc+0x146/0x1c8 [] warn_slowpath_null+0x10/0x15 [] kref_get+0x1b/0x21 [] i915_gem_object_move_to_active+0x1f/0x56 [] i915_add_request+0x156/0x19a [] i915_gem_object_flush_gpu_write_domain+0x28/0x3f [] i915_gem_object_unbind+0x4a/0x124 [] i915_gem_free_object+0x33/0x9b [] drm_gem_object_free+0x28/0x4a [] ? drm_gem_object_free+0x0/0x4a [] kref_put+0x38/0x41 [] drm_gem_object_unreference+0x11/0x13 [] drm_gem_object_handle_unreference+0x1e/0x21 [] drm_gem_object_release_handle+0xa/0xe [] idr_for_each+0x5f/0x98 [] ? drm_gem_object_release_handle+0x0/0xe [] drm_gem_release+0x22/0x34 [] drm_release+0x1e8/0x3c4 [] __fput+0xaf/0x146 [] fput+0x12/0x14 [] filp_close+0x48/0x52 [] put_files_struct+0x57/0x9b [] exit_files+0x1e/0x20 [] do_exit+0x16d/0x511 [] ? __schedule+0x3d4/0x3e5 [] ? handle_irq+0xd/0x69 [] do_group_exit+0x4d/0x73 [] sys_exit_group+0x13/0x17 [] sysenter_do_call+0x12/0x2b Signed-off-by: Chris Wilson Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem.c | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 38e0f8301a14..ac22668b239a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3050,20 +3050,12 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev, drm_i915_private_t *dev_priv = dev->dev_private; int nbox = exec->num_cliprects; int i = 0, count; - uint32_t exec_start, exec_len; + uint32_t exec_start, exec_len; RING_LOCALS; exec_start = (uint32_t) exec_offset + exec->batch_start_offset; exec_len = (uint32_t) exec->batch_len; - if ((exec_start | exec_len) & 0x7) { - DRM_ERROR("alignment\n"); - return -EINVAL; - } - - if (!exec_start) - return -EINVAL; - count = nbox ? nbox : 1; for (i = 0; i < count; i++) { @@ -3211,6 +3203,24 @@ err: return ret; } +static int +i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec, + uint64_t exec_offset) +{ + uint32_t exec_start, exec_len; + + exec_start = (uint32_t) exec_offset + exec->batch_start_offset; + exec_len = (uint32_t) exec->batch_len; + + if ((exec_start | exec_len) & 0x7) + return -EINVAL; + + if (!exec_start) + return -EINVAL; + + return 0; +} + int i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -3362,6 +3372,14 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND; batch_obj->pending_write_domain = 0; + /* Sanity check the batch buffer, prior to moving objects */ + exec_offset = exec_list[args->buffer_count - 1].offset; + ret = i915_gem_check_execbuffer (args, exec_offset); + if (ret != 0) { + DRM_ERROR("execbuf with invalid offset/length\n"); + goto err; + } + i915_verify_inactive(dev, __FILE__, __LINE__); /* Zero the global flush/invalidate flags. These @@ -3410,8 +3428,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, } #endif - exec_offset = exec_list[args->buffer_count - 1].offset; - #if WATCH_EXEC i915_gem_dump_object(batch_obj, args->batch_len, -- cgit v1.2.3 From 5f26a2c7ad6eba97141e8372f3def282f934b169 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 6 Jun 2009 09:45:58 +0100 Subject: drm/i915: OR in the COMMAND read domain for the batch buffer. The batch buffer may be shared with another read buffer, so we should not ignore any previously set domains, but just or in the command domain (and check that the buffer is not writable). 
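In outline (an illustrative sketch only; the actual hunk follows below), the fix accumulates the command domain with a bitwise OR instead of assigning it, and rejects any batch that an earlier execbuffer entry marked as a GPU write target:

	/* sketch of the new batch-buffer domain handling */
	if (batch_obj->pending_write_domain)
		return -EINVAL;	/* a self-modifying batch cannot be validated */
	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
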
Signed-off-by: Chris Wilson Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ac22668b239a..2d705e8a297e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3369,8 +3369,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, /* Set the pending read domains for the batch buffer to COMMAND */ batch_obj = object_list[args->buffer_count-1]; - batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND; - batch_obj->pending_write_domain = 0; + if (batch_obj->pending_write_domain) { + DRM_ERROR("Attempting to use self-modifying batch buffer\n"); + ret = -EINVAL; + goto err; + } + batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND; /* Sanity check the batch buffer, prior to moving objects */ exec_offset = exec_list[args->buffer_count - 1].offset; -- cgit v1.2.3 From 1f803ee5cea67d2387aeedb4b07e645a743729de Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 6 Jun 2009 09:45:59 +0100 Subject: drm/i915: Call drm_vblank_post_modeset() on error paths. Ensure that the drm_vblank_pre_modeset() is always balanced by drm_vblank_post_modeset() within intel_crtc_mode_set(). Signed-off-by: Chris Wilson Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_display.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c5c45827ca01..a87eeffc2c8d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1598,6 +1598,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); if (!ok) { DRM_ERROR("Couldn't find PLL settings for mode!\n"); + drm_vblank_post_modeset(dev, pipe); return -EINVAL; } @@ -1858,12 +1859,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Flush the plane changes */ ret = intel_pipe_set_base(crtc, x, y, old_fb); - if (ret != 0) - return ret; - drm_vblank_post_modeset(dev, pipe); - return 0; + return ret; } /** Loads the palette/gamma unit for the CRTC with the prepared values */ -- cgit v1.2.3 From b1ce786cb85280490ca3c29a62ddf8608826b414 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 6 Jun 2009 09:46:00 +0100 Subject: drm/i915: no need to hold mutex for object lookup Signed-off-by: Chris Wilson Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2d705e8a297e..744bf9803ea3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3701,15 +3701,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; - mutex_lock(&dev->struct_mutex); obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", args->handle); - mutex_unlock(&dev->struct_mutex); return -EBADF; } + mutex_lock(&dev->struct_mutex); /* Update the active list for the hardware's current position. 
* Otherwise this only updates on a delayed timer or when irqs are * actually unmasked, and our working set ends up being larger than -- cgit v1.2.3 From 21d509e339565c82887733c02465bb7f5866c8f5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 6 Jun 2009 09:46:02 +0100 Subject: drm/i915: use I915_GEM_GPU_DOMAINS Signed-off-by: Chris Wilson Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 744bf9803ea3..cf5dc08b6fa8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -989,10 +989,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, return -ENODEV; /* Only handle setting domains to types used by the CPU. */ - if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) + if (write_domain & I915_GEM_GPU_DOMAINS) return -EINVAL; - if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) + if (read_domains & I915_GEM_GPU_DOMAINS) return -EINVAL; /* Having something in the write domain implies it's in the read @@ -1769,8 +1769,7 @@ i915_gem_flush(struct drm_device *dev, if (flush_domains & I915_GEM_DOMAIN_CPU) drm_agp_chipset_flush(dev); - if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU | - I915_GEM_DOMAIN_GTT)) { + if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { /* * read/write caches: * @@ -2424,8 +2423,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) * wasn't in the GTT, there shouldn't be any way it could have been in * a GPU cache */ - BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); - BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); + BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); + BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); return 0; } @@ -3568,8 +3567,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) atomic_inc(&dev->pin_count); atomic_add(obj->size, &dev->pin_memory); if (!obj_priv->active && - (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | - I915_GEM_DOMAIN_GTT)) == 0 && + (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 && !list_empty(&obj_priv->list)) list_del_init(&obj_priv->list); } @@ -3596,8 +3594,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) */ if (obj_priv->pin_count == 0) { if (!obj_priv->active && - (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | - I915_GEM_DOMAIN_GTT)) == 0) + (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); atomic_dec(&dev->pin_count); @@ -3847,9 +3844,8 @@ i915_gem_idle(struct drm_device *dev) /* Flush the GPU along with all non-CPU write domains */ - i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), - ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); - seqno = i915_add_request(dev, NULL, ~I915_GEM_DOMAIN_CPU); + i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); if (seqno == 0) { mutex_unlock(&dev->struct_mutex); -- cgit v1.2.3 From 2939e1f5331455d17a4a704dd6210e1474002545 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 6 Jun 2009 09:46:03 +0100 Subject: drm/i915: NOMEM->NOSPC To differentiate encountering an out-of-memory error from running out of space in the aperture, use ENOSPC for the latter.
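A sketch of what the distinction buys a caller (illustrative shape only, not code from the patch; the function names are the driver's own): -ENOSPC now unambiguously means the aperture is full and eviction is worth trying, while -ENOMEM still means a host allocation failed and retrying cannot help.

	/* sketch: the two failure modes can now be told apart */
	ret = i915_gem_object_bind_to_gtt(obj, alignment);
	if (ret == -ENOSPC)
		ret = i915_gem_evict_something(dev);	/* evict, then retry */
	else if (ret == -ENOMEM)
		return ret;				/* real OOM: give up */
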
Signed-off-by: Chris Wilson Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index cf5dc08b6fa8..c0ae6bbbd9b5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2005,7 +2005,7 @@ i915_gem_evict_something(struct drm_device *dev) /* If we didn't do any of the above, there's nothing to be done * and we just can't fit it in. */ - return -ENOMEM; + return -ENOSPC; } return ret; } @@ -2020,7 +2020,7 @@ i915_gem_evict_everything(struct drm_device *dev) if (ret != 0) break; } - if (ret == -ENOMEM) + if (ret == -ENOSPC) return 0; return ret; } @@ -2229,7 +2229,7 @@ try_again: loff_t offset; if (avail == 0) - return -ENOMEM; + return -ENOSPC; for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { @@ -2378,7 +2378,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) spin_unlock(&dev_priv->mm.active_list_lock); if (lists_empty) { DRM_ERROR("GTT full, but LRU list empty\n"); - return -ENOMEM; + return -ENOSPC; } ret = i915_gem_evict_something(dev); @@ -3349,7 +3349,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, break; /* error other than GTT full, or we've already tried again */ - if (ret != -ENOMEM || pin_tries >= 1) { + if (ret != -ENOSPC || pin_tries >= 1) { if (ret != -ERESTARTSYS) DRM_ERROR("Failed to pin buffers %d\n", ret); goto err; -- cgit v1.2.3 From 03d6069912babc07a3da20e715dd6a5dc8f0f867 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Fri, 5 Jun 2009 18:19:56 -0700 Subject: drm/i915: Hook connector to encoder during load detection (fixes tv/vga detect) With the DRM-driven DPMS code, encoders are considered idle unless a connector is hooked to them, so mode setting is skipped. This makes load detection fail as none of the hardware is enabled. Signed-off-by: Keith Packard Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_display.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a87eeffc2c8d..b32a51f2a91d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2136,6 +2136,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, } encoder->crtc = crtc; + intel_output->base.encoder = encoder; intel_output->load_detect_temp = true; intel_crtc = to_intel_crtc(crtc); @@ -2171,6 +2172,7 @@ void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_ if (intel_output->load_detect_temp) { encoder->crtc = NULL; + intel_output->base.encoder = NULL; intel_output->load_detect_temp = false; crtc->enabled = drm_helper_crtc_in_use(crtc); drm_helper_disable_unused_functions(dev); -- cgit v1.2.3 From 14d0f0b063f5363984dd305a792854f9c23e9e97 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Fri, 29 May 2009 16:37:04 +0530 Subject: [SCSI] mpt fusion: Fixing 1078 data corruption issue for 36GB memory region The reason for this change is that there is data corruption when four different physical memory regions in the 36GB to 37GB range are accessed. This only affects the 1078. The solution is to use different addressing when filling in the scatter gather table for the affected memory regions. So instead of snooping on all four different memory holes, we treat any physical address in the 36GB to 37GB range with the same algorithm.
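The core of the workaround is a simple region test, sketched here with the names the patch itself introduces (the itemized fix follows): an SGE whose buffer ends in the 36GB to 37GB window has bits 35:32 of its end address equal to 9 (36GB = 9 * 4GB), and such an entry is rewritten as a local-address SGE with bit 63 of the 64-bit address set.

	/* sketch of the 1078 trouble-region test */
	if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32) == 9) {
		flagslength |= MPI_SGE_SET_FLAGS(MPI_SGE_FLAGS_LOCAL_ADDRESS);
		tmp |= (1 << 31);	/* bit 63 of the address: host memory */
	}
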
The fix is explained below:
1) Ensure that the message frames are NOT located in the trouble region. There is no remapping available for message frames; they must be allocated outside the problem region.
2) Ensure that sense buffers are NOT in the trouble region. There is no remapping available.
3) Walk through the SGE entries and, if any are inside the trouble region, remap them as follows:
   1) Set the Local Address bit (MPI_SGE_FLAGS_LOCAL_ADDRESS) in the SGE Flags field.
   2) Ensure we are using 64-bit SGEs.
   3) Set the MSb (bit 63) of the 64-bit address; this indicates the buffer location is host memory.
Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.c | 287 +++++++++++++++++++++++++++++++------- drivers/message/fusion/mptbase.h | 35 +++-- drivers/message/fusion/mptctl.c | 37 ++--- drivers/message/fusion/mptdebug.h | 3 + drivers/message/fusion/mptfc.c | 10 +- drivers/message/fusion/mptsas.c | 18 ++- drivers/message/fusion/mptscsih.c | 102 +++----------- drivers/message/fusion/mptspi.c | 14 +- 8 files changed, 318 insertions(+), 188 deletions(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 5d496a99e034..a66369218c97 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -998,7 +998,7 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mpt_add_sge - Place a simple SGE at address pAddr. + * mpt_add_sge - Place a simple 32 bit SGE at address pAddr. * @pAddr: virtual address for SGE * @flagslength: SGE flags and data transfer length * @dma_addr: Physical address * * This routine places a MPT request frame back on the MPT adapter's * FreeQ. */ -void -mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr) +static void +mpt_add_sge(void *pAddr, u32 flagslength, dma_addr_t dma_addr) { - if (sizeof(dma_addr_t) == sizeof(u64)) { - SGESimple64_t *pSge = (SGESimple64_t *) pAddr; + SGESimple32_t *pSge = (SGESimple32_t *) pAddr; + pSge->FlagsLength = cpu_to_le32(flagslength); + pSge->Address = cpu_to_le32(dma_addr); +} + +/** + * mpt_add_sge_64bit - Place a simple 64 bit SGE at address pAddr. + * @pAddr: virtual address for SGE + * @flagslength: SGE flags and data transfer length + * @dma_addr: Physical address + * + * This routine places a MPT request frame back on the MPT adapter's + * FreeQ. + **/ +static void +mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr) +{ + SGESimple64_t *pSge = (SGESimple64_t *) pAddr; + pSge->Address.Low = cpu_to_le32 + (lower_32_bits((unsigned long)(dma_addr))); + pSge->Address.High = cpu_to_le32 + (upper_32_bits((unsigned long)dma_addr)); + pSge->FlagsLength = cpu_to_le32 + ((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING)); +} + +/** + * mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr + * (1078 workaround). + * @pAddr: virtual address for SGE + * @flagslength: SGE flags and data transfer length + * @dma_addr: Physical address + * + * This routine places a MPT request frame back on the MPT adapter's + * FreeQ.
+ **/ +static void +mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr) +{ + SGESimple64_t *pSge = (SGESimple64_t *) pAddr; + u32 tmp; + + pSge->Address.Low = cpu_to_le32 + (lower_32_bits((unsigned long)(dma_addr))); + tmp = (u32)(upper_32_bits((unsigned long)dma_addr)); + + /* + * 1078 errata workaround for the 36GB limitation + */ + if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32) == 9) { + flagslength |= + MPI_SGE_SET_FLAGS(MPI_SGE_FLAGS_LOCAL_ADDRESS); + tmp |= (1<<31); + if (mpt_debug_level & MPT_DEBUG_36GB_MEM) + printk(KERN_DEBUG "1078 P0M2 addressing for " + "addr = 0x%llx len = %d\n", + (unsigned long long)dma_addr, + MPI_SGE_LENGTH(flagslength)); + } + + pSge->Address.High = cpu_to_le32(tmp); + pSge->FlagsLength = cpu_to_le32( + (flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING)); +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mpt_add_chain - Place a 32 bit chain SGE at address pAddr. + * @pAddr: virtual address for SGE + * @next: nextChainOffset value (u32's) + * @length: length of next SGL segment + * @dma_addr: Physical address + * + */ +static void +mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr) +{ + SGEChain32_t *pChain = (SGEChain32_t *) pAddr; + pChain->Length = cpu_to_le16(length); + pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; + pChain->NextChainOffset = next; + pChain->Address = cpu_to_le32(dma_addr); +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mpt_add_chain_64bit - Place a 64 bit chain SGE at address pAddr. + * @pAddr: virtual address for SGE + * @next: nextChainOffset value (u32's) + * @length: length of next SGL segment + * @dma_addr: Physical address + * + */ +static void +mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr) +{ + SGEChain64_t *pChain = (SGEChain64_t *) pAddr; u32 tmp = dma_addr & 0xFFFFFFFF; - pSge->FlagsLength = cpu_to_le32(flagslength); - pSge->Address.Low = cpu_to_le32(tmp); - tmp = (u32) ((u64)dma_addr >> 32); - pSge->Address.High = cpu_to_le32(tmp); + pChain->Length = cpu_to_le16(length); + pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT | + MPI_SGE_FLAGS_64_BIT_ADDRESSING); - } else { - SGESimple32_t *pSge = (SGESimple32_t *) pAddr; - pSge->FlagsLength = cpu_to_le32(flagslength); - pSge->Address = cpu_to_le32(dma_addr); - } + pChain->NextChainOffset = next; + + pChain->Address.Low = cpu_to_le32(tmp); + tmp = (u32)(upper_32_bits((unsigned long)dma_addr)); + pChain->Address.High = cpu_to_le32(tmp); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -1225,7 +1319,7 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init) } flags_length = flags_length << MPI_SGE_FLAGS_SHIFT; flags_length |= ioc->HostPageBuffer_sz; - mpt_add_sge(psge, flags_length, ioc->HostPageBuffer_dma); + ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma); ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE; return 0; @@ -1534,21 +1628,42 @@ mpt_mapresources(MPT_ADAPTER *ioc) pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) - && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT - ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", - ioc->name)); - } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) - && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT - ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", - 
ioc->name)); + if (sizeof(dma_addr_t) > 4) { + const uint64_t required_mask = dma_get_required_mask + (&pdev->dev); + if (required_mask > DMA_BIT_MASK(32) + && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) + && !pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(64))) { + ioc->dma_mask = DMA_BIT_MASK(64); + dinitprintk(ioc, printk(MYIOC_s_INFO_FMT + ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", + ioc->name)); + } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) + && !pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(32))) { + ioc->dma_mask = DMA_BIT_MASK(32); + dinitprintk(ioc, printk(MYIOC_s_INFO_FMT + ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", + ioc->name)); + } else { + printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n", + ioc->name, pci_name(pdev)); + return r; + } } else { - printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n", - ioc->name, pci_name(pdev)); - pci_release_selected_regions(pdev, ioc->bars); - return r; + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) + && !pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(32))) { + ioc->dma_mask = DMA_BIT_MASK(32); + dinitprintk(ioc, printk(MYIOC_s_INFO_FMT + ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", + ioc->name)); + } else { + printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n", + ioc->name, pci_name(pdev)); + return r; + } } mem_phys = msize = 0; @@ -1650,6 +1765,23 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) return r; } + /* + * Setting up proper handlers for scatter gather handling + */ + if (ioc->dma_mask == DMA_BIT_MASK(64)) { + if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) + ioc->add_sge = &mpt_add_sge_64bit_1078; + else + ioc->add_sge = &mpt_add_sge_64bit; + ioc->add_chain = &mpt_add_chain_64bit; + ioc->sg_addr_size = 8; + } else { + ioc->add_sge = &mpt_add_sge; + ioc->add_chain = &mpt_add_chain; + ioc->sg_addr_size = 4; + } + ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size; + ioc->alloc_total = sizeof(MPT_ADAPTER); ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */ ioc->reply_sz = MPT_REPLY_FRAME_SIZE; @@ -1994,6 +2126,21 @@ mpt_resume(struct pci_dev *pdev) if (err) return err; + if (ioc->dma_mask == DMA_BIT_MASK(64)) { + if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) + ioc->add_sge = &mpt_add_sge_64bit_1078; + else + ioc->add_sge = &mpt_add_sge_64bit; + ioc->add_chain = &mpt_add_chain_64bit; + ioc->sg_addr_size = 8; + } else { + + ioc->add_sge = &mpt_add_sge; + ioc->add_chain = &mpt_add_chain; + ioc->sg_addr_size = 4; + } + ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size; + printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n", ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT), CHIPREG_READ32(&ioc->chip->Doorbell)); @@ -3325,11 +3472,10 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag) FWUpload_t *prequest; FWUploadReply_t *preply; FWUploadTCSGE_t *ptcsge; - int sgeoffset; u32 flagsLength; int ii, sz, reply_sz; int cmdStatus; - + int request_size; /* If the image size is 0, we are done. 
*/ if ((sz = ioc->facts.FWImageSize) == 0) @@ -3364,18 +3510,17 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag) ptcsge->ImageSize = cpu_to_le32(sz); ptcsge++; - sgeoffset = sizeof(FWUpload_t) - sizeof(SGE_MPI_UNION) + sizeof(FWUploadTCSGE_t); - flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz; - mpt_add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma); - - sgeoffset += sizeof(u32) + sizeof(dma_addr_t); - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": Sending FW Upload (req @ %p) sgeoffset=%d \n", - ioc->name, prequest, sgeoffset)); + ioc->add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma); + request_size = offsetof(FWUpload_t, SGL) + sizeof(FWUploadTCSGE_t) + + ioc->SGE_size; + dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending FW Upload " + " (req @ %p) fw_size=%d mf_request_size=%d\n", ioc->name, prequest, + ioc->facts.FWImageSize, request_size)); DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest); - ii = mpt_handshake_req_reply_wait(ioc, sgeoffset, (u32*)prequest, - reply_sz, (u16*)preply, 65 /*seconds*/, sleepFlag); + ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest, + reply_sz, (u16 *)preply, 65 /*seconds*/, sleepFlag); dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Upload completed rc=%x \n", ioc->name, ii)); @@ -4090,18 +4235,18 @@ initChainBuffers(MPT_ADAPTER *ioc) * num_sge = num sge in request frame + last chain buffer * scale = num sge per chain buffer if no chain element */ - scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); - if (sizeof(dma_addr_t) == sizeof(u64)) - num_sge = scale + (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32)); + scale = ioc->req_sz / ioc->SGE_size; + if (ioc->sg_addr_size == sizeof(u64)) + num_sge = scale + (ioc->req_sz - 60) / ioc->SGE_size; else - num_sge = 1+ scale + (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32)); + num_sge = 1 + scale + (ioc->req_sz - 64) / ioc->SGE_size; - if (sizeof(dma_addr_t) == sizeof(u64)) { + if (ioc->sg_addr_size == sizeof(u64)) { numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + - (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32)); + (ioc->req_sz - 60) / ioc->SGE_size; } else { - numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + - (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32)); + numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + + scale + (ioc->req_sz - 64) / ioc->SGE_size; } dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n", ioc->name, num_sge, numSGE)); @@ -4161,12 +4306,42 @@ PrimeIocFifos(MPT_ADAPTER *ioc) dma_addr_t alloc_dma; u8 *mem; int i, reply_sz, sz, total_size, num_chain; + u64 dma_mask; + + dma_mask = 0; /* Prime reply FIFO... 
*/ if (ioc->reply_frames == NULL) { if ( (num_chain = initChainBuffers(ioc)) < 0) return -1; + /* + * 1078 errata workaround for the 36GB limitation + */ + if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 && + ioc->dma_mask > DMA_35BIT_MASK) { + if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32)) + && !pci_set_consistent_dma_mask(ioc->pcidev, + DMA_BIT_MASK(32))) { + dma_mask = DMA_35BIT_MASK; + d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "setting 35 bit addressing for " + "Request/Reply/Chain and Sense Buffers\n", + ioc->name)); + } else { + /*Reseting DMA mask to 64 bit*/ + pci_set_dma_mask(ioc->pcidev, + DMA_BIT_MASK(64)); + pci_set_consistent_dma_mask(ioc->pcidev, + DMA_BIT_MASK(64)); + + printk(MYIOC_s_ERR_FMT + "failed setting 35 bit addressing for " + "Request/Reply/Chain and Sense Buffers\n", + ioc->name); + return -1; + } + } total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth); dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n", @@ -4305,6 +4480,12 @@ PrimeIocFifos(MPT_ADAPTER *ioc) alloc_dma += ioc->reply_sz; } + if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev, + ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev, + ioc->dma_mask)) + d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "restoring 64 bit addressing\n", ioc->name)); + return 0; out_fail: @@ -4324,6 +4505,13 @@ out_fail: ioc->sense_buf_pool, ioc->sense_buf_pool_dma); ioc->sense_buf_pool = NULL; } + + if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev, + DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev, + DMA_BIT_MASK(64))) + d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "restoring 64 bit addressing\n", ioc->name)); + return -1; } @@ -5926,7 +6114,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action)); } - mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr); + ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr); /* Append pCfg pointer to end of mf */ @@ -7613,7 +7801,6 @@ EXPORT_SYMBOL(mpt_get_msg_frame); EXPORT_SYMBOL(mpt_put_msg_frame); EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri); EXPORT_SYMBOL(mpt_free_msg_frame); -EXPORT_SYMBOL(mpt_add_sge); EXPORT_SYMBOL(mpt_send_handshake_request); EXPORT_SYMBOL(mpt_verify_adapter); EXPORT_SYMBOL(mpt_GetIocState); diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index b3e981d2a506..4a606764e317 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -76,8 +76,8 @@ #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "3.04.07" -#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.07" +#define MPT_LINUX_VERSION_COMMON "3.04.08" +#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.08" #define WHAT_MAGIC_STRING "@" "(" "#" ")" #define show_mptmod_ver(s,ver) \ @@ -134,6 +134,7 @@ #define MPT_COALESCING_TIMEOUT 0x10 + /* * SCSI transfer rate defines. */ @@ -564,6 +565,10 @@ struct mptfc_rport_info u8 flags; }; +typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr); +typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length, + dma_addr_t dma_addr); + /* * Adapter Structure - pci_dev specific. 
Maximum: MPT_MAX_ADAPTERS */ @@ -600,6 +605,10 @@ typedef struct _MPT_ADAPTER int reply_depth; /* Num Allocated reply frames */ int reply_sz; /* Reply frame size */ int num_chain; /* Number of chain buffers */ + MPT_ADD_SGE add_sge; /* Pointer to add_sge + function */ + MPT_ADD_CHAIN add_chain; /* Pointer to add_chain + function */ /* Pool of buffers for chaining. ReqToChain * and ChainToChain track index of chain buffers. * ChainBuffer (DMA) virt/phys addresses. @@ -711,12 +720,15 @@ typedef struct _MPT_ADAPTER struct workqueue_struct *fc_rescan_work_q; struct scsi_cmnd **ScsiLookup; spinlock_t scsi_lookup_lock; - + u64 dma_mask; char reset_work_q_name[20]; struct workqueue_struct *reset_work_q; struct delayed_work fault_reset_work; spinlock_t fault_reset_work_lock; + u8 sg_addr_size; + u8 SGE_size; + } MPT_ADAPTER; /* @@ -753,13 +765,14 @@ typedef struct _mpt_sge { dma_addr_t Address; } MptSge_t; -#define mpt_addr_size() \ - ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SGE_FLAGS_64_BIT_ADDRESSING : \ - MPI_SGE_FLAGS_32_BIT_ADDRESSING) -#define mpt_msg_flags() \ - ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \ - MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32) +#define mpt_msg_flags(ioc) \ + (ioc->sg_addr_size == sizeof(u64)) ? \ + MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \ + MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32 + +#define MPT_SGE_FLAGS_64_BIT_ADDRESSING \ + (MPI_SGE_FLAGS_64_BIT_ADDRESSING << MPI_SGE_FLAGS_SHIFT) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -909,7 +922,6 @@ extern MPT_FRAME_HDR *mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc); extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); extern void mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); extern void mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); -extern void mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr); extern int mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag); extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp); @@ -959,7 +971,6 @@ extern int mpt_fwfault_debug; #define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000) #define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000) #define MPT_SGE_FLAGS_DIRECTION (0x04000000) -#define MPT_SGE_FLAGS_ADDRESSING (mpt_addr_size() << MPI_SGE_FLAGS_SHIFT) #define MPT_SGE_FLAGS_END_OF_LIST (0x01000000) #define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000) @@ -972,14 +983,12 @@ extern int mpt_fwfault_debug; MPT_SGE_FLAGS_END_OF_BUFFER | \ MPT_SGE_FLAGS_END_OF_LIST | \ MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ - MPT_SGE_FLAGS_ADDRESSING | \ MPT_TRANSFER_IOC_TO_HOST) #define MPT_SGE_FLAGS_SSIMPLE_WRITE \ (MPT_SGE_FLAGS_LAST_ELEMENT | \ MPT_SGE_FLAGS_END_OF_BUFFER | \ MPT_SGE_FLAGS_END_OF_LIST | \ MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ - MPT_SGE_FLAGS_ADDRESSING | \ MPT_TRANSFER_HOST_TO_IOC) /*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index c63817117c0a..bece386f1d4b 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -841,8 +841,9 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) * 96 8 * 64 4 */ - maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t)) - / (sizeof(dma_addr_t) + sizeof(u32)); + maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - + sizeof(FWDownloadTCSGE_t)) + / iocp->SGE_size; if (numfrags > maxfrags) { ret = -EMLINK; goto fwdl_out; @@ 
-870,7 +871,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) if (nib == 0 || nib == 3) { ; } else if (sgIn->Address) { - mpt_add_sge(sgOut, sgIn->FlagsLength, sgIn->Address); + iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address); n++; if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) { printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - " @@ -882,7 +883,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) } sgIn++; bl++; - sgOut += (sizeof(dma_addr_t) + sizeof(u32)); + sgOut += iocp->SGE_size; } DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags); @@ -1003,7 +1004,7 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags, * */ sgl = sglbuf; - sg_spill = ((ioc->req_sz - sge_offset)/(sizeof(dma_addr_t) + sizeof(u32))) - 1; + sg_spill = ((ioc->req_sz - sge_offset)/ioc->SGE_size) - 1; while (bytes_allocd < bytes) { this_alloc = min(alloc_sz, bytes-bytes_allocd); buflist[buflist_ent].len = this_alloc; @@ -1024,8 +1025,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags, dma_addr_t dma_addr; bytes_allocd += this_alloc; - sgl->FlagsLength = (0x10000000|MPT_SGE_FLAGS_ADDRESSING|sgdir|this_alloc); - dma_addr = pci_map_single(ioc->pcidev, buflist[buflist_ent].kptr, this_alloc, dir); + sgl->FlagsLength = (0x10000000|sgdir|this_alloc); + dma_addr = pci_map_single(ioc->pcidev, + buflist[buflist_ent].kptr, this_alloc, dir); sgl->Address = dma_addr; fragcnt++; @@ -1799,9 +1801,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) */ sz = karg.dataSgeOffset * 4; if (karg.dataInSize > 0) - sz += sizeof(dma_addr_t) + sizeof(u32); + sz += ioc->SGE_size; if (karg.dataOutSize > 0) - sz += sizeof(dma_addr_t) + sizeof(u32); + sz += ioc->SGE_size; if (sz > ioc->req_sz) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " @@ -1893,7 +1895,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) } pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; - pScsiReq->MsgFlags |= mpt_msg_flags(); + pScsiReq->MsgFlags |= mpt_msg_flags(ioc); /* verify that app has not requested @@ -1979,7 +1981,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) int dataSize; pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; - pScsiReq->MsgFlags |= mpt_msg_flags(); + pScsiReq->MsgFlags |= mpt_msg_flags(ioc); /* verify that app has not requested @@ -2123,8 +2125,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) if (karg.dataInSize > 0) { flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | - MPI_SGE_FLAGS_DIRECTION | - mpt_addr_size() ) + MPI_SGE_FLAGS_DIRECTION) << MPI_SGE_FLAGS_SHIFT; } else { flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE; @@ -2141,8 +2142,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) /* Set up this SGE. * Copy to MF and to sglbuf */ - mpt_add_sge(psge, flagsLength, dma_addr_out); - psge += (sizeof(u32) + sizeof(dma_addr_t)); + ioc->add_sge(psge, flagsLength, dma_addr_out); + psge += ioc->SGE_size; /* Copy user data to kernel space. 
*/ @@ -2175,13 +2176,13 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) /* Set up this SGE * Copy to MF and to sglbuf */ - mpt_add_sge(psge, flagsLength, dma_addr_in); + ioc->add_sge(psge, flagsLength, dma_addr_in); } } } else { /* Add a NULL SGE */ - mpt_add_sge(psge, flagsLength, (dma_addr_t) -1); + ioc->add_sge(psge, flagsLength, (dma_addr_t) -1); } ioc->ioctl->wait_done = 0; @@ -2498,7 +2499,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); if (!pbuf) goto out; - mpt_add_sge((char *)&IstwiRWRequest->SGL, + ioc->add_sge((char *)&IstwiRWRequest->SGL, (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma); ioc->ioctl->wait_done = 0; diff --git a/drivers/message/fusion/mptdebug.h b/drivers/message/fusion/mptdebug.h index 510b9f492093..28e478879284 100644 --- a/drivers/message/fusion/mptdebug.h +++ b/drivers/message/fusion/mptdebug.h @@ -58,6 +58,7 @@ #define MPT_DEBUG_FC 0x00080000 #define MPT_DEBUG_SAS 0x00100000 #define MPT_DEBUG_SAS_WIDE 0x00200000 +#define MPT_DEBUG_36GB_MEM 0x00400000 /* * CONFIG_FUSION_LOGGING - enabled in Kconfig @@ -135,6 +136,8 @@ #define dsaswideprintk(IOC, CMD) \ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE) +#define d36memprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_36GB_MEM) /* diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index c3c24fdf9fb6..da16b47a3f32 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c @@ -1251,17 +1251,15 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) * A slightly different algorithm is required for * 64bit SGEs. */ - scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); - if (sizeof(dma_addr_t) == sizeof(u64)) { + scale = ioc->req_sz/ioc->SGE_size; + if (ioc->sg_addr_size == sizeof(u64)) { numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + - (ioc->req_sz - 60) / (sizeof(dma_addr_t) + - sizeof(u32)); + (ioc->req_sz - 60) / ioc->SGE_size; } else { numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + - (ioc->req_sz - 64) / (sizeof(dma_addr_t) + - sizeof(u32)); + (ioc->req_sz - 64) / ioc->SGE_size; } if (numSGE < sh->sg_tablesize) { diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index a9019f081b97..b162f7a1c563 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -1319,15 +1319,15 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, /* request */ flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | - MPI_SGE_FLAGS_DIRECTION | - mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT; + MPI_SGE_FLAGS_DIRECTION) + << MPI_SGE_FLAGS_SHIFT; flagsLength |= (req->data_len - 4); dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), req->data_len, PCI_DMA_BIDIRECTIONAL); if (!dma_addr_out) goto put_mf; - mpt_add_sge(psge, flagsLength, dma_addr_out); + ioc->add_sge(psge, flagsLength, dma_addr_out); psge += (sizeof(u32) + sizeof(dma_addr_t)); /* response */ @@ -1337,7 +1337,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, rsp->data_len, PCI_DMA_BIDIRECTIONAL); if (!dma_addr_in) goto unmap; - mpt_add_sge(psge, flagsLength, dma_addr_in); + ioc->add_sge(psge, flagsLength, dma_addr_in); mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); @@ -3211,17 +3211,15 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) * A slightly different algorithm is required for * 64bit SGEs. 
*/ - scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); - if (sizeof(dma_addr_t) == sizeof(u64)) { + scale = ioc->req_sz/ioc->SGE_size; + if (ioc->sg_addr_size == sizeof(u64)) { numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + - (ioc->req_sz - 60) / (sizeof(dma_addr_t) + - sizeof(u32)); + (ioc->req_sz - 60) / ioc->SGE_size; } else { numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + - (ioc->req_sz - 64) / (sizeof(dma_addr_t) + - sizeof(u32)); + (ioc->req_sz - 64) / ioc->SGE_size; } if (numSGE < sh->sg_tablesize) { diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index e62c6bc4ad33..8c08c73f194c 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -113,69 +113,6 @@ int mptscsih_resume(struct pci_dev *pdev); #define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mptscsih_add_sge - Place a simple SGE at address pAddr. - * @pAddr: virtual address for SGE - * @flagslength: SGE flags and data transfer length - * @dma_addr: Physical address - * - * This routine places a MPT request frame back on the MPT adapter's - * FreeQ. - */ -static inline void -mptscsih_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr) -{ - if (sizeof(dma_addr_t) == sizeof(u64)) { - SGESimple64_t *pSge = (SGESimple64_t *) pAddr; - u32 tmp = dma_addr & 0xFFFFFFFF; - - pSge->FlagsLength = cpu_to_le32(flagslength); - pSge->Address.Low = cpu_to_le32(tmp); - tmp = (u32) ((u64)dma_addr >> 32); - pSge->Address.High = cpu_to_le32(tmp); - - } else { - SGESimple32_t *pSge = (SGESimple32_t *) pAddr; - pSge->FlagsLength = cpu_to_le32(flagslength); - pSge->Address = cpu_to_le32(dma_addr); - } -} /* mptscsih_add_sge() */ - -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mptscsih_add_chain - Place a chain SGE at address pAddr. - * @pAddr: virtual address for SGE - * @next: nextChainOffset value (u32's) - * @length: length of next SGL segment - * @dma_addr: Physical address - * - * This routine places a MPT request frame back on the MPT adapter's - * FreeQ. - */ -static inline void -mptscsih_add_chain(char *pAddr, u8 next, u16 length, dma_addr_t dma_addr) -{ - if (sizeof(dma_addr_t) == sizeof(u64)) { - SGEChain64_t *pChain = (SGEChain64_t *) pAddr; - u32 tmp = dma_addr & 0xFFFFFFFF; - - pChain->Length = cpu_to_le16(length); - pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size(); - - pChain->NextChainOffset = next; - - pChain->Address.Low = cpu_to_le32(tmp); - tmp = (u32) ((u64)dma_addr >> 32); - pChain->Address.High = cpu_to_le32(tmp); - } else { - SGEChain32_t *pChain = (SGEChain32_t *) pAddr; - pChain->Length = cpu_to_le16(length); - pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size(); - pChain->NextChainOffset = next; - pChain->Address = cpu_to_le32(dma_addr); - } -} /* mptscsih_add_chain() */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -281,10 +218,10 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt, */ nextSGEset: - numSgeSlots = ((frm_sz - sgeOffset) / (sizeof(u32) + sizeof(dma_addr_t)) ); + numSgeSlots = ((frm_sz - sgeOffset) / ioc->SGE_size); numSgeThisFrame = (sges_left < numSgeSlots) ? 
sges_left : numSgeSlots; - sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | MPT_SGE_FLAGS_ADDRESSING | sgdir; + sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | sgdir; /* Get first (num - 1) SG elements * Skip any SG entries with a length of 0 @@ -299,11 +236,11 @@ nextSGEset: } v2 = sg_dma_address(sg); - mptscsih_add_sge(psge, sgflags | thisxfer, v2); + ioc->add_sge(psge, sgflags | thisxfer, v2); sg = sg_next(sg); /* Get next SG element from the OS */ - psge += (sizeof(u32) + sizeof(dma_addr_t)); - sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); + psge += ioc->SGE_size; + sgeOffset += ioc->SGE_size; sg_done++; } @@ -320,12 +257,8 @@ nextSGEset: thisxfer = sg_dma_len(sg); v2 = sg_dma_address(sg); - mptscsih_add_sge(psge, sgflags | thisxfer, v2); - /* - sg = sg_next(sg); - psge += (sizeof(u32) + sizeof(dma_addr_t)); - */ - sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); + ioc->add_sge(psge, sgflags | thisxfer, v2); + sgeOffset += ioc->SGE_size; sg_done++; if (chainSge) { @@ -334,7 +267,8 @@ nextSGEset: * Update the chain element * Offset and Length fields. */ - mptscsih_add_chain((char *)chainSge, 0, sgeOffset, ioc->ChainBufferDMA + chain_dma_off); + ioc->add_chain((char *)chainSge, 0, sgeOffset, + ioc->ChainBufferDMA + chain_dma_off); } else { /* The current buffer is the original MF * and there is no Chain buffer. @@ -367,7 +301,7 @@ nextSGEset: * set properly). */ if (sg_done) { - u32 *ptmp = (u32 *) (psge - (sizeof(u32) + sizeof(dma_addr_t))); + u32 *ptmp = (u32 *) (psge - ioc->SGE_size); sgflags = le32_to_cpu(*ptmp); sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT; *ptmp = cpu_to_le32(sgflags); @@ -381,8 +315,9 @@ nextSGEset: * Old chain element is now complete. */ u8 nextChain = (u8) (sgeOffset >> 2); - sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); - mptscsih_add_chain((char *)chainSge, nextChain, sgeOffset, ioc->ChainBufferDMA + chain_dma_off); + sgeOffset += ioc->SGE_size; + ioc->add_chain((char *)chainSge, nextChain, sgeOffset, + ioc->ChainBufferDMA + chain_dma_off); } else { /* The original MF buffer requires a chain buffer - * set the offset. 
@@ -1422,7 +1357,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) pScsiReq->CDBLength = SCpnt->cmd_len; pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; pScsiReq->Reserved = 0; - pScsiReq->MsgFlags = mpt_msg_flags(); + pScsiReq->MsgFlags = mpt_msg_flags(ioc); int_to_scsilun(SCpnt->device->lun, (struct scsi_lun *)pScsiReq->LUN); pScsiReq->Control = cpu_to_le32(scsictl); @@ -1448,7 +1383,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) */ if (datalen == 0) { /* Add a NULL SGE */ - mptscsih_add_sge((char *)&pScsiReq->SGL, MPT_SGE_FLAGS_SSIMPLE_READ | 0, + ioc->add_sge((char *)&pScsiReq->SGL, + MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1); } else { /* Add a 32 or 64 bit SGE */ @@ -3172,7 +3108,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) pScsiReq->Reserved = 0; - pScsiReq->MsgFlags = mpt_msg_flags(); + pScsiReq->MsgFlags = mpt_msg_flags(ioc); /* MsgContext set in mpt_get_msg_fram call */ int_to_scsilun(io->lun, (struct scsi_lun *)pScsiReq->LUN); @@ -3199,11 +3135,11 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) ioc->name, cmd, io->channel, io->id, io->lun)); if (dir == MPI_SCSIIO_CONTROL_READ) { - mpt_add_sge((char *) &pScsiReq->SGL, + ioc->add_sge((char *) &pScsiReq->SGL, MPT_SGE_FLAGS_SSIMPLE_READ | io->size, io->data_dma); } else { - mpt_add_sge((char *) &pScsiReq->SGL, + ioc->add_sge((char *) &pScsiReq->SGL, MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size, io->data_dma); } diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index 61620144e49c..643a3c6443af 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c @@ -300,7 +300,7 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id) flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE | (IOCPage4Ptr->Header.PageLength + ii) * 4; - mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma); + ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma); ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n", @@ -643,7 +643,7 @@ mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id) pReq->Reserved2 = 0; pReq->ActionDataWord = 0; /* Reserved for this action */ - mpt_add_sge((char *)&pReq->ActionDataSGE, + ioc->add_sge((char *)&pReq->ActionDataSGE, MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1); ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n", @@ -1423,17 +1423,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id) * A slightly different algorithm is required for * 64bit SGEs. */ - scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); - if (sizeof(dma_addr_t) == sizeof(u64)) { + scale = ioc->req_sz/ioc->SGE_size; + if (ioc->sg_addr_size == sizeof(u64)) { numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + - (ioc->req_sz - 60) / (sizeof(dma_addr_t) + - sizeof(u32)); + (ioc->req_sz - 60) / ioc->SGE_size; } else { numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + - (ioc->req_sz - 64) / (sizeof(dma_addr_t) + - sizeof(u32)); + (ioc->req_sz - 64) / ioc->SGE_size; } if (numSGE < sh->sg_tablesize) { -- cgit v1.2.3 From 7b5a65b9e649dad9cf9c6d282df4162843070351 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Fri, 29 May 2009 16:38:14 +0530 Subject: [SCSI] mpt fusion: Added support for MPT discovery completion check sas_discovery_quiesce_io flag is used to control IO start/resume functionality. IO will be stoped while doing discovery of topology. 
Once discovery is completed It will resume IO. Resending patch including James review. Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.c | 56 ++++++++++++++++++++++++++++++++++++++++ drivers/message/fusion/mptbase.h | 1 + drivers/message/fusion/mptsas.c | 25 ++++++++++++++++-- 3 files changed, 80 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index a66369218c97..54326e09629c 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -276,6 +276,56 @@ mpt_get_cb_idx(MPT_DRIVER_CLASS dclass) return 0; } +/** + * mpt_is_discovery_complete - determine if discovery has completed + * @ioc: per adatper instance + * + * Returns 1 when discovery completed, else zero. + */ +static int +mpt_is_discovery_complete(MPT_ADAPTER *ioc) +{ + ConfigExtendedPageHeader_t hdr; + CONFIGPARMS cfg; + SasIOUnitPage0_t *buffer; + dma_addr_t dma_handle; + int rc = 0; + + memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t)); + memset(&cfg, 0, sizeof(CONFIGPARMS)); + hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION; + hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; + hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + cfg.cfghdr.ehdr = &hdr; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + + if ((mpt_config(ioc, &cfg))) + goto out; + if (!hdr.ExtPageLength) + goto out; + + buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4, + &dma_handle); + if (!buffer) + goto out; + + cfg.physAddr = dma_handle; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + if ((mpt_config(ioc, &cfg))) + goto out_free_consistent; + + if (!(buffer->PhyData[0].PortFlags & + MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS)) + rc = 1; + + out_free_consistent: + pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4, + buffer, dma_handle); + out: + return rc; +} + /** * mpt_fault_reset_work - work performed on workq after ioc fault * @work: input argument, used to derive ioc @@ -307,6 +357,12 @@ mpt_fault_reset_work(struct work_struct *work) printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after " "reset (%04xh)\n", ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK); + } else if (ioc->bus_type == SAS && ioc->sas_discovery_quiesce_io) { + if ((mpt_is_discovery_complete(ioc))) { + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "clearing " + "discovery_quiesce_io flag\n", ioc->name)); + ioc->sas_discovery_quiesce_io = 0; + } } out: diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 4a606764e317..8cd0a16cdfac 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -706,6 +706,7 @@ typedef struct _MPT_ADAPTER struct mutex sas_discovery_mutex; u8 sas_discovery_runtime; u8 sas_discovery_ignore_events; + u8 sas_discovery_quiesce_io; int sas_index; /* index refrencing */ MPT_SAS_MGMT sas_mgmt; struct work_struct sas_persist_task; diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index b162f7a1c563..40dbaaaf49e1 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -1008,6 +1008,8 @@ mptsas_slave_alloc(struct scsi_device *sdev) static int mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { + MPT_SCSI_HOST *hd; + MPT_ADAPTER *ioc; VirtDevice *vdevice = SCpnt->device->hostdata; if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) { @@ -1016,6 +1018,12 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) return 0; 
} + hd = shost_priv(SCpnt->device->host); + ioc = hd->ioc; + + if (ioc->sas_discovery_quiesce_io) + return SCSI_MLQUEUE_HOST_BUSY; + // scsi_print_command(SCpnt); return mptscsih_qcmd(SCpnt,done); @@ -3009,6 +3017,7 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc, EVENT_DATA_SAS_DISCOVERY *discovery_data) { struct mptsas_discovery_event *ev; + u32 discovery_status; /* * DiscoveryStatus @@ -3017,7 +3026,9 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc, * kicks off discovery, and return to zero * once its completed. */ - if (discovery_data->DiscoveryStatus) + discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus); + ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0; + if (discovery_status) return; ev = kzalloc(sizeof(*ev), GFP_ATOMIC); @@ -3299,12 +3310,22 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) return error; } +void +mptsas_shutdown(struct pci_dev *pdev) +{ + MPT_ADAPTER *ioc = pci_get_drvdata(pdev); + + ioc->sas_discovery_quiesce_io = 0; +} + static void __devexit mptsas_remove(struct pci_dev *pdev) { MPT_ADAPTER *ioc = pci_get_drvdata(pdev); struct mptsas_portinfo *p, *n; int i; + mptsas_shutdown(pdev); + ioc->sas_discovery_ignore_events = 1; sas_remove_host(ioc->sh); @@ -3342,7 +3363,7 @@ static struct pci_driver mptsas_driver = { .id_table = mptsas_pci_table, .probe = mptsas_probe, .remove = __devexit_p(mptsas_remove), - .shutdown = mptsas_shutdown, #ifdef CONFIG_PM .suspend = mptscsih_suspend, .resume = mptscsih_resume, -- cgit v1.2.3 From fd76175a7d3abf4d14df17f5f4c7e68b466b455d Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Fri, 29 May 2009 16:39:06 +0530 Subject: [SCSI] mpt fusion: Optimized SendEvent notification using doorbell instead of FIFO SendEventNotification was previously handled through the FIFO; it now uses the doorbell to communicate with the hardware. A sleep flag is added as an extra argument to support the can-sleep feature. Resending patch including a compilation error fix reviewed by Grant Grundler.
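In outline (sketch only; the real diff follows), the request is now built on the stack and pushed through the synchronous doorbell handshake, so the driver no longer depends on a message frame allocation that can fail:

	EventNotification_t evn;
	MPIDefaultReply_t reply_buf;

	memset(&evn, 0, sizeof(evn));
	evn.Function = MPI_FUNCTION_EVENT_NOTIFICATION;
	evn.Switch = EvSwitch;
	evn.MsgContext = cpu_to_le32(mpt_base_index << 16);
	/* synchronous doorbell handshake; may sleep depending on sleepFlag */
	return mpt_handshake_req_reply_wait(ioc, sizeof(evn), (u32 *)&evn,
	    sizeof(reply_buf), (u16 *)&reply_buf, 30, sleepFlag);
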
Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.c | 76 ++++++++++++++++++++++------------------ 1 file changed, 42 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 54326e09629c..0d2fb0eb34b9 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -192,7 +192,8 @@ static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc); static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc); static void mpt_timer_expired(unsigned long data); static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc); -static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch); +static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, + int sleepFlag); static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp); static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag); static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init); @@ -208,7 +209,8 @@ static int procmpt_iocinfo_read(char *buf, char **start, off_t offset, static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc); //int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); -static int ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply, int *evHandlers); +static int ProcessEventNotification(MPT_ADAPTER *ioc, + EventNotificationReply_t *evReply, int *evHandlers); static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf); static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info); static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info); @@ -2472,28 +2474,36 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) } } + /* Enable MPT base driver management of EventNotification + * and EventAck handling. + */ + if ((ret == 0) && (!ioc->facts.EventState)) { + dinitprintk(ioc, printk(MYIOC_s_INFO_FMT + "SendEventNotification\n", + ioc->name)); + ret = SendEventNotification(ioc, 1, sleepFlag); /* 1=Enable */ + } + + if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState) + rc = SendEventNotification(ioc->alt_ioc, 1, sleepFlag); + if (ret == 0) { /* Enable! (reply interrupt) */ CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM); ioc->active = 1; } - - if (reset_alt_ioc_active && ioc->alt_ioc) { - /* (re)Enable alt-IOC! (reply interrupt) */ - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "alt_ioc reply irq re-enabled\n", - ioc->alt_ioc->name)); - CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM); - ioc->alt_ioc->active = 1; + if (rc == 0) { /* alt ioc */ + if (reset_alt_ioc_active && ioc->alt_ioc) { + /* (re)Enable alt-IOC! (reply interrupt) */ + dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "alt-ioc" + "reply irq re-enabled\n", + ioc->alt_ioc->name)); + CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, + MPI_HIM_DIM); + ioc->alt_ioc->active = 1; + } } - /* Enable MPT base driver management of EventNotification - * and EventAck handling. - */ - if ((ret == 0) && (!ioc->facts.EventState)) - (void) SendEventNotification(ioc, 1); /* 1=Enable EventNotification */ - - if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState) - (void) SendEventNotification(ioc->alt_ioc, 1); /* 1=Enable EventNotification */ /* Add additional "reason" check before call to GetLanConfigPages * (combined with GetIoUnitPage2 call). 
This prevents a somewhat @@ -6019,30 +6029,28 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc) * SendEventNotification - Send EventNotification (on or off) request to adapter * @ioc: Pointer to MPT_ADAPTER structure * @EvSwitch: Event switch flags + * @sleepFlag: Specifies whether the process can sleep */ static int -SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch) +SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, int sleepFlag) { - EventNotification_t *evnp; + EventNotification_t evn; + MPIDefaultReply_t reply_buf; - evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc); - if (evnp == NULL) { - devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n", - ioc->name)); - return 0; - } - memset(evnp, 0, sizeof(*evnp)); - - devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending EventNotification (%d) request %p\n", ioc->name, EvSwitch, evnp)); + memset(&evn, 0, sizeof(EventNotification_t)); + memset(&reply_buf, 0, sizeof(MPIDefaultReply_t)); - evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION; - evnp->ChainOffset = 0; - evnp->MsgFlags = 0; - evnp->Switch = EvSwitch; + evn.Function = MPI_FUNCTION_EVENT_NOTIFICATION; + evn.Switch = EvSwitch; + evn.MsgContext = cpu_to_le32(mpt_base_index << 16); - mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)evnp); + devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "Sending EventNotification (%d) request %p\n", + ioc->name, EvSwitch, &evn)); - return 0; + return mpt_handshake_req_reply_wait(ioc, sizeof(EventNotification_t), + (u32 *)&evn, sizeof(MPIDefaultReply_t), (u16 *)&reply_buf, 30, + sleepFlag); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -- cgit v1.2.3 From f0f09d3b3f06900d64971625d6753dea0623ed45 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Fri, 29 May 2009 16:40:57 +0530 Subject: [SCSI] mpt fusion: config path optimized, completion queue is used 1) Previously we had multiple #defines for the same values. Those #defines are now consolidated: MPT_IOCTL_STATUS_* is removed and MPT_MGMT_STATUS_* are the new #defines. 2) The config path is optimized: instead of a wait queue and timer, a completion queue is used. 3) mpt_timer_expired is no longer used. [jejb: elide patch to eliminate mpt_timer_expired] Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.c | 456 ++++++++++++++++++--------------------- drivers/message/fusion/mptbase.h | 45 ++-- drivers/message/fusion/mptctl.c | 31 +-- drivers/message/fusion/mptsas.c | 8 +- 4 files changed, 255 insertions(+), 285 deletions(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 0d2fb0eb34b9..e63a6260b0a0 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -146,7 +146,6 @@ static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS]; static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS]; static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS]; -static DECLARE_WAIT_QUEUE_HEAD(mpt_waitq); /* * Driver Callback Index's */ @@ -159,7 +158,8 @@ static u8 last_drv_idx; * Forward protos... 
*/ static irqreturn_t mpt_interrupt(int irq, void *bus_id); -static int mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply); +static int mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, + MPT_FRAME_HDR *reply); static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req, int replyBytes, u16 *u16reply, int maxwait, int sleepFlag); @@ -190,7 +190,6 @@ static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum); static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum); static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc); static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc); -static void mpt_timer_expired(unsigned long data); static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc); static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, int sleepFlag); @@ -559,9 +558,9 @@ mpt_interrupt(int irq, void *bus_id) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mpt_base_reply - MPT base driver's callback routine + * mptbase_reply - MPT base driver's callback routine * @ioc: Pointer to MPT_ADAPTER structure - * @mf: Pointer to original MPT request frame + * @req: Pointer to original MPT request frame * @reply: Pointer to MPT reply frame (NULL if TurboReply) * * MPT base driver's callback routine; all base driver @@ -572,122 +571,49 @@ mpt_interrupt(int irq, void *bus_id) * should be freed, or 0 if it shouldn't. */ static int -mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply) +mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) { + EventNotificationReply_t *pEventReply; + u8 event; + int evHandlers; int freereq = 1; - u8 func; - - dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply() called\n", ioc->name)); -#ifdef CONFIG_FUSION_LOGGING - if ((ioc->debug_level & MPT_DEBUG_MSG_FRAME) && - !(reply->u.hdr.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) { - dmfprintk(ioc, printk(MYIOC_s_INFO_FMT ": Original request frame (@%p) header\n", - ioc->name, mf)); - DBG_DUMP_REQUEST_FRAME_HDR(ioc, (u32 *)mf); - } -#endif - - func = reply->u.hdr.Function; - dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, Function=%02Xh\n", - ioc->name, func)); - - if (func == MPI_FUNCTION_EVENT_NOTIFICATION) { - EventNotificationReply_t *pEvReply = (EventNotificationReply_t *) reply; - int evHandlers = 0; - int results; - - results = ProcessEventNotification(ioc, pEvReply, &evHandlers); - if (results != evHandlers) { - /* CHECKME! Any special handling needed here? */ - devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Called %d event handlers, sum results = %d\n", - ioc->name, evHandlers, results)); - } - /* - * Hmmm... It seems that EventNotificationReply is an exception - * to the rule of one reply per request. 
- */ - if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) { + switch (reply->u.hdr.Function) { + case MPI_FUNCTION_EVENT_NOTIFICATION: + pEventReply = (EventNotificationReply_t *)reply; + evHandlers = 0; + ProcessEventNotification(ioc, pEventReply, &evHandlers); + event = le32_to_cpu(pEventReply->Event) & 0xFF; + if (pEventReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) freereq = 0; - } else { - devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n", - ioc->name, pEvReply)); - } - -#ifdef CONFIG_PROC_FS -// LogEvent(ioc, pEvReply); -#endif - - } else if (func == MPI_FUNCTION_EVENT_ACK) { - dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, EventAck reply received\n", - ioc->name)); - } else if (func == MPI_FUNCTION_CONFIG) { - CONFIGPARMS *pCfg; - unsigned long flags; - - dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "config_complete (mf=%p,mr=%p)\n", - ioc->name, mf, reply)); - - pCfg = * ((CONFIGPARMS **)((u8 *) mf + ioc->req_sz - sizeof(void *))); - - if (pCfg) { - /* disable timer and remove from linked list */ - del_timer(&pCfg->timer); - - spin_lock_irqsave(&ioc->FreeQlock, flags); - list_del(&pCfg->linkage); - spin_unlock_irqrestore(&ioc->FreeQlock, flags); - - /* - * If IOC Status is SUCCESS, save the header - * and set the status code to GOOD. - */ - pCfg->status = MPT_CONFIG_ERROR; - if (reply) { - ConfigReply_t *pReply = (ConfigReply_t *)reply; - u16 status; - - status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK; - dcprintk(ioc, printk(MYIOC_s_NOTE_FMT " IOCStatus=%04xh, IOCLogInfo=%08xh\n", - ioc->name, status, le32_to_cpu(pReply->IOCLogInfo))); - - pCfg->status = status; - if (status == MPI_IOCSTATUS_SUCCESS) { - if ((pReply->Header.PageType & - MPI_CONFIG_PAGETYPE_MASK) == - MPI_CONFIG_PAGETYPE_EXTENDED) { - pCfg->cfghdr.ehdr->ExtPageLength = - le16_to_cpu(pReply->ExtPageLength); - pCfg->cfghdr.ehdr->ExtPageType = - pReply->ExtPageType; - } - pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion; - - /* If this is a regular header, save PageLength. */ - /* LMP Do this better so not using a reserved field! 
*/ - pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength; - pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber; - pCfg->cfghdr.hdr->PageType = pReply->Header.PageType; - } - } - - /* - * Wake up the original calling thread - */ - pCfg->wait_done = 1; - wake_up(&mpt_waitq); + if (event != MPI_EVENT_EVENT_CHANGE) + break; + case MPI_FUNCTION_CONFIG: + case MPI_FUNCTION_SAS_IO_UNIT_CONTROL: + ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; + if (reply) { + ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID; + memcpy(ioc->mptbase_cmds.reply, reply, + min(MPT_DEFAULT_FRAME_SIZE, + 4 * reply->u.reply.MsgLength)); } - } else if (func == MPI_FUNCTION_SAS_IO_UNIT_CONTROL) { - /* we should be always getting a reply frame */ - memcpy(ioc->persist_reply_frame, reply, - min(MPT_DEFAULT_FRAME_SIZE, - 4*reply->u.reply.MsgLength)); - del_timer(&ioc->persist_timer); - ioc->persist_wait_done = 1; - wake_up(&mpt_waitq); - } else { - printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n", - ioc->name, func); + if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) { + ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING; + complete(&ioc->mptbase_cmds.done); + } else + freereq = 0; + if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_FREE_MF) + freereq = 1; + break; + case MPI_FUNCTION_EVENT_ACK: + devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "EventAck reply received\n", ioc->name)); + break; + default: + printk(MYIOC_s_ERR_FMT + "Unexpected msg function (=%02Xh) reply received!\n", + ioc->name, reply->u.hdr.Function); + break; } /* @@ -1849,6 +1775,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&ioc->diagLock); spin_lock_init(&ioc->initializing_hba_lock); + mutex_init(&ioc->mptbase_cmds.mutex); + init_completion(&ioc->mptbase_cmds.done); + /* Initialize the event logging. */ ioc->eventTypes = 0; /* None */ @@ -1866,10 +1795,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) */ memset(&ioc->spi_data, 0, sizeof(SpiCfgData)); - /* Initialize the running configQ head. - */ - INIT_LIST_HEAD(&ioc->configQ); - /* Initialize the fc rport list head. */ INIT_LIST_HEAD(&ioc->fc_rports); @@ -5013,7 +4938,14 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode) SasIoUnitControlReply_t *sasIoUnitCntrReply; MPT_FRAME_HDR *mf = NULL; MPIHeader_t *mpi_hdr; + int ret = 0; + unsigned long timeleft; + + mutex_lock(&ioc->mptbase_cmds.mutex); + /* init the internal cmd struct */ + memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE); + INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status) /* insure garbage is not sent to fw */ switch(persist_opcode) { @@ -5023,17 +4955,19 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode) break; default: - return -1; - break; + ret = -1; + goto out; } - printk("%s: persist_opcode=%x\n",__func__, persist_opcode); + printk(KERN_DEBUG "%s: persist_opcode=%x\n", + __func__, persist_opcode); /* Get a MF for this command. 
*/ if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { - printk("%s: no msg frames!\n",__func__); - return -1; + printk(KERN_DEBUG "%s: no msg frames!\n", __func__); + ret = -1; + goto out; } mpi_hdr = (MPIHeader_t *) mf; @@ -5043,27 +4977,42 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode) sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext; sasIoUnitCntrReq->Operation = persist_opcode; - init_timer(&ioc->persist_timer); - ioc->persist_timer.data = (unsigned long) ioc; - ioc->persist_timer.function = mpt_timer_expired; - ioc->persist_timer.expires = jiffies + HZ*10 /* 10 sec */; - ioc->persist_wait_done=0; - add_timer(&ioc->persist_timer); mpt_put_msg_frame(mpt_base_index, ioc, mf); - wait_event(mpt_waitq, ioc->persist_wait_done); + timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, 10*HZ); + if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { + ret = -ETIME; + printk(KERN_DEBUG "%s: failed\n", __func__); + if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) + goto out; + if (!timeleft) { + printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n", + ioc->name, __func__); + mpt_HardResetHandler(ioc, CAN_SLEEP); + mpt_free_msg_frame(ioc, mf); + } + goto out; + } + + if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { + ret = -1; + goto out; + } sasIoUnitCntrReply = - (SasIoUnitControlReply_t *)ioc->persist_reply_frame; + (SasIoUnitControlReply_t *)ioc->mptbase_cmds.reply; if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) { - printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n", - __func__, - sasIoUnitCntrReply->IOCStatus, + printk(KERN_DEBUG "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n", + __func__, sasIoUnitCntrReply->IOCStatus, sasIoUnitCntrReply->IOCLogInfo); - return -1; - } + printk(KERN_DEBUG "%s: failed\n", __func__); + ret = -1; + } else + printk(KERN_DEBUG "%s: success\n", __func__); + out: - printk("%s: success\n",__func__); - return 0; + CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status) + mutex_unlock(&ioc->mptbase_cmds.mutex); + return ret; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -6066,7 +6015,7 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp) if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", - ioc->name,__func__)); + ioc->name, __func__)); return -1; } @@ -6103,12 +6052,18 @@ int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) { Config_t *pReq; + ConfigReply_t *pReply; ConfigExtendedPageHeader_t *pExtHdr = NULL; MPT_FRAME_HDR *mf; - unsigned long flags; - int ii, rc; + int ii; int flagsLength; + long timeout; + int ret; + u8 page_type = 0, extend_page; + unsigned long timeleft; int in_isr; + u8 issue_hard_reset = 0; + u8 retry_count = 0; /* Prevent calling wait_event() (below), if caller happens * to be in ISR context, because that is fatal! 
@@ -6120,13 +6075,31 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) return -EPERM; } + /* don't send if no chance of success */ + if (!ioc->active || + mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_OPERATIONAL) { + dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: ioc not operational, %d, %xh\n", + ioc->name, __func__, ioc->active, + mpt_GetIocState(ioc, 0))); + return -EFAULT; + } + + retry_config: + mutex_lock(&ioc->mptbase_cmds.mutex); + /* init the internal cmd struct */ + memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE); + INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status) + /* Get and Populate a free Frame */ if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { - dcprintk(ioc, printk(MYIOC_s_WARN_FMT "mpt_config: no msg frames!\n", - ioc->name)); - return -EAGAIN; + dcprintk(ioc, printk(MYIOC_s_WARN_FMT + "mpt_config: no msg frames!\n", ioc->name)); + ret = -EAGAIN; + goto out; } + pReq = (Config_t *)mf; pReq->Action = pCfg->action; pReq->Reserved = 0; @@ -6152,7 +6125,9 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) pReq->ExtPageType = pExtHdr->ExtPageType; pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; - /* Page Length must be treated as a reserved field for the extended header. */ + /* Page Length must be treated as a reserved field for the + * extended header. + */ pReq->Header.PageLength = 0; } @@ -6165,78 +6140,91 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) else flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; - if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) { + if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == + MPI_CONFIG_PAGETYPE_EXTENDED) { flagsLength |= pExtHdr->ExtPageLength * 4; - - dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n", - ioc->name, pReq->ExtPageType, pReq->Header.PageNumber, pReq->Action)); - } - else { + page_type = pReq->ExtPageType; + extend_page = 1; + } else { flagsLength |= pCfg->cfghdr.hdr->PageLength * 4; - - dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n", - ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action)); + page_type = pReq->Header.PageType; + extend_page = 0; } - ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr); - - /* Append pCfg pointer to end of mf - */ - *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg; - - /* Initalize the timer - */ - init_timer_on_stack(&pCfg->timer); - pCfg->timer.data = (unsigned long) ioc; - pCfg->timer.function = mpt_timer_expired; - pCfg->wait_done = 0; - - /* Set the timer; ensure 10 second minimum */ - if (pCfg->timeout < 10) - pCfg->timer.expires = jiffies + HZ*10; - else - pCfg->timer.expires = jiffies + HZ*pCfg->timeout; - - /* Add to end of Q, set timer and then issue this command */ - spin_lock_irqsave(&ioc->FreeQlock, flags); - list_add_tail(&pCfg->linkage, &ioc->configQ); - spin_unlock_irqrestore(&ioc->FreeQlock, flags); + dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "Sending Config request type 0x%x, page 0x%x and action %d\n", + ioc->name, page_type, pReq->Header.PageNumber, pReq->Action)); - add_timer(&pCfg->timer); + ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr); + timeout = (pCfg->timeout < 15) ? 
HZ*15 : HZ*pCfg->timeout; mpt_put_msg_frame(mpt_base_index, ioc, mf); - wait_event(mpt_waitq, pCfg->wait_done); - - /* mf has been freed - do not access */ + timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, + timeout); + if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { + ret = -ETIME; + dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "Failed Sending Config request type 0x%x, page 0x%x," + " action %d, status %xh, time left %ld\n\n", + ioc->name, page_type, pReq->Header.PageNumber, + pReq->Action, ioc->mptbase_cmds.status, timeleft)); + if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) + goto out; + if (!timeleft) + issue_hard_reset = 1; + goto out; + } - rc = pCfg->status; + if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { + ret = -1; + goto out; + } + pReply = (ConfigReply_t *)ioc->mptbase_cmds.reply; + ret = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK; + if (ret == MPI_IOCSTATUS_SUCCESS) { + if (extend_page) { + pCfg->cfghdr.ehdr->ExtPageLength = + le16_to_cpu(pReply->ExtPageLength); + pCfg->cfghdr.ehdr->ExtPageType = + pReply->ExtPageType; + } + pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion; + pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength; + pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber; + pCfg->cfghdr.hdr->PageType = pReply->Header.PageType; - return rc; -} + } -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mpt_timer_expired - Callback for timer process. - * Used only internal config functionality. - * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long - */ -static void -mpt_timer_expired(unsigned long data) -{ - MPT_ADAPTER *ioc = (MPT_ADAPTER *) data; + if (retry_count) + printk(MYIOC_s_INFO_FMT "Retry completed " + "ret=0x%x timeleft=%ld\n", + ioc->name, ret, timeleft); - dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired! \n", ioc->name)); + dcprintk(ioc, printk(KERN_DEBUG "IOCStatus=%04xh, IOCLogInfo=%08xh\n", + ret, le32_to_cpu(pReply->IOCLogInfo))); - /* Perform a FW reload */ - if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0) - printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name); +out: - /* No more processing. - * Hard reset clean-up will wake up - * process and free all resources. - */ - dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired complete!\n", ioc->name)); + CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status) + mutex_unlock(&ioc->mptbase_cmds.mutex); + if (issue_hard_reset) { + issue_hard_reset = 0; + printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", + ioc->name, __func__); + mpt_HardResetHandler(ioc, CAN_SLEEP); + mpt_free_msg_frame(ioc, mf); + /* attempt one retry for a timed out command */ + if (!retry_count) { + printk(MYIOC_s_INFO_FMT + "Attempting Retry Config request" + " type 0x%x, page 0x%x," + " action %d\n", ioc->name, page_type, + pCfg->cfghdr.hdr->PageNumber, pCfg->action); + retry_count++; + goto retry_config; + } + } + return ret; - return; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -6250,41 +6238,27 @@ mpt_timer_expired(unsigned long data) static int mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) { - CONFIGPARMS *pCfg; - unsigned long flags; - - dprintk(ioc, printk(MYIOC_s_DEBUG_FMT - ": IOC %s_reset routed to MPT base driver!\n", - ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( - reset_phase==MPT_IOC_PRE_RESET ? 
"pre" : "post"))); - - if (reset_phase == MPT_IOC_SETUP_RESET) { - ; - } else if (reset_phase == MPT_IOC_PRE_RESET) { - /* If the internal config Q is not empty - - * delete timer. MF resources will be freed when - * the FIFO's are primed. - */ - spin_lock_irqsave(&ioc->FreeQlock, flags); - list_for_each_entry(pCfg, &ioc->configQ, linkage) - del_timer(&pCfg->timer); - spin_unlock_irqrestore(&ioc->FreeQlock, flags); - - } else { - CONFIGPARMS *pNext; - - /* Search the configQ for internal commands. - * Flush the Q, and wake up all suspended threads. - */ - spin_lock_irqsave(&ioc->FreeQlock, flags); - list_for_each_entry_safe(pCfg, pNext, &ioc->configQ, linkage) { - list_del(&pCfg->linkage); - - pCfg->status = MPT_CONFIG_ERROR; - pCfg->wait_done = 1; - wake_up(&mpt_waitq); + switch (reset_phase) { + case MPT_IOC_SETUP_RESET: + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__)); + break; + case MPT_IOC_PRE_RESET: + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__)); + break; + case MPT_IOC_POST_RESET: + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__)); +/* wake up mptbase_cmds */ + if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) { + ioc->mptbase_cmds.status |= + MPT_MGMT_STATUS_DID_IOCRESET; + complete(&ioc->mptbase_cmds.done); } - spin_unlock_irqrestore(&ioc->FreeQlock, flags); + break; + default: + break; } return 1; /* currently means nothing really */ @@ -7901,7 +7875,7 @@ fusion_init(void) /* Register ourselves (mptbase) in order to facilitate * EventNotification handling. */ - mpt_base_index = mpt_register(mpt_base_reply, MPTBASE_DRIVER); + mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER); /* Register for hard reset handling callbacks. 
*/ diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 8cd0a16cdfac..41273fff4b01 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -432,14 +432,6 @@ do { \ * IOCTL structure and associated defines */ -#define MPT_IOCTL_STATUS_DID_IOCRESET 0x01 /* IOC Reset occurred on the current*/ -#define MPT_IOCTL_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */ -#define MPT_IOCTL_STATUS_TIMER_ACTIVE 0x04 /* The timer is running */ -#define MPT_IOCTL_STATUS_SENSE_VALID 0x08 /* Sense data is valid */ -#define MPT_IOCTL_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */ -#define MPT_IOCTL_STATUS_TMTIMER_ACTIVE 0x20 /* The TM timer is running */ -#define MPT_IOCTL_STATUS_TM_FAILED 0x40 /* User TM request failed */ - #define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */ typedef struct _MPT_IOCTL { @@ -454,16 +446,27 @@ typedef struct _MPT_IOCTL { struct mutex ioctl_mutex; } MPT_IOCTL; -#define MPT_SAS_MGMT_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */ -#define MPT_SAS_MGMT_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */ -#define MPT_SAS_MGMT_STATUS_TM_FAILED 0x40 /* User TM request failed */ - -typedef struct _MPT_SAS_MGMT { +#define MPT_MGMT_STATUS_RF_VALID 0x01 /* The Reply Frame is VALID */ +#define MPT_MGMT_STATUS_COMMAND_GOOD 0x02 /* Command Status GOOD */ +#define MPT_MGMT_STATUS_PENDING 0x04 /* command is pending */ +#define MPT_MGMT_STATUS_DID_IOCRESET 0x08 /* IOC Reset occurred + on the current*/ +#define MPT_MGMT_STATUS_SENSE_VALID 0x10 /* valid sense info */ +#define MPT_MGMT_STATUS_TIMER_ACTIVE 0x20 /* obsolete */ +#define MPT_MGMT_STATUS_FREE_MF 0x40 /* free the mf from + complete routine */ + +#define INITIALIZE_MGMT_STATUS(status) \ + status = MPT_MGMT_STATUS_PENDING; +#define CLEAR_MGMT_STATUS(status) \ + status = 0; + +typedef struct _MPT_MGMT { struct mutex mutex; struct completion done; u8 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */ u8 status; /* current command status */ -}MPT_SAS_MGMT; +} MPT_MGMT; /* * Event Structure and define @@ -661,7 +664,6 @@ typedef struct _MPT_ADAPTER struct _mpt_ioctl_events *events; /* pointer to event log */ u8 *cached_fw; /* Pointer to FW */ dma_addr_t cached_fw_dma; - struct list_head configQ; /* linked list of config. requests */ int hs_reply_idx; #ifndef MFCNT u32 pad0; @@ -674,9 +676,6 @@ typedef struct _MPT_ADAPTER IOCFactsReply_t facts; PortFactsReply_t pfacts[2]; FCPortPage0_t fc_port_page0[2]; - struct timer_list persist_timer; /* persist table timer */ - int persist_wait_done; /* persist completion flag */ - u8 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */ LANPage0_t lan_cnfg_page0; LANPage1_t lan_cnfg_page1; @@ -708,7 +707,8 @@ typedef struct _MPT_ADAPTER u8 sas_discovery_ignore_events; u8 sas_discovery_quiesce_io; int sas_index; /* index refrencing */ - MPT_SAS_MGMT sas_mgmt; + MPT_MGMT sas_mgmt; + MPT_MGMT mptbase_cmds; /* for sending config pages */ struct work_struct sas_persist_task; struct work_struct fc_setup_reset_work; @@ -884,21 +884,16 @@ struct scsi_cmnd; * Generic structure passed to the base mpt_config function. 
*/ typedef struct _x_config_parms { - struct list_head linkage; /* linked list */ - struct timer_list timer; /* timer function for this request */ union { ConfigExtendedPageHeader_t *ehdr; ConfigPageHeader_t *hdr; } cfghdr; dma_addr_t physAddr; - int wait_done; /* wait for this request */ u32 pageAddr; /* properly formatted */ + u16 status; u8 action; u8 dir; u8 timeout; /* seconds */ - u8 pad1; - u16 status; - u16 pad2; } CONFIGPARMS; /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index bece386f1d4b..22b75cb647e8 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -221,7 +221,7 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_reply() NULL Reply " "Function=%x!\n", ioc->name, cmd)); - ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; + ioc->ioctl->status |= MPT_MGMT_STATUS_COMMAND_GOOD; ioc->ioctl->reset &= ~MPTCTL_RESET_OK; /* We are done, issue wake up @@ -237,14 +237,14 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) */ memcpy(ioc->ioctl->ReplyFrame, reply, min(ioc->reply_sz, 4*reply->u.reply.MsgLength)); - ioc->ioctl->status |= MPT_IOCTL_STATUS_RF_VALID; + ioc->ioctl->status |= MPT_MGMT_STATUS_RF_VALID; /* Set the command status to GOOD if IOC Status is GOOD * OR if SCSI I/O cmd and data underrun or recovered error. */ iocStatus = le16_to_cpu(reply->u.reply.IOCStatus) & MPI_IOCSTATUS_MASK; if (iocStatus == MPI_IOCSTATUS_SUCCESS) - ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; + ioc->ioctl->status |= MPT_MGMT_STATUS_COMMAND_GOOD; if (iocStatus || reply->u.reply.IOCLogInfo) dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tiocstatus (0x%04X), " @@ -268,7 +268,8 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) if ((iocStatus == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN) || (iocStatus == MPI_IOCSTATUS_SCSI_RECOVERED_ERROR)) { - ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; + ioc->ioctl->status |= + MPT_MGMT_STATUS_COMMAND_GOOD; } } @@ -284,7 +285,7 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); memcpy(ioc->ioctl->sense, sense_data, sz); - ioc->ioctl->status |= MPT_IOCTL_STATUS_SENSE_VALID; + ioc->ioctl->status |= MPT_MGMT_STATUS_SENSE_VALID; } if (cmd == MPI_FUNCTION_SCSI_TASK_MGMT) @@ -483,10 +484,10 @@ mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) switch(reset_phase) { case MPT_IOC_SETUP_RESET: - ioctl->status |= MPT_IOCTL_STATUS_DID_IOCRESET; + ioctl->status |= MPT_MGMT_STATUS_DID_IOCRESET; break; case MPT_IOC_POST_RESET: - ioctl->status &= ~MPT_IOCTL_STATUS_DID_IOCRESET; + ioctl->status &= ~MPT_MGMT_STATUS_DID_IOCRESET; break; case MPT_IOC_PRE_RESET: default: @@ -1791,7 +1792,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) "No memory available during driver init.\n", __FILE__, __LINE__); return -ENOMEM; - } else if (ioc->ioctl->status & MPT_IOCTL_STATUS_DID_IOCRESET) { + } else if (ioc->ioctl->status & MPT_MGMT_STATUS_DID_IOCRESET) { printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " "Busy with IOC Reset \n", __FILE__, __LINE__); return -EBUSY; @@ -2231,7 +2232,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) /* If a valid reply frame, copy to the user. 
* Offset 2: reply length in U32's */ - if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) { + if (ioc->ioctl->status & MPT_MGMT_STATUS_RF_VALID) { if (karg.maxReplyBytes < ioc->reply_sz) { sz = min(karg.maxReplyBytes, 4*ioc->ioctl->ReplyFrame[2]); } else { @@ -2253,7 +2254,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) /* If valid sense data, copy to user. */ - if (ioc->ioctl->status & MPT_IOCTL_STATUS_SENSE_VALID) { + if (ioc->ioctl->status & MPT_MGMT_STATUS_SENSE_VALID) { sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE); if (sz > 0) { if (copy_to_user(karg.senseDataPtr, ioc->ioctl->sense, sz)) { @@ -2270,7 +2271,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) /* If the overall status is _GOOD and data in, copy data * to user. */ - if ((ioc->ioctl->status & MPT_IOCTL_STATUS_COMMAND_GOOD) && + if ((ioc->ioctl->status & MPT_MGMT_STATUS_COMMAND_GOOD) && (karg.dataInSize > 0) && (bufIn.kptr)) { if (copy_to_user(karg.dataInBufPtr, @@ -2285,9 +2286,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) done_free_mem: - ioc->ioctl->status &= ~(MPT_IOCTL_STATUS_COMMAND_GOOD | - MPT_IOCTL_STATUS_SENSE_VALID | - MPT_IOCTL_STATUS_RF_VALID ); + ioc->ioctl->status &= ~(MPT_MGMT_STATUS_COMMAND_GOOD | + MPT_MGMT_STATUS_SENSE_VALID | + MPT_MGMT_STATUS_RF_VALID); /* Free the allocated memory. */ @@ -2527,7 +2528,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) * bays have drives in them * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3) */ - if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) + if (ioc->ioctl->status & MPT_MGMT_STATUS_RF_VALID) karg.rsvd = *(u32 *)pbuf; out: diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 40dbaaaf49e1..dc23adf9a30f 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -1122,9 +1122,9 @@ static int mptsas_get_linkerrors(struct sas_phy *phy) static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) { - ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_COMMAND_GOOD; + ioc->sas_mgmt.status |= MPT_MGMT_STATUS_COMMAND_GOOD; if (reply != NULL) { - ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_RF_VALID; + ioc->sas_mgmt.status |= MPT_MGMT_STATUS_RF_VALID; memcpy(ioc->sas_mgmt.reply, reply, min(ioc->reply_sz, 4 * reply->u.reply.MsgLength)); } @@ -1182,7 +1182,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset) /* a reply frame is expected */ if ((ioc->sas_mgmt.status & - MPT_IOCTL_STATUS_RF_VALID) == 0) { + MPT_MGMT_STATUS_RF_VALID) == 0) { error = -ENXIO; goto out_unlock; } @@ -1359,7 +1359,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, } mf = NULL; - if (ioc->sas_mgmt.status & MPT_IOCTL_STATUS_RF_VALID) { + if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) { SmpPassthroughReply_t *smprep; smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; -- cgit v1.2.3 From 37c60f374a855974c27bd30d5662a8fa5e933792 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Fri, 29 May 2009 16:44:06 +0530 Subject: [SCSI] mpt fusion: rewrite of all internally generated functions Rewrite of all internally generated functions that issue commands to firmware, porting them to be single-threaded using the generic MPT_MGMT struct. Implemented using a completion queue.
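All of the converted internal-command paths share the same serialized request/completion shape (an illustrative sketch assembled from the mptscsih_do_cmd and mptscsih_quiesce_raid hunks below, not a verbatim excerpt; the ten-second timeout and the message frame mf stand in for each caller's actual values):

	int ret;
	unsigned long timeleft;

	/* Only one internally generated command may be outstanding at a
	 * time; the mutex serializes every caller of this path.
	 */
	mutex_lock(&ioc->internal_cmds.mutex);
	INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)

	mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);

	/* Sleep until the reply handler fires the completion from
	 * interrupt context, or until the timeout expires.
	 */
	timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done,
	    10 * HZ);
	if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
		ret = -ETIME;
		if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
			goto out;	/* an IOC reset already cleaned up */
		if (!timeleft) {	/* no reply at all: recover the IOC */
			mpt_HardResetHandler(ioc, CAN_SLEEP);
			mpt_free_msg_frame(ioc, mf);
		}
		goto out;
	}
	ret = ioc->internal_cmds.completion_code;
 out:
	CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
	mutex_unlock(&ioc->internal_cmds.mutex);
	return ret;

The reply-side callback (mptscsih_scandv_complete below) sets MPT_MGMT_STATUS_COMMAND_GOOD, records a completion code, copies the reply frame, and completes internal_cmds.done, so only the thread holding the mutex is ever woken.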
Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.c | 2 + drivers/message/fusion/mptbase.h | 5 +- drivers/message/fusion/mptfc.c | 2 - drivers/message/fusion/mptsas.c | 2 - drivers/message/fusion/mptscsih.c | 514 ++++++++++++++++---------------------- drivers/message/fusion/mptscsih.h | 1 + drivers/message/fusion/mptspi.c | 52 ++-- 7 files changed, 248 insertions(+), 330 deletions(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index e63a6260b0a0..d8d5231f484e 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -1775,6 +1775,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&ioc->diagLock); spin_lock_init(&ioc->initializing_hba_lock); + mutex_init(&ioc->internal_cmds.mutex); + init_completion(&ioc->internal_cmds.done); mutex_init(&ioc->mptbase_cmds.mutex); init_completion(&ioc->mptbase_cmds.done); diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 41273fff4b01..4d77256954f9 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -465,7 +465,9 @@ typedef struct _MPT_MGMT { struct mutex mutex; struct completion done; u8 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */ + u8 sense[MPT_SENSE_BUFFER_ALLOC]; u8 status; /* current command status */ + int completion_code; } MPT_MGMT; /* @@ -709,6 +711,7 @@ typedef struct _MPT_ADAPTER int sas_index; /* index refrencing */ MPT_MGMT sas_mgmt; MPT_MGMT mptbase_cmds; /* for sending config pages */ + MPT_MGMT internal_cmds; struct work_struct sas_persist_task; struct work_struct fc_setup_reset_work; @@ -863,8 +866,6 @@ typedef struct _MPT_SCSI_HOST { unsigned long timeouts; /* cmd timeouts */ ushort sel_timeout[MPT_MAX_FC_DEVICES]; char *info_kbuf; - wait_queue_head_t scandv_waitq; - int scandv_wait_done; long last_queue_full; u16 tm_iocstatus; u16 spi_pending; diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index da16b47a3f32..d09387134145 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c @@ -1310,8 +1310,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) hd->timer.data = (unsigned long) hd; hd->timer.function = mptscsih_timer_expired; - init_waitqueue_head(&hd->scandv_waitq); - hd->scandv_wait_done = 0; hd->last_queue_full = 0; sh->transportt = mptfc_transport_template; diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index dc23adf9a30f..16c4232c37de 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -3282,8 +3282,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc->sas_data.ptClear = mpt_pt_clear; - init_waitqueue_head(&hd->scandv_waitq); - hd->scandv_wait_done = 0; hd->last_queue_full = 0; INIT_LIST_HEAD(&hd->target_reset_list); spin_unlock_irqrestore(&ioc->FreeQlock, flags); diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 8c08c73f194c..35173252e948 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -100,6 +100,8 @@ static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); +static int mptscsih_get_completion_code(MPT_ADAPTER *ioc, + MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply); int 
mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd); static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice); @@ -2571,94 +2573,35 @@ int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) { MPT_SCSI_HOST *hd; - unsigned long flags; - - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - ": IOC %s_reset routed to SCSI host driver!\n", - ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( - reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post"))); - /* If a FW reload request arrives after base installed but - * before all scsi hosts have been attached, then an alt_ioc - * may have a NULL sh pointer. - */ if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL) return 0; - else - hd = shost_priv(ioc->sh); - - if (reset_phase == MPT_IOC_SETUP_RESET) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Setup-Diag Reset\n", ioc->name)); - /* Clean Up: - * 1. Set Hard Reset Pending Flag - * All new commands go to doneQ - */ + hd = shost_priv(ioc->sh); + switch (reset_phase) { + case MPT_IOC_SETUP_RESET: + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__)); hd->resetPending = 1; - - } else if (reset_phase == MPT_IOC_PRE_RESET) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Diag Reset\n", ioc->name)); - - /* 2. Flush running commands - * Clean ScsiLookup (and associated memory) - * AND clean mytaskQ - */ - - /* 2b. Reply to OS all known outstanding I/O commands. - */ - mptscsih_flush_running_cmds(hd); - - /* 2c. If there was an internal command that - * has not completed, configuration or io request, - * free these resources. - */ - if (hd->cmdPtr) { - del_timer(&hd->timer); - mpt_free_msg_frame(ioc, hd->cmdPtr); - } - - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Reset complete.\n", ioc->name)); - - } else { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Diag Reset\n", ioc->name)); - - /* Once a FW reload begins, all new OS commands are - * redirected to the doneQ w/ a reset status. - * Init all control structures. - */ - - /* 2. Chain Buffer initialization - */ - - /* 4. Renegotiate to all devices, if SPI - */ - - /* 5. Enable new commands to be posted - */ - spin_lock_irqsave(&ioc->FreeQlock, flags); - hd->tmPending = 0; - spin_unlock_irqrestore(&ioc->FreeQlock, flags); + break; + case MPT_IOC_PRE_RESET: + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__)); hd->resetPending = 0; - hd->tmState = TM_STATE_NONE; - - /* 6. If there was an internal command, - * wake this process up. 
- */ - if (hd->cmdPtr) { - /* - * Wake up the original calling thread - */ - hd->pLocal = &hd->localReply; - hd->pLocal->completion = MPT_SCANDV_DID_RESET; - hd->scandv_wait_done = 1; - wake_up(&hd->scandv_waitq); - hd->cmdPtr = NULL; + mptscsih_flush_running_cmds(hd); + break; + case MPT_IOC_POST_RESET: + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__)); + if (ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING) { + ioc->internal_cmds.status |= + MPT_MGMT_STATUS_DID_IOCRESET; + complete(&ioc->internal_cmds.done); } - - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Reset complete.\n", ioc->name)); - + break; + default: + break; } - return 1; /* currently means nothing really */ } @@ -2669,8 +2612,9 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) MPT_SCSI_HOST *hd; u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; - devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", - ioc->name, event)); + devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "MPT event (=%02Xh) routed to SCSI host driver!\n", + ioc->name, event)); if (ioc->sh == NULL || ((hd = shost_priv(ioc->sh)) == NULL)) @@ -2711,8 +2655,9 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) case MPI_EVENT_STATE_CHANGE: /* 02 */ case MPI_EVENT_EVENT_CHANGE: /* 0A */ default: - dprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": Ignoring event (=%02Xh)\n", - ioc->name, event)); + dprintk(ioc, printk(MYIOC_s_DEBUG_FMT + ": Ignoring event (=%02Xh)\n", + ioc->name, event)); break; } @@ -2745,153 +2690,44 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) * Used ONLY for DV and other internal commands. */ int -mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) +mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, + MPT_FRAME_HDR *reply) { - MPT_SCSI_HOST *hd; SCSIIORequest_t *pReq; - int completionCode; + SCSIIOReply_t *pReply; + u8 cmd; u16 req_idx; + u8 *sense_data; + int sz; - hd = shost_priv(ioc->sh); - - if ((mf == NULL) || - (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) { - printk(MYIOC_s_ERR_FMT - "ScanDvComplete, %s req frame ptr! 
(=%p)\n", - ioc->name, mf?"BAD":"NULL", (void *) mf); - goto wakeup; - } - - del_timer(&hd->timer); - req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); - mptscsih_set_scsi_lookup(ioc, req_idx, NULL); - pReq = (SCSIIORequest_t *) mf; + ioc->internal_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; + ioc->internal_cmds.completion_code = MPT_SCANDV_GOOD; + if (!reply) + goto out; - if (mf != hd->cmdPtr) { - printk(MYIOC_s_WARN_FMT "ScanDvComplete (mf=%p, cmdPtr=%p, idx=%d)\n", - ioc->name, (void *)mf, (void *) hd->cmdPtr, req_idx); + pReply = (SCSIIOReply_t *) reply; + pReq = (SCSIIORequest_t *) req; + ioc->internal_cmds.completion_code = + mptscsih_get_completion_code(ioc, req, reply); + ioc->internal_cmds.status |= MPT_MGMT_STATUS_RF_VALID; + memcpy(ioc->internal_cmds.reply, reply, + min(MPT_DEFAULT_FRAME_SIZE, 4 * reply->u.reply.MsgLength)); + cmd = reply->u.hdr.Function; + if (((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) || + (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) && + (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)) { + req_idx = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx); + sense_data = ((u8 *)ioc->sense_buf_pool + + (req_idx * MPT_SENSE_BUFFER_ALLOC)); + sz = min_t(int, pReq->SenseBufferLength, + MPT_SENSE_BUFFER_ALLOC); + memcpy(ioc->internal_cmds.sense, sense_data, sz); } - hd->cmdPtr = NULL; - - ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScanDvComplete (mf=%p,mr=%p,idx=%d)\n", - ioc->name, mf, mr, req_idx)); - - hd->pLocal = &hd->localReply; - hd->pLocal->scsiStatus = 0; - - /* If target struct exists, clear sense valid flag. - */ - if (mr == NULL) { - completionCode = MPT_SCANDV_GOOD; - } else { - SCSIIOReply_t *pReply; - u16 status; - u8 scsi_status; - - pReply = (SCSIIOReply_t *) mr; - - status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK; - scsi_status = pReply->SCSIStatus; - - - switch(status) { - - case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */ - completionCode = MPT_SCANDV_SELECTION_TIMEOUT; - break; - - case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */ - case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ - case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */ - case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ - completionCode = MPT_SCANDV_DID_RESET; - break; - - case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */ - case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */ - case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */ - if (pReply->Function == MPI_FUNCTION_CONFIG) { - ConfigReply_t *pr = (ConfigReply_t *)mr; - completionCode = MPT_SCANDV_GOOD; - hd->pLocal->header.PageVersion = pr->Header.PageVersion; - hd->pLocal->header.PageLength = pr->Header.PageLength; - hd->pLocal->header.PageNumber = pr->Header.PageNumber; - hd->pLocal->header.PageType = pr->Header.PageType; - - } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) { - /* If the RAID Volume request is successful, - * return GOOD, else indicate that - * some type of error occurred. 
- */ - MpiRaidActionReply_t *pr = (MpiRaidActionReply_t *)mr; - if (le16_to_cpu(pr->ActionStatus) == MPI_RAID_ACTION_ASTATUS_SUCCESS) - completionCode = MPT_SCANDV_GOOD; - else - completionCode = MPT_SCANDV_SOME_ERROR; - memcpy(hd->pLocal->sense, pr, sizeof(hd->pLocal->sense)); - - } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) { - u8 *sense_data; - int sz; - - /* save sense data in global structure - */ - completionCode = MPT_SCANDV_SENSE; - hd->pLocal->scsiStatus = scsi_status; - sense_data = ((u8 *)ioc->sense_buf_pool + - (req_idx * MPT_SENSE_BUFFER_ALLOC)); - - sz = min_t(int, pReq->SenseBufferLength, - SCSI_STD_SENSE_BYTES); - memcpy(hd->pLocal->sense, sense_data, sz); - - ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT " Check Condition, sense ptr %p\n", - ioc->name, sense_data)); - } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) { - if (pReq->CDB[0] == INQUIRY) - completionCode = MPT_SCANDV_ISSUE_SENSE; - else - completionCode = MPT_SCANDV_DID_RESET; - } - else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS) - completionCode = MPT_SCANDV_DID_RESET; - else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED) - completionCode = MPT_SCANDV_DID_RESET; - else { - completionCode = MPT_SCANDV_GOOD; - hd->pLocal->scsiStatus = scsi_status; - } - break; - - case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */ - if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED) - completionCode = MPT_SCANDV_DID_RESET; - else - completionCode = MPT_SCANDV_SOME_ERROR; - break; - - default: - completionCode = MPT_SCANDV_SOME_ERROR; - break; - - } /* switch(status) */ - - } /* end of address reply case */ - - hd->pLocal->completion = completionCode; - - /* MF and RF are freed in mpt_interrupt - */ -wakeup: - /* Free Chain buffers (will never chain) in scan or dv */ - //mptscsih_freeChainBuffers(ioc, req_idx); - - /* - * Wake up the original calling thread - */ - hd->scandv_wait_done = 1; - wake_up(&hd->scandv_waitq); - + out: + if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING)) + return 0; + ioc->internal_cmds.status &= ~MPT_MGMT_STATUS_PENDING; + complete(&ioc->internal_cmds.done); return 1; } @@ -2940,6 +2776,95 @@ mptscsih_timer_expired(unsigned long data) return; } +/** + * mptscsih_get_completion_code - + * @ioc: Pointer to MPT_ADAPTER structure + * @reply: + * @cmd: + * + **/ +static int +mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, + MPT_FRAME_HDR *reply) +{ + SCSIIOReply_t *pReply; + MpiRaidActionReply_t *pr; + u8 scsi_status; + u16 status; + int completion_code; + + pReply = (SCSIIOReply_t *)reply; + status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK; + scsi_status = pReply->SCSIStatus; + + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "IOCStatus=%04xh, SCSIState=%02xh, SCSIStatus=%02xh," + "IOCLogInfo=%08xh\n", ioc->name, status, pReply->SCSIState, + scsi_status, le32_to_cpu(pReply->IOCLogInfo))); + + switch (status) { + + case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */ + completion_code = MPT_SCANDV_SELECTION_TIMEOUT; + break; + + case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */ + case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ + case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */ + case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ + completion_code = MPT_SCANDV_DID_RESET; + break; + + case MPI_IOCSTATUS_BUSY: + case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: + completion_code = MPT_SCANDV_BUSY; + break; + + case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */ + case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */ + case 
MPI_IOCSTATUS_SUCCESS: /* 0x0000 */ + if (pReply->Function == MPI_FUNCTION_CONFIG) { + completion_code = MPT_SCANDV_GOOD; + } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) { + pr = (MpiRaidActionReply_t *)reply; + if (le16_to_cpu(pr->ActionStatus) == + MPI_RAID_ACTION_ASTATUS_SUCCESS) + completion_code = MPT_SCANDV_GOOD; + else + completion_code = MPT_SCANDV_SOME_ERROR; + } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) + completion_code = MPT_SCANDV_SENSE; + else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) { + if (req->u.scsireq.CDB[0] == INQUIRY) + completion_code = MPT_SCANDV_ISSUE_SENSE; + else + completion_code = MPT_SCANDV_DID_RESET; + } else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS) + completion_code = MPT_SCANDV_DID_RESET; + else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED) + completion_code = MPT_SCANDV_DID_RESET; + else if (scsi_status == MPI_SCSI_STATUS_BUSY) + completion_code = MPT_SCANDV_BUSY; + else + completion_code = MPT_SCANDV_GOOD; + break; + + case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */ + if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED) + completion_code = MPT_SCANDV_DID_RESET; + else + completion_code = MPT_SCANDV_SOME_ERROR; + break; + default: + completion_code = MPT_SCANDV_SOME_ERROR; + break; + + } /* switch(status) */ + + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT + " completionCode set to %08xh\n", ioc->name, completion_code)); + return completion_code; +} /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** @@ -2966,22 +2891,17 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) { MPT_FRAME_HDR *mf; SCSIIORequest_t *pScsiReq; - SCSIIORequest_t ReqCopy; int my_idx, ii, dir; - int rc, cmdTimeout; - int in_isr; + int timeout; char cmdLen; char CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; - char cmd = io->cmd; - MPT_ADAPTER *ioc = hd->ioc; - - in_isr = in_interrupt(); - if (in_isr) { - dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Internal SCSI IO request not allowed in ISR context!\n", - ioc->name)); - return -EPERM; - } + u8 cmd = io->cmd; + MPT_ADAPTER *ioc = hd->ioc; + int ret = 0; + unsigned long timeleft; + unsigned long flags; + mutex_lock(&ioc->internal_cmds.mutex); /* Set command specific information */ @@ -2991,13 +2911,13 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) dir = MPI_SCSIIO_CONTROL_READ; CDB[0] = cmd; CDB[4] = io->size; - cmdTimeout = 10; + timeout = 10; break; case TEST_UNIT_READY: cmdLen = 6; dir = MPI_SCSIIO_CONTROL_READ; - cmdTimeout = 10; + timeout = 10; break; case START_STOP: @@ -3005,7 +2925,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) dir = MPI_SCSIIO_CONTROL_READ; CDB[0] = cmd; CDB[4] = 1; /*Spin up the disk */ - cmdTimeout = 15; + timeout = 15; break; case REQUEST_SENSE: @@ -3013,7 +2933,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) CDB[0] = cmd; CDB[4] = io->size; dir = MPI_SCSIIO_CONTROL_READ; - cmdTimeout = 10; + timeout = 10; break; case READ_BUFFER: @@ -3032,7 +2952,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) CDB[6] = (io->size >> 16) & 0xFF; CDB[7] = (io->size >> 8) & 0xFF; CDB[8] = io->size & 0xFF; - cmdTimeout = 10; + timeout = 10; break; case WRITE_BUFFER: @@ -3047,21 +2967,21 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) CDB[6] = (io->size >> 16) & 0xFF; CDB[7] = (io->size >> 8) & 0xFF; CDB[8] = io->size & 0xFF; - cmdTimeout = 10; + timeout = 10; break; case RESERVE: cmdLen = 6; dir = MPI_SCSIIO_CONTROL_READ; CDB[0] = cmd; - cmdTimeout = 10; + timeout = 10; break; case RELEASE: 
cmdLen = 6; dir = MPI_SCSIIO_CONTROL_READ; CDB[0] = cmd; - cmdTimeout = 10; + timeout = 10; break; case SYNCHRONIZE_CACHE: @@ -3069,20 +2989,23 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) dir = MPI_SCSIIO_CONTROL_READ; CDB[0] = cmd; // CDB[1] = 0x02; /* set immediate bit */ - cmdTimeout = 10; + timeout = 10; break; default: /* Error Case */ - return -EFAULT; + ret = -EFAULT; + goto out; } /* Get and Populate a free Frame + * MsgContext set in mpt_get_msg_frame call */ if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { - dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "No msg frames!\n", - ioc->name)); - return -EBUSY; + dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: No msg frames!\n", + ioc->name, __func__)); + ret = MPT_SCANDV_BUSY; + goto out; } pScsiReq = (SCSIIORequest_t *) mf; @@ -3120,74 +3043,58 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) if (cmd == REQUEST_SENSE) { pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED); - ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Untagged! 0x%2x\n", - ioc->name, cmd)); + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: Untagged! 0x%02x\n", ioc->name, __func__, cmd)); } - for (ii=0; ii < 16; ii++) + for (ii = 0; ii < 16; ii++) pScsiReq->CDB[ii] = CDB[ii]; pScsiReq->DataLength = cpu_to_le32(io->size); pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma + (my_idx * MPT_SENSE_BUFFER_ALLOC)); - ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Command 0x%x for (%d:%d:%d)\n", - ioc->name, cmd, io->channel, io->id, io->lun)); + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: Sending Command 0x%02x for fw_channel=%d fw_id=%d lun=%d\n", + ioc->name, __func__, cmd, io->channel, io->id, io->lun)); - if (dir == MPI_SCSIIO_CONTROL_READ) { + if (dir == MPI_SCSIIO_CONTROL_READ) ioc->add_sge((char *) &pScsiReq->SGL, - MPT_SGE_FLAGS_SSIMPLE_READ | io->size, - io->data_dma); - } else { + MPT_SGE_FLAGS_SSIMPLE_READ | io->size, io->data_dma); + else ioc->add_sge((char *) &pScsiReq->SGL, - MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size, - io->data_dma); - } - - /* The ISR will free the request frame, but we need - * the information to initialize the target. Duplicate. - */ - memcpy(&ReqCopy, pScsiReq, sizeof(SCSIIORequest_t)); - - /* Issue this command after: - * finish init - * add timer - * Wait until the reply has been received - * ScsiScanDvCtx callback function will - * set hd->pLocal; - * set scandv_wait_done and call wake_up - */ - hd->pLocal = NULL; - hd->timer.expires = jiffies + HZ*cmdTimeout; - hd->scandv_wait_done = 0; + MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size, io->data_dma); - /* Save cmd pointer, for resource free if timeout or - * FW reload occurs - */ - hd->cmdPtr = mf; - - add_timer(&hd->timer); + INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status) mpt_put_msg_frame(ioc->InternalCtx, ioc, mf); - wait_event(hd->scandv_waitq, hd->scandv_wait_done); - - if (hd->pLocal) { - rc = hd->pLocal->completion; - hd->pLocal->skip = 0; - - /* Always set fatal error codes in some cases. - */ - if (rc == MPT_SCANDV_SELECTION_TIMEOUT) - rc = -ENXIO; - else if (rc == MPT_SCANDV_SOME_ERROR) - rc = -rc; - } else { - rc = -EFAULT; - /* This should never happen. 
*/ - ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "_do_cmd: Null pLocal!!!\n", - ioc->name)); + timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, + timeout*HZ); + if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { + ret = MPT_SCANDV_DID_RESET; + dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: TIMED OUT for cmd=0x%02x\n", ioc->name, __func__, + cmd)); + if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { + mpt_free_msg_frame(ioc, mf); + goto out; + } + if (!timeleft) { + printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", + ioc->name, __func__); + mpt_HardResetHandler(ioc, CAN_SLEEP); + mpt_free_msg_frame(ioc, mf); + } + goto out; } - return rc; + ret = ioc->internal_cmds.completion_code; + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: success, rc=0x%02x\n", + ioc->name, __func__, ret)); + + out: + CLEAR_MGMT_STATUS(ioc->internal_cmds.status) + mutex_unlock(&ioc->internal_cmds.mutex); + return ret; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -3427,6 +3334,7 @@ struct device_attribute *mptscsih_host_attrs[] = { &dev_attr_debug_level, NULL, }; + EXPORT_SYMBOL(mptscsih_host_attrs); EXPORT_SYMBOL(mptscsih_remove); diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h index 319aa3033371..0b103a2516ee 100644 --- a/drivers/message/fusion/mptscsih.h +++ b/drivers/message/fusion/mptscsih.h @@ -60,6 +60,7 @@ #define MPT_SCANDV_SELECTION_TIMEOUT (0x00000008) #define MPT_SCANDV_ISSUE_SENSE (0x00000010) #define MPT_SCANDV_FALLBACK (0x00000020) +#define MPT_SCANDV_BUSY (0x00000040) #define MPT_SCANDV_MAX_RETRIES (10) diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index 643a3c6443af..8f46fdff7f77 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c @@ -614,19 +614,24 @@ static void mptspi_read_parameters(struct scsi_target *starget) spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 
1 : 0; } -static int +int mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id) { + MPT_ADAPTER *ioc = hd->ioc; MpiRaidActionRequest_t *pReq; MPT_FRAME_HDR *mf; - MPT_ADAPTER *ioc = hd->ioc; + int ret; + unsigned long timeleft; + + mutex_lock(&ioc->internal_cmds.mutex); /* Get and Populate a free Frame */ if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { - ddvprintk(ioc, printk(MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n", - ioc->name)); - return -EAGAIN; + dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT + "%s: no msg frames!\n", ioc->name, __func__)); + ret = -EAGAIN; + goto out; } pReq = (MpiRaidActionRequest_t *)mf; if (quiesce) @@ -649,23 +654,30 @@ mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id) ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n", ioc->name, pReq->Action, channel, id)); - hd->pLocal = NULL; - hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */ - hd->scandv_wait_done = 0; - - /* Save cmd pointer, for resource free if timeout or - * FW reload occurs - */ - hd->cmdPtr = mf; - - add_timer(&hd->timer); + INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status) mpt_put_msg_frame(ioc->InternalCtx, ioc, mf); - wait_event(hd->scandv_waitq, hd->scandv_wait_done); + timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ); + if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { + ret = -ETIME; + dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n", + ioc->name, __func__)); + if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) + goto out; + if (!timeleft) { + printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", + ioc->name, __func__); + mpt_HardResetHandler(ioc, CAN_SLEEP); + mpt_free_msg_frame(ioc, mf); + } + goto out; + } - if ((hd->pLocal == NULL) || (hd->pLocal->completion != 0)) - return -1; + ret = ioc->internal_cmds.completion_code; - return 0; + out: + CLEAR_MGMT_STATUS(ioc->internal_cmds.status) + mutex_unlock(&ioc->internal_cmds.mutex); + return ret; } static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd, @@ -1491,8 +1503,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id) mpt_saf_te)); ioc->spi_data.noQas = 0; - init_waitqueue_head(&hd->scandv_waitq); - hd->scandv_wait_done = 0; hd->last_queue_full = 0; hd->spi_pending = 0; -- cgit v1.2.3 From 1ba9ab2eb2c53df52e498779e14cf4e5ea77b0ad Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Fri, 29 May 2009 16:44:48 +0530 Subject: [SCSI] mpt fusion: rewrite taskmgmt request and completion routines 1) Rewrite the task management request and completion routines, making them single-threaded and using the generic MPT_MGMT struct, deleting mptscsih_TMHandler, replacing it with the single-request TM handler mptscsih_IssueTaskMgmt, and killing the watchdog timer functions. 2) 
clean up the ioc_reset callback handlers, introducing wrappers for synchronizing error recovery (mpt_set_taskmgmt_in_progress_flag, mpt_clear_taskmgmt_in_progress_flag), as the fusion firmware only handles one task management request at a time. Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.c | 80 +++++-- drivers/message/fusion/mptbase.h | 8 +- drivers/message/fusion/mptscsih.c | 459 ++++++++++++++------------------ drivers/message/fusion/mptscsih.h | 3 +- drivers/message/fusion/mptspi.c | 2 +- 5 files changed, 239 insertions(+), 313 deletions(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index d8d5231f484e..af862bf6386f 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -341,7 +341,7 @@ mpt_fault_reset_work(struct work_struct *work) int rc; unsigned long flags; - if (ioc->diagPending || !ioc->active) + if (ioc->ioc_reset_in_progress || !ioc->active) goto out; ioc_raw_state = mpt_GetIocState(ioc, 0); @@ -1771,14 +1771,15 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) ioc->reply_sz = MPT_REPLY_FRAME_SIZE; ioc->pcidev = pdev; - ioc->diagPending = 0; - spin_lock_init(&ioc->diagLock); spin_lock_init(&ioc->initializing_hba_lock); + spin_lock_init(&ioc->taskmgmt_lock); mutex_init(&ioc->internal_cmds.mutex); init_completion(&ioc->internal_cmds.done); mutex_init(&ioc->mptbase_cmds.mutex); init_completion(&ioc->mptbase_cmds.done); + mutex_init(&ioc->taskmgmt_cmds.mutex); + init_completion(&ioc->taskmgmt_cmds.done); /* Initialize the event logging. */ @@ -6572,6 +6573,53 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh *size = y; } +/** + * mpt_set_taskmgmt_in_progress_flag - set flags associated with task management + * @ioc: Pointer to MPT_ADAPTER structure + * + * Returns 0 for SUCCESS or -1 if FAILED. + * + * If -1 is returned, then it was not possible to set the flags + **/ +int +mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc) +{ + unsigned long flags; + int retval; + + spin_lock_irqsave(&ioc->taskmgmt_lock, flags); + if (ioc->ioc_reset_in_progress || ioc->taskmgmt_in_progress || + (ioc->alt_ioc && ioc->alt_ioc->taskmgmt_in_progress)) { + retval = -1; + goto out; + } + retval = 0; + ioc->taskmgmt_in_progress = 1; + if (ioc->alt_ioc) + ioc->alt_ioc->taskmgmt_in_progress = 1; + out: + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + return retval; +} +EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag); + +/** + * mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task management + * @ioc: Pointer to MPT_ADAPTER structure + * + **/ +void +mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->taskmgmt_lock, flags); + ioc->taskmgmt_in_progress = 0; + if (ioc->alt_ioc) + ioc->alt_ioc->taskmgmt_in_progress = 0; + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); +} +EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag); /** @@ -6638,14 +6686,15 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) /* Reset the adapter. Prevent more than 1 call to * mpt_do_ioc_recovery at any instant in time.
*/ - spin_lock_irqsave(&ioc->diagLock, flags); - if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)){ - spin_unlock_irqrestore(&ioc->diagLock, flags); + spin_lock_irqsave(&ioc->taskmgmt_lock, flags); + if (ioc->ioc_reset_in_progress) { + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); return 0; - } else { - ioc->diagPending = 1; } - spin_unlock_irqrestore(&ioc->diagLock, flags); + ioc->ioc_reset_in_progress = 1; + if (ioc->alt_ioc) + ioc->alt_ioc->ioc_reset_in_progress = 1; + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); /* FIXME: If do_ioc_recovery fails, repeat.... */ @@ -6680,11 +6729,14 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) if (ioc->alt_ioc) ioc->alt_ioc->reload_fw = 0; - spin_lock_irqsave(&ioc->diagLock, flags); - ioc->diagPending = 0; - if (ioc->alt_ioc) - ioc->alt_ioc->diagPending = 0; - spin_unlock_irqrestore(&ioc->diagLock, flags); + spin_lock_irqsave(&ioc->taskmgmt_lock, flags); + ioc->ioc_reset_in_progress = 0; + ioc->taskmgmt_in_progress = 0; + if (ioc->alt_ioc) { + ioc->alt_ioc->ioc_reset_in_progress = 0; + ioc->alt_ioc->taskmgmt_in_progress = 0; + } + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler rc = %d!\n", ioc->name, rc)); diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 4d77256954f9..2129aff294d5 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -657,8 +657,6 @@ typedef struct _MPT_ADAPTER MPT_IOCTL *ioctl; /* ioctl data pointer */ struct proc_dir_entry *ioc_dentry; struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */ - spinlock_t diagLock; /* diagnostic reset lock */ - int diagPending; u32 biosVersion; /* BIOS version from IO Unit Page 2 */ int eventTypes; /* Event logging parameters */ int eventContext; /* Next event context */ @@ -712,6 +710,10 @@ typedef struct _MPT_ADAPTER MPT_MGMT sas_mgmt; MPT_MGMT mptbase_cmds; /* for sending config pages */ MPT_MGMT internal_cmds; + MPT_MGMT taskmgmt_cmds; + spinlock_t taskmgmt_lock; /* diagnostic reset lock */ + int taskmgmt_in_progress; + u8 ioc_reset_in_progress; struct work_struct sas_persist_task; struct work_struct fc_setup_reset_work; @@ -931,6 +933,8 @@ extern void mpt_free_fw_memory(MPT_ADAPTER *ioc); extern int mpt_findImVolumes(MPT_ADAPTER *ioc); extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode); extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk); +extern int mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc); +extern void mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc); extern void mpt_halt_firmware(MPT_ADAPTER *ioc); diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 35173252e948..2463731ed355 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -92,20 +92,24 @@ static int mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt, SCSIIORequest_t *pReq, int req_idx); static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx); static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply); -static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd); -static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout ); -static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout); +int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, 
u8 type, u8 channel, u8 id, + int lun, int ctx2abort, ulong timeout); int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); +static void +mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code); static int mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply); int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd); static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice); +static int +mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type, + SCSITaskMgmtReply_t *pScsiTmReply); void mptscsih_remove(struct pci_dev *); void mptscsih_shutdown(struct pci_dev *); #ifdef CONFIG_PM @@ -1466,8 +1470,8 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mptscsih_TMHandler - Generic handler for SCSI Task Management. - * @hd: Pointer to MPT SCSI HOST structure + * mptscsih_IssueTaskMgmt - Generic send Task Management function. + * @hd: Pointer to MPT_SCSI_HOST structure * @type: Task Management type * @channel: channel number for task management * @id: Logical Target ID for reset (if appropriate) @@ -1475,145 +1479,68 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx) * @ctx2abort: Context for the task to be aborted (if appropriate) * @timeout: timeout for task management control * - * Fall through to mpt_HardResetHandler if: not operational, too many - * failed TM requests or handshake failure. + * Remark: _HardResetHandler can be invoked from an interrupt thread (timer) + * or a non-interrupt thread. In the former, must not call schedule(). * - * Remark: Currently invoked from a non-interrupt thread (_bh). + * Not all fields are meaningfull for all task types. * - * Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC - * will be active. + * Returns 0 for SUCCESS, or FAILED. * - * Returns 0 for SUCCESS, or %FAILED. **/ int -mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout) +mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, + int ctx2abort, ulong timeout) { - MPT_ADAPTER *ioc; - int rc = -1; + MPT_FRAME_HDR *mf; + SCSITaskMgmt_t *pScsiTm; + int ii; + int retval; + MPT_ADAPTER *ioc = hd->ioc; + unsigned long timeleft; + u8 issue_hard_reset; u32 ioc_raw_state; - unsigned long flags; - - ioc = hd->ioc; - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler Entered!\n", ioc->name)); - - // SJR - CHECKME - Can we avoid this here? - // (mpt_HardResetHandler has this check...) - spin_lock_irqsave(&ioc->diagLock, flags); - if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)) { - spin_unlock_irqrestore(&ioc->diagLock, flags); - return FAILED; - } - spin_unlock_irqrestore(&ioc->diagLock, flags); - - /* Wait a fixed amount of time for the TM pending flag to be cleared. - * If we time out and not bus reset, then we return a FAILED status - * to the caller. - * The call to mptscsih_tm_pending_wait() will set the pending flag - * if we are - * successful. Otherwise, reload the FW. - */ - if (mptscsih_tm_pending_wait(hd) == FAILED) { - if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler abort: " - "Timed out waiting for last TM (%d) to complete! 
\n", - ioc->name, hd->tmPending)); - return FAILED; - } else if (type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler target " - "reset: Timed out waiting for last TM (%d) " - "to complete! \n", ioc->name, - hd->tmPending)); - return FAILED; - } else if (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler bus reset: " - "Timed out waiting for last TM (%d) to complete! \n", - ioc->name, hd->tmPending)); - return FAILED; - } - } else { - spin_lock_irqsave(&ioc->FreeQlock, flags); - hd->tmPending |= (1 << type); - spin_unlock_irqrestore(&ioc->FreeQlock, flags); - } + unsigned long time_count; + issue_hard_reset = 0; ioc_raw_state = mpt_GetIocState(ioc, 0); if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) { printk(MYIOC_s_WARN_FMT - "TM Handler for type=%x: IOC Not operational (0x%x)!\n", + "TaskMgmt type=%x: IOC Not operational (0x%x)!\n", ioc->name, type, ioc_raw_state); - printk(MYIOC_s_WARN_FMT " Issuing HardReset!!\n", ioc->name); + printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n", + ioc->name, __func__); if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0) - printk(MYIOC_s_WARN_FMT "TMHandler: HardReset " + printk(MYIOC_s_WARN_FMT "TaskMgmt HardReset " "FAILED!!\n", ioc->name); - return FAILED; + return 0; } if (ioc_raw_state & MPI_DOORBELL_ACTIVE) { printk(MYIOC_s_WARN_FMT - "TM Handler for type=%x: ioc_state: " + "TaskMgmt type=%x: ioc_state: " "DOORBELL_ACTIVE (0x%x)!\n", ioc->name, type, ioc_raw_state); return FAILED; } - /* Isse the Task Mgmt request. - */ - if (hd->hard_resets < -1) - hd->hard_resets++; - - rc = mptscsih_IssueTaskMgmt(hd, type, channel, id, lun, - ctx2abort, timeout); - if (rc) - printk(MYIOC_s_INFO_FMT "Issue of TaskMgmt failed!\n", - ioc->name); - else - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Issue of TaskMgmt Successful!\n", - ioc->name)); - - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "TMHandler rc = %d!\n", ioc->name, rc)); - - return rc; -} - - -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mptscsih_IssueTaskMgmt - Generic send Task Management function. - * @hd: Pointer to MPT_SCSI_HOST structure - * @type: Task Management type - * @channel: channel number for task management - * @id: Logical Target ID for reset (if appropriate) - * @lun: Logical Unit for reset (if appropriate) - * @ctx2abort: Context for the task to be aborted (if appropriate) - * @timeout: timeout for task management control - * - * Remark: _HardResetHandler can be invoked from an interrupt thread (timer) - * or a non-interrupt thread. In the former, must not call schedule(). - * - * Not all fields are meaningfull for all task types. - * - * Returns 0 for SUCCESS, or FAILED. - * - **/ -static int -mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout) -{ - MPT_FRAME_HDR *mf; - SCSITaskMgmt_t *pScsiTm; - int ii; - int retval; - MPT_ADAPTER *ioc = hd->ioc; + mutex_lock(&ioc->taskmgmt_cmds.mutex); + if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { + mf = NULL; + retval = FAILED; + goto out; + } /* Return Fail to calling function if no message frames available. 
*/ if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n", - ioc->name)); - return FAILED; + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT + "TaskMgmt no msg frames!!\n", ioc->name)); + retval = FAILED; + mpt_clear_taskmgmt_in_progress_flag(ioc); + goto out; } - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n", + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n", ioc->name, mf)); /* Format the Request @@ -1637,11 +1564,14 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, i pScsiTm->TaskMsgContext = ctx2abort; - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt: ctx2abort (0x%08x) " - "type=%d\n", ioc->name, ctx2abort, type)); + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt: ctx2abort (0x%08x) " + "task_type = 0x%02X, timeout = %ld\n", ioc->name, ctx2abort, + type, timeout)); DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)pScsiTm); + INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status) + time_count = jiffies; if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf); @@ -1649,47 +1579,50 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, i retval = mpt_send_handshake_request(ioc->TaskCtx, ioc, sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP); if (retval) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "send_handshake FAILED!" - " (hd %p, ioc %p, mf %p, rc=%d) \n", ioc->name, hd, - ioc, mf, retval)); - goto fail_out; + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT + "TaskMgmt handshake FAILED!(mf=%p, rc=%d) \n", + ioc->name, mf, retval)); + mpt_free_msg_frame(ioc, mf); + mpt_clear_taskmgmt_in_progress_flag(ioc); + goto out; } } - if(mptscsih_tm_wait_for_completion(hd, timeout) == FAILED) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "task management request TIMED OUT!" - " (hd %p, ioc %p, mf %p) \n", ioc->name, hd, - ioc, mf)); - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n", - ioc->name)); - retval = mpt_HardResetHandler(ioc, CAN_SLEEP); - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rc=%d \n", - ioc->name, retval)); - goto fail_out; + timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, + timeout*HZ); + if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { + retval = FAILED; + dtmprintk(ioc, printk(MYIOC_s_ERR_FMT + "TaskMgmt TIMED OUT!(mf=%p)\n", ioc->name, mf)); + mpt_clear_taskmgmt_in_progress_flag(ioc); + if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) + goto out; + issue_hard_reset = 1; + goto out; } - /* - * Handle success case, see if theres a non-zero ioc_status. 
- */ - if (hd->tm_iocstatus == MPI_IOCSTATUS_SUCCESS || - hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED || - hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED) - retval = 0; - else - retval = FAILED; + retval = mptscsih_taskmgmt_reply(ioc, type, + (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply); - return retval; + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt completed (%d seconds)\n", + ioc->name, jiffies_to_msecs(jiffies - time_count)/1000)); - fail_out: + out: - /* - * Free task management mf, and corresponding tm flags - */ - mpt_free_msg_frame(ioc, mf); - hd->tmPending = 0; - hd->tmState = TM_STATE_NONE; - return FAILED; + CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status) + if (issue_hard_reset) { + printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", + ioc->name, __func__); + retval = mpt_HardResetHandler(ioc, CAN_SLEEP); + mpt_free_msg_frame(ioc, mf); + } + + retval = (retval == 0) ? 0 : FAILED; + mutex_unlock(&ioc->taskmgmt_cmds.mutex); + return retval; } +EXPORT_SYMBOL(mptscsih_IssueTaskMgmt); static int mptscsih_get_tm_timeout(MPT_ADAPTER *ioc) @@ -1799,9 +1732,11 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) hd->abortSCpnt = SCpnt; - retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, - vdevice->vtarget->channel, vdevice->vtarget->id, vdevice->lun, - ctx2abort, mptscsih_get_tm_timeout(ioc)); + retval = mptscsih_IssueTaskMgmt(hd, + MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, + vdevice->vtarget->channel, + vdevice->vtarget->id, vdevice->lun, + ctx2abort, mptscsih_get_tm_timeout(ioc)); if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx && SCpnt->serial_number == sn) @@ -1865,9 +1800,11 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt) goto out; } - retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, - vdevice->vtarget->channel, vdevice->vtarget->id, 0, 0, - mptscsih_get_tm_timeout(ioc)); + retval = mptscsih_IssueTaskMgmt(hd, + MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + vdevice->vtarget->channel, + vdevice->vtarget->id, 0, 0, + mptscsih_get_tm_timeout(ioc)); out: printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n", @@ -1914,8 +1851,10 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt) hd->timeouts++; vdevice = SCpnt->device->hostdata; - retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, - vdevice->vtarget->channel, 0, 0, 0, mptscsih_get_tm_timeout(ioc)); + retval = mptscsih_IssueTaskMgmt(hd, + MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, + vdevice->vtarget->channel, 0, 0, 0, + mptscsih_get_tm_timeout(ioc)); printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n", ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); @@ -1976,65 +1915,55 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt) return retval; } -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mptscsih_tm_pending_wait - wait for pending task management request to complete - * @hd: Pointer to MPT host structure. - * - * Returns {SUCCESS,FAILED}. 
- */ static int -mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd) +mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type, + SCSITaskMgmtReply_t *pScsiTmReply) { - unsigned long flags; - int loop_count = 4 * 10; /* Wait 10 seconds */ - int status = FAILED; - MPT_ADAPTER *ioc = hd->ioc; + u16 iocstatus; + u32 termination_count; + int retval; - do { - spin_lock_irqsave(&ioc->FreeQlock, flags); - if (hd->tmState == TM_STATE_NONE) { - hd->tmState = TM_STATE_IN_PROGRESS; - hd->tmPending = 1; - spin_unlock_irqrestore(&ioc->FreeQlock, flags); - status = SUCCESS; - break; - } - spin_unlock_irqrestore(&ioc->FreeQlock, flags); - msleep(250); - } while (--loop_count); + if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { + retval = FAILED; + goto out; + } - return status; -} + DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply); -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mptscsih_tm_wait_for_completion - wait for completion of TM task - * @hd: Pointer to MPT host structure. - * @timeout: timeout value - * - * Returns {SUCCESS,FAILED}. - */ -static int -mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout ) -{ - unsigned long flags; - int loop_count = 4 * timeout; - int status = FAILED; - MPT_ADAPTER *ioc = hd->ioc; + iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; + termination_count = le32_to_cpu(pScsiTmReply->TerminationCount); - do { - spin_lock_irqsave(&ioc->FreeQlock, flags); - if(hd->tmPending == 0) { - status = SUCCESS; - spin_unlock_irqrestore(&ioc->FreeQlock, flags); - break; - } - spin_unlock_irqrestore(&ioc->FreeQlock, flags); - msleep(250); - } while (--loop_count); + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt fw_channel = %d, fw_id = %d, task_type = 0x%02X,\n" + "\tiocstatus = 0x%04X, loginfo = 0x%08X, response_code = 0x%02X,\n" + "\tterm_cmnds = %d\n", ioc->name, pScsiTmReply->Bus, + pScsiTmReply->TargetID, type, le16_to_cpu(pScsiTmReply->IOCStatus), + le32_to_cpu(pScsiTmReply->IOCLogInfo), pScsiTmReply->ResponseCode, + termination_count)); + + if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 && + pScsiTmReply->ResponseCode) + mptscsih_taskmgmt_response_code(ioc, + pScsiTmReply->ResponseCode); - return status; + if (iocstatus == MPI_IOCSTATUS_SUCCESS) { + retval = 0; + goto out; + } + + retval = FAILED; + if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { + if (termination_count == 1) + retval = 0; + goto out; + } + + if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED || + iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED) + retval = 0; + + out: + return retval; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -2088,97 +2017,28 @@ mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code) * Returns 1 indicating alloc'd request frame ptr should be freed. **/ int -mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) +mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, + MPT_FRAME_HDR *mr) { - SCSITaskMgmtReply_t *pScsiTmReply; - SCSITaskMgmt_t *pScsiTmReq; - MPT_SCSI_HOST *hd; - unsigned long flags; - u16 iocstatus; - u8 tmType; - u32 termination_count; - - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed (mf=%p,mr=%p)\n", - ioc->name, mf, mr)); - if (!ioc->sh) { - dtmprintk(ioc, printk(MYIOC_s_WARN_FMT - "TaskMgmt Complete: NULL Scsi Host Ptr\n", ioc->name)); - return 1; - } - - if (mr == NULL) { - dtmprintk(ioc, printk(MYIOC_s_WARN_FMT - "ERROR! 
TaskMgmt Reply: NULL Request %p\n", ioc->name, mf)); - return 1; - } - - hd = shost_priv(ioc->sh); - pScsiTmReply = (SCSITaskMgmtReply_t*)mr; - pScsiTmReq = (SCSITaskMgmt_t*)mf; - tmType = pScsiTmReq->TaskType; - iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; - termination_count = le32_to_cpu(pScsiTmReply->TerminationCount); + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt completed (mf=%p, mr=%p)\n", ioc->name, mf, mr)); - if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 && - pScsiTmReply->ResponseCode) - mptscsih_taskmgmt_response_code(ioc, - pScsiTmReply->ResponseCode); - DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply); + ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; -#ifdef CONFIG_FUSION_LOGGING - if ((ioc->debug_level & MPT_DEBUG_REPLY) || - (ioc->debug_level & MPT_DEBUG_TM )) - printk("%s: ha=%d [%d:%d:0] task_type=0x%02X " - "iocstatus=0x%04X\n\tloginfo=0x%08X response_code=0x%02X " - "term_cmnds=%d\n", __func__, ioc->id, pScsiTmReply->Bus, - pScsiTmReply->TargetID, pScsiTmReq->TaskType, - le16_to_cpu(pScsiTmReply->IOCStatus), - le32_to_cpu(pScsiTmReply->IOCLogInfo),pScsiTmReply->ResponseCode, - le32_to_cpu(pScsiTmReply->TerminationCount)); -#endif - if (!iocstatus) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT " TaskMgmt SUCCESS\n", ioc->name)); - hd->abortSCpnt = NULL; + if (!mr) goto out; - } - - /* Error? (anything non-zero?) */ - - /* clear flags and continue. - */ - switch (tmType) { - - case MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK: - if (termination_count == 1) - iocstatus = MPI_IOCSTATUS_SCSI_TASK_TERMINATED; - hd->abortSCpnt = NULL; - break; - - case MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS: - - /* If an internal command is present - * or the TM failed - reload the FW. - * FC FW may respond FAILED to an ABORT - */ - if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED || - hd->cmdPtr) - if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0) - printk(MYIOC_s_WARN_FMT " Firmware Reload FAILED!!\n", ioc->name); - break; - - case MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET: - default: - break; - } + ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID; + memcpy(ioc->taskmgmt_cmds.reply, mr, + min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength)); out: - spin_lock_irqsave(&ioc->FreeQlock, flags); - hd->tmPending = 0; - hd->tmState = TM_STATE_NONE; - hd->tm_iocstatus = iocstatus; - spin_unlock_irqrestore(&ioc->FreeQlock, flags); - - return 1; + if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) { + mpt_clear_taskmgmt_in_progress_flag(ioc); + ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING; + complete(&ioc->taskmgmt_cmds.done); + return 1; + } + return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -2901,6 +2761,16 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) unsigned long timeleft; unsigned long flags; + /* don't send internal command during diag reset */ + spin_lock_irqsave(&ioc->taskmgmt_lock, flags); + if (ioc->ioc_reset_in_progress) { + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: busy with host reset\n", ioc->name, __func__)); + return MPT_SCANDV_BUSY; + } + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + mutex_lock(&ioc->internal_cmds.mutex); /* Set command specific information @@ -3360,6 +3230,5 @@ EXPORT_SYMBOL(mptscsih_event_process); EXPORT_SYMBOL(mptscsih_ioc_reset); EXPORT_SYMBOL(mptscsih_change_queue_depth); EXPORT_SYMBOL(mptscsih_timer_expired); -EXPORT_SYMBOL(mptscsih_TMHandler); 
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h index 0b103a2516ee..6ac5d4a5c4e8 100644 --- a/drivers/message/fusion/mptscsih.h +++ b/drivers/message/fusion/mptscsih.h @@ -113,6 +113,8 @@ extern int mptscsih_resume(struct pci_dev *pdev); extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func); extern const char * mptscsih_info(struct Scsi_Host *SChost); extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)); +extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, + u8 id, int lun, int ctx2abort, ulong timeout); extern void mptscsih_slave_destroy(struct scsi_device *device); extern int mptscsih_slave_configure(struct scsi_device *device); extern int mptscsih_abort(struct scsi_cmnd * SCpnt); @@ -127,7 +129,6 @@ extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pE extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth); extern void mptscsih_timer_expired(unsigned long data); -extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout); extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id); extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id); extern struct device_attribute *mptscsih_host_attrs[]; diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index 8f46fdff7f77..e94c76dbe780 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c @@ -1522,7 +1522,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id) * issue internal bus reset */ if (ioc->spi_data.bus_reset) - mptscsih_TMHandler(hd, + mptscsih_IssueTaskMgmt(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 0, 0, 0, 0, 5); -- cgit v1.2.3 From e7deff3374cc5951f914dcb3c66bc1b8de8a084e Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Fri, 29 May 2009 16:46:07 +0530 Subject: [SCSI] mpt fusion: Adding DeviceResetCtx for internal Device reset frame 1.) Added taskmgmt_quiesce_io flag in IOC and removed resetPending from _MPT_SCSI_HOST struct. 2.) Reset from the SCSI midlayer and internal reset are separate contexts. Adding DeviceResetCtx for internal Device reset frame. mptsas_taskmgmt_complete is optimized as part of the implementation.
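For illustration, the quiesce gating this patch introduces reduces to a small pattern: queuecommand checks a single IOC-wide flag and pushes I/O back to the midlayer while a task management request or reset is in flight. Below is a minimal sketch in plain C under that assumption; struct mpt_adapter and my_queuecommand are illustrative stand-ins rather than the driver's real definitions, and the hunk further down shows the actual check in mptscsih_qcmd against ioc->taskmgmt_quiesce_io.

#include <stdbool.h>

#define SCSI_MLQUEUE_HOST_BUSY 0x1055	/* stand-in for the midlayer's busy code */

struct mpt_adapter {
	bool taskmgmt_quiesce_io;	/* set while a TM request or reset is in flight */
};

/* Gate new I/O while task management is outstanding. */
static int my_queuecommand(struct mpt_adapter *ioc)
{
	if (ioc->taskmgmt_quiesce_io)
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer requeues and retries later */

	/* ... otherwise build and post the SCSI IO request frame ... */
	return 0;
}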
Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.c | 13 +++- drivers/message/fusion/mptbase.h | 2 +- drivers/message/fusion/mptfc.c | 1 - drivers/message/fusion/mptsas.c | 141 +++++++++++++++++++++++--------------- drivers/message/fusion/mptsas.h | 1 + drivers/message/fusion/mptscsih.c | 19 ++--- drivers/message/fusion/mptscsih.h | 1 + drivers/message/fusion/mptspi.c | 1 - 8 files changed, 105 insertions(+), 74 deletions(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index af862bf6386f..ae203eca831f 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -6243,6 +6243,7 @@ mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) { switch (reset_phase) { case MPT_IOC_SETUP_RESET: + ioc->taskmgmt_quiesce_io = 1; dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__)); break; @@ -6595,8 +6596,11 @@ mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc) } retval = 0; ioc->taskmgmt_in_progress = 1; - if (ioc->alt_ioc) + ioc->taskmgmt_quiesce_io = 1; + if (ioc->alt_ioc) { ioc->alt_ioc->taskmgmt_in_progress = 1; + ioc->alt_ioc->taskmgmt_quiesce_io = 1; + } out: spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); return retval; @@ -6615,8 +6619,11 @@ mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc) spin_lock_irqsave(&ioc->taskmgmt_lock, flags); ioc->taskmgmt_in_progress = 0; - if (ioc->alt_ioc) + ioc->taskmgmt_quiesce_io = 0; + if (ioc->alt_ioc) { ioc->alt_ioc->taskmgmt_in_progress = 0; + ioc->alt_ioc->taskmgmt_quiesce_io = 0; + } spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); } EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag); @@ -6731,9 +6738,11 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) spin_lock_irqsave(&ioc->taskmgmt_lock, flags); ioc->ioc_reset_in_progress = 0; + ioc->taskmgmt_quiesce_io = 0; ioc->taskmgmt_in_progress = 0; if (ioc->alt_ioc) { ioc->alt_ioc->ioc_reset_in_progress = 0; + ioc->alt_ioc->taskmgmt_quiesce_io = 0; ioc->alt_ioc->taskmgmt_in_progress = 0; } spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 2129aff294d5..a0bf7d88fcfc 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -713,6 +713,7 @@ typedef struct _MPT_ADAPTER MPT_MGMT taskmgmt_cmds; spinlock_t taskmgmt_lock; /* diagnostic reset lock */ int taskmgmt_in_progress; + u8 taskmgmt_quiesce_io; u8 ioc_reset_in_progress; struct work_struct sas_persist_task; @@ -855,7 +856,6 @@ typedef struct _MPT_SCSI_HOST { * OS callbacks. freeQ is the free pool. 
*/ u8 tmPending; - u8 resetPending; u8 negoNvram; /* DV disabled, nego NVRAM */ u8 pad1; u8 tmState; diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index d09387134145..a53b33214cde 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c @@ -1292,7 +1292,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) */ hd->tmPending = 0; hd->tmState = TM_STATE_NONE; - hd->resetPending = 0; hd->abortSCpnt = NULL; /* Clear the pointer used to store diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 16c4232c37de..3efa728fc590 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -93,6 +93,7 @@ static u8 mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS; static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS; static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */ static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS; +static u8 mptsasDeviceResetCtx = MPT_MAX_PROTOCOL_DRIVERS; static void mptsas_hotplug_work(struct work_struct *work); @@ -523,10 +524,12 @@ mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id) VirtTarget *vtarget = NULL; shost_for_each_device(sdev, ioc->sh) { - if ((vdevice = sdev->hostdata) == NULL) + vdevice = sdev->hostdata; + if ((vdevice == NULL) || + (vdevice->vtarget == NULL)) continue; if (vdevice->vtarget->id == id && - vdevice->vtarget->channel == channel) + vdevice->vtarget->channel == channel) vtarget = vdevice->vtarget; } return vtarget; @@ -551,9 +554,11 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id) MPT_FRAME_HDR *mf; SCSITaskMgmt_t *pScsiTm; - if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) { - dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n", - ioc->name,__func__, __LINE__)); + mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc); + if (mf == NULL) { + dfailprintk(ioc, printk(MYIOC_s_WARN_FMT + "%s, no msg frames @%d!!\n", + ioc->name, __func__, __LINE__)); return 0; } @@ -569,7 +574,7 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id) DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); - mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf); + mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf); return 1; } @@ -605,8 +610,9 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc, target_reset_list = kzalloc(sizeof(*target_reset_list), GFP_ATOMIC); if (!target_reset_list) { - dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n", - ioc->name,__func__, __LINE__)); + dfailprintk(ioc, printk(MYIOC_s_WARN_FMT + "%s, failed to allocate mem @%d..!!\n", + ioc->name, __func__, __LINE__)); return; } @@ -614,55 +620,94 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc, sizeof(*sas_event_data)); list_add_tail(&target_reset_list->list, &hd->target_reset_list); - if (hd->resetPending) - return; + target_reset_list->time_count = jiffies; if (mptsas_target_reset(ioc, channel, id)) { target_reset_list->target_reset_issued = 1; - hd->resetPending = 1; } } /** - * mptsas_dev_reset_complete - * - * Completion for TARGET_RESET after NOT_RESPONDING_EVENT, - * enable work queue to finish off removing device from upper layers. - * then send next TARGET_RESET in the queue. - * - * @ioc + * mptsas_taskmgmt_complete - Completion for TARGET_RESET after + * NOT_RESPONDING_EVENT, enable work queue to finish off removing device + * from upper layers. then send next TARGET_RESET in the queue. 
+ * @ioc: Pointer to MPT_ADAPTER structure * **/ -static void -mptsas_dev_reset_complete(MPT_ADAPTER *ioc) +static int +mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) { MPT_SCSI_HOST *hd = shost_priv(ioc->sh); struct list_head *head = &hd->target_reset_list; - struct mptsas_target_reset_event *target_reset_list; struct mptsas_hotplug_event *ev; EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data; u8 id, channel; __le64 sas_address; + struct mptsas_target_reset_event *target_reset_list; + SCSITaskMgmtReply_t *pScsiTmReply; + + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed: " + "(mf = %p, mr = %p)\n", ioc->name, mf, mr)); + + pScsiTmReply = (SCSITaskMgmtReply_t *)mr; + if (pScsiTmReply) { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "\tTaskMgmt completed: fw_channel = %d, fw_id = %d,\n" + "\ttask_type = 0x%02X, iocstatus = 0x%04X " + "loginfo = 0x%08X,\n\tresponse_code = 0x%02X, " + "term_cmnds = %d\n", ioc->name, + pScsiTmReply->Bus, pScsiTmReply->TargetID, + pScsiTmReply->TaskType, + le16_to_cpu(pScsiTmReply->IOCStatus), + le32_to_cpu(pScsiTmReply->IOCLogInfo), + pScsiTmReply->ResponseCode, + le32_to_cpu(pScsiTmReply->TerminationCount))); + + if (pScsiTmReply->ResponseCode) + mptscsih_taskmgmt_response_code(ioc, + pScsiTmReply->ResponseCode); + } + + if (pScsiTmReply && (pScsiTmReply->TaskType == + MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK || pScsiTmReply->TaskType == + MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) { + ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; + ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID; + memcpy(ioc->taskmgmt_cmds.reply, mr, + min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength)); + if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) { + ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING; + complete(&ioc->taskmgmt_cmds.done); + return 1; + } + return 0; + } + + mpt_clear_taskmgmt_in_progress_flag(ioc); if (list_empty(head)) - return; + return 1; + + target_reset_list = list_entry(head->next, + struct mptsas_target_reset_event, list); - target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, list); + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt: completed (%d seconds)\n", + ioc->name, jiffies_to_msecs(jiffies - + target_reset_list->time_count)/1000)); sas_event_data = &target_reset_list->sas_event_data; - id = sas_event_data->TargetID; - channel = sas_event_data->Bus; - hd->resetPending = 0; + id = pScsiTmReply->TargetID; + channel = pScsiTmReply->Bus; + target_reset_list->time_count = jiffies; /* * retry target reset */ if (!target_reset_list->target_reset_issued) { - if (mptsas_target_reset(ioc, channel, id)) { + if (mptsas_target_reset(ioc, channel, id)) target_reset_list->target_reset_issued = 1; - hd->resetPending = 1; - } - return; + return 1; } /* @@ -674,7 +719,7 @@ mptsas_dev_reset_complete(MPT_ADAPTER *ioc) if (!ev) { dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n", ioc->name,__func__, __LINE__)); - return; + return 0; } INIT_WORK(&ev->work, mptsas_hotplug_work); @@ -693,40 +738,26 @@ mptsas_dev_reset_complete(MPT_ADAPTER *ioc) schedule_work(&ev->work); kfree(target_reset_list); + /* * issue target reset to next device in the queue */ head = &hd->target_reset_list; if (list_empty(head)) - return; + return 1; target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, list); - sas_event_data = &target_reset_list->sas_event_data; - id = sas_event_data->TargetID; - channel = sas_event_data->Bus; + id = 
target_reset_list->sas_event_data.TargetID; + channel = target_reset_list->sas_event_data.Bus; + target_reset_list->time_count = jiffies; - if (mptsas_target_reset(ioc, channel, id)) { + if (mptsas_target_reset(ioc, channel, id)) target_reset_list->target_reset_issued = 1; - hd->resetPending = 1; - } -} -/** - * mptsas_taskmgmt_complete - * - * @ioc - * @mf - * @mr - * - **/ -static int -mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) -{ - mptsas_dev_reset_complete(ioc); - return mptscsih_taskmgmt_complete(ioc, mf, mr); + return 1; } /** @@ -3262,7 +3293,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) */ hd->tmPending = 0; hd->tmState = TM_STATE_NONE; - hd->resetPending = 0; hd->abortSCpnt = NULL; /* Clear the pointer used to store @@ -3381,10 +3411,12 @@ mptsas_init(void) return -ENODEV; mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER); - mptsasTaskCtx = mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER); + mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER); mptsasInternalCtx = mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER); mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER); + mptsasDeviceResetCtx = + mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER); mpt_event_register(mptsasDoneCtx, mptsas_event_process); mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset); @@ -3409,6 +3441,7 @@ mptsas_exit(void) mpt_deregister(mptsasInternalCtx); mpt_deregister(mptsasTaskCtx); mpt_deregister(mptsasDoneCtx); + mpt_deregister(mptsasDeviceResetCtx); } module_init(mptsas_init); diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h index 2b544e0877e6..bf528a5b59b1 100644 --- a/drivers/message/fusion/mptsas.h +++ b/drivers/message/fusion/mptsas.h @@ -53,6 +53,7 @@ struct mptsas_target_reset_event { struct list_head list; EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data; u8 target_reset_issued; + unsigned long time_count; }; enum mptsas_hotplug_action { diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 2463731ed355..a6a2bbda2f18 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -99,7 +99,7 @@ int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); -static void +void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code); static int mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply); @@ -1304,7 +1304,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n", ioc->name, SCpnt, done)); - if (hd->resetPending) { + if (ioc->taskmgmt_quiesce_io) { dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n", ioc->name, SCpnt)); return SCSI_MLQUEUE_HOST_BUSY; @@ -1709,11 +1709,6 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) goto out; } - if (hd->resetPending) { - retval = FAILED; - goto out; - } - if (hd->timeouts < -1) hd->timeouts++; @@ -1782,11 +1777,6 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt) ioc->name, SCpnt); scsi_print_command(SCpnt); - if (hd->resetPending) { - retval = FAILED; - goto out; - } - vdevice = SCpnt->device->hostdata; if (!vdevice || !vdevice->vtarget) { retval = 0; @@ -1967,7 +1957,7 @@ mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type, } 
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -static void +void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code) { char *desc; @@ -2001,6 +1991,7 @@ mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code) printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n", ioc->name, response_code, desc); } +EXPORT_SYMBOL(mptscsih_taskmgmt_response_code); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** @@ -2442,12 +2433,10 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) case MPT_IOC_SETUP_RESET: dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__)); - hd->resetPending = 1; break; case MPT_IOC_PRE_RESET: dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__)); - hd->resetPending = 0; mptscsih_flush_running_cmds(hd); break; case MPT_IOC_POST_RESET: diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h index 6ac5d4a5c4e8..91e9e9fcd0e4 100644 --- a/drivers/message/fusion/mptscsih.h +++ b/drivers/message/fusion/mptscsih.h @@ -132,3 +132,4 @@ extern void mptscsih_timer_expired(unsigned long data); extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id); extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id); extern struct device_attribute *mptscsih_host_attrs[]; +extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code); diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index e94c76dbe780..8b940740292e 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c @@ -1476,7 +1476,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id) */ hd->tmPending = 0; hd->tmState = TM_STATE_NONE; - hd->resetPending = 0; hd->abortSCpnt = NULL; /* Clear the pointer used to store -- cgit v1.2.3 From ea2a788de4ce5ebab09276e25443f55592af2335 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Fri, 29 May 2009 16:46:50 +0530 Subject: [SCSI] mpt fusion: rewrite of ioctl_cmds internal generated function 1) rewrite of the internally generated ioctl_cmds functions that issue commands to firmware, porting them to be single-threaded using the generic MPT_MGMT struct. All wait queues are replaced by completion queues.
2) added a separate callback handler for ioctl task management (mptctl_taskmgmt_reply) to handle commands that time out 3) rewrite mptctl_bus_reset Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.h | 21 +- drivers/message/fusion/mptctl.c | 650 +++++++++++++++++++++----------------- drivers/message/fusion/mptfc.c | 2 - drivers/message/fusion/mptsas.c | 37 ++- drivers/message/fusion/mptscsih.c | 2 - drivers/message/fusion/mptspi.c | 2 - 6 files changed, 394 insertions(+), 320 deletions(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index a0bf7d88fcfc..24d60128bfe6 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -434,18 +434,6 @@ do { \ #define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */ -typedef struct _MPT_IOCTL { - struct _MPT_ADAPTER *ioc; - u8 ReplyFrame[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */ - u8 sense[MPT_SENSE_BUFFER_ALLOC]; - int wait_done; /* wake-up value for this ioc */ - u8 rsvd; - u8 status; /* current command status */ - u8 reset; /* 1 if bus reset allowed */ - u8 id; /* target for reset */ - struct mutex ioctl_mutex; -} MPT_IOCTL; - #define MPT_MGMT_STATUS_RF_VALID 0x01 /* The Reply Frame is VALID */ #define MPT_MGMT_STATUS_COMMAND_GOOD 0x02 /* Command Status GOOD */ #define MPT_MGMT_STATUS_PENDING 0x04 /* command is pending */ @@ -460,6 +448,10 @@ typedef struct _MPT_IOCTL { status = MPT_MGMT_STATUS_PENDING; #define CLEAR_MGMT_STATUS(status) \ status = 0; +#define CLEAR_MGMT_PENDING_STATUS(status) \ + status &= ~MPT_MGMT_STATUS_PENDING; +#define SET_MGMT_MSG_CONTEXT(msg_context, value) \ + msg_context = value; typedef struct _MPT_MGMT { struct mutex mutex; @@ -468,6 +460,7 @@ typedef struct _MPT_MGMT { u8 sense[MPT_SENSE_BUFFER_ALLOC]; u8 status; /* current command status */ int completion_code; + u32 msg_context; } MPT_MGMT; /* @@ -654,7 +647,6 @@ typedef struct _MPT_ADAPTER RaidCfgData raid_data; /* Raid config. data */ SasCfgData sas_data; /* Sas config. data */ FcCfgData fc_data; /* Fc config. data */ - MPT_IOCTL *ioctl; /* ioctl data pointer */ struct proc_dir_entry *ioc_dentry; struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */ u32 biosVersion; /* BIOS version from IO Unit Page 2 */ @@ -711,6 +703,7 @@ typedef struct _MPT_ADAPTER MPT_MGMT mptbase_cmds; /* for sending config pages */ MPT_MGMT internal_cmds; MPT_MGMT taskmgmt_cmds; + MPT_MGMT ioctl_cmds; spinlock_t taskmgmt_lock; /* diagnostic reset lock */ int taskmgmt_in_progress; u8 taskmgmt_quiesce_io; @@ -855,10 +848,8 @@ typedef struct _MPT_SCSI_HOST { /* Pool of memory for holding SCpnts before doing * OS callbacks. freeQ is the free pool.
*/ - u8 tmPending; u8 negoNvram; /* DV disabled, nego NVRAM */ u8 pad1; - u8 tmState; u8 rsvd[2]; MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */ struct scsi_cmnd *abortSCpnt; diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index 22b75cb647e8..ab620132d9a9 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -84,6 +84,7 @@ MODULE_VERSION(my_VERSION); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS; +static u8 mptctl_taskmgmt_id = MPT_MAX_PROTOCOL_DRIVERS; static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait ); @@ -127,10 +128,7 @@ static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTER *ioc); -static void mptctl_timeout_expired (MPT_IOCTL *ioctl); -static int mptctl_bus_reset(MPT_IOCTL *ioctl); -static int mptctl_set_tm_flags(MPT_SCSI_HOST *hd); -static void mptctl_free_tm_flags(MPT_ADAPTER *ioc); +static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function); /* * Reset Handler cleanup function @@ -183,10 +181,10 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock) int rc = 0; if (nonblock) { - if (!mutex_trylock(&ioc->ioctl->ioctl_mutex)) + if (!mutex_trylock(&ioc->ioctl_cmds.mutex)) rc = -EAGAIN; } else { - if (mutex_lock_interruptible(&ioc->ioctl->ioctl_mutex)) + if (mutex_lock_interruptible(&ioc->ioctl_cmds.mutex)) rc = -ERESTARTSYS; } return rc; @@ -202,100 +200,78 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock) static int mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) { - char *sense_data; - int sz, req_index; - u16 iocStatus; - u8 cmd; + char *sense_data; + int req_index; + int sz; - if (req) - cmd = req->u.hdr.Function; - else - return 1; - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tcompleting mpi function (0x%02X), req=%p, " - "reply=%p\n", ioc->name, req->u.hdr.Function, req, reply)); - - if (ioc->ioctl) { - - if (reply==NULL) { - - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_reply() NULL Reply " - "Function=%x!\n", ioc->name, cmd)); + if (!req) + return 0; - ioc->ioctl->status |= MPT_MGMT_STATUS_COMMAND_GOOD; - ioc->ioctl->reset &= ~MPTCTL_RESET_OK; + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "completing mpi function " + "(0x%02X), req=%p, reply=%p\n", ioc->name, req->u.hdr.Function, + req, reply)); - /* We are done, issue wake up - */ - ioc->ioctl->wait_done = 1; - wake_up (&mptctl_wait); - return 1; + /* + * Handling continuation of the same reply. Processing the first + * reply, and eating the other replys that come later. + */ + if (ioc->ioctl_cmds.msg_context != req->u.hdr.MsgContext) + goto out_continuation; - } + ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; - /* Copy the reply frame (which much exist - * for non-SCSI I/O) to the IOC structure. - */ - memcpy(ioc->ioctl->ReplyFrame, reply, - min(ioc->reply_sz, 4*reply->u.reply.MsgLength)); - ioc->ioctl->status |= MPT_MGMT_STATUS_RF_VALID; + if (!reply) + goto out; - /* Set the command status to GOOD if IOC Status is GOOD - * OR if SCSI I/O cmd and data underrun or recovered error. 
- */ - iocStatus = le16_to_cpu(reply->u.reply.IOCStatus) & MPI_IOCSTATUS_MASK; - if (iocStatus == MPI_IOCSTATUS_SUCCESS) - ioc->ioctl->status |= MPT_MGMT_STATUS_COMMAND_GOOD; - - if (iocStatus || reply->u.reply.IOCLogInfo) - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tiocstatus (0x%04X), " - "loginfo (0x%08X)\n", ioc->name, - iocStatus, - le32_to_cpu(reply->u.reply.IOCLogInfo))); - - if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) || - (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { - - if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState) - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "\tscsi_status (0x%02x), scsi_state (0x%02x), " - "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name, - reply->u.sreply.SCSIStatus, - reply->u.sreply.SCSIState, - le16_to_cpu(reply->u.sreply.TaskTag), - le32_to_cpu(reply->u.sreply.TransferCount))); - - ioc->ioctl->reset &= ~MPTCTL_RESET_OK; - - if ((iocStatus == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN) || - (iocStatus == MPI_IOCSTATUS_SCSI_RECOVERED_ERROR)) { - ioc->ioctl->status |= - MPT_MGMT_STATUS_COMMAND_GOOD; - } - } + ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_RF_VALID; + sz = min(ioc->reply_sz, 4*reply->u.reply.MsgLength); + memcpy(ioc->ioctl_cmds.reply, reply, sz); - /* Copy the sense data - if present - */ - if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) && - (reply->u.sreply.SCSIState & - MPI_SCSI_STATE_AUTOSENSE_VALID)){ + if (reply->u.reply.IOCStatus || reply->u.reply.IOCLogInfo) + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "iocstatus (0x%04X), loginfo (0x%08X)\n", ioc->name, + le16_to_cpu(reply->u.reply.IOCStatus), + le32_to_cpu(reply->u.reply.IOCLogInfo))); + + if ((req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) || + (req->u.hdr.Function == + MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { + + if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState) + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "scsi_status (0x%02x), scsi_state (0x%02x), " + "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name, + reply->u.sreply.SCSIStatus, + reply->u.sreply.SCSIState, + le16_to_cpu(reply->u.sreply.TaskTag), + le32_to_cpu(reply->u.sreply.TransferCount))); + + if (reply->u.sreply.SCSIState & + MPI_SCSI_STATE_AUTOSENSE_VALID) { sz = req->u.scsireq.SenseBufferLength; req_index = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx); - sense_data = - ((u8 *)ioc->sense_buf_pool + + sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); - memcpy(ioc->ioctl->sense, sense_data, sz); - ioc->ioctl->status |= MPT_MGMT_STATUS_SENSE_VALID; + memcpy(ioc->ioctl_cmds.sense, sense_data, sz); + ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_SENSE_VALID; } + } - if (cmd == MPI_FUNCTION_SCSI_TASK_MGMT) - mptctl_free_tm_flags(ioc); - - /* We are done, issue wake up - */ - ioc->ioctl->wait_done = 1; - wake_up (&mptctl_wait); + out: + /* We are done, issue wake up + */ + if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) { + if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT) + mpt_clear_taskmgmt_in_progress_flag(ioc); + ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING; + complete(&ioc->ioctl_cmds.done); } + + out_continuation: + if (reply && (reply->u.reply.MsgFlags & + MPI_MSGFLAGS_CONTINUATION_REPLY)) + return 0; return 1; } @@ -305,30 +281,66 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) * Expecting an interrupt, however timed out. 
* */ -static void mptctl_timeout_expired (MPT_IOCTL *ioctl) +static void +mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf) { - int rc = 1; + unsigned long flags; - if (ioctl == NULL) - return; - dctlprintk(ioctl->ioc, - printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n", - ioctl->ioc->name, ioctl->ioc->id)); + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n", + ioc->name, __func__)); - ioctl->wait_done = 0; - if (ioctl->reset & MPTCTL_RESET_OK) - rc = mptctl_bus_reset(ioctl); + if (mpt_fwfault_debug) + mpt_halt_firmware(ioc); - if (rc) { - /* Issue a reset for this device. - * The IOC is not responding. - */ - dctlprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n", - ioctl->ioc->name)); - mpt_HardResetHandler(ioctl->ioc, CAN_SLEEP); + spin_lock_irqsave(&ioc->taskmgmt_lock, flags); + if (ioc->ioc_reset_in_progress) { + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status) + mpt_free_msg_frame(ioc, mf); + return; } - return; + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + + if (!mptctl_bus_reset(ioc, mf->u.hdr.Function)) + return; + + /* Issue a reset for this device. + * The IOC is not responding. + */ + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n", + ioc->name)); + CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status) + mpt_HardResetHandler(ioc, CAN_SLEEP); + mpt_free_msg_frame(ioc, mf); +} + +static int +mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) +{ + if (!mf) + return 0; + + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt completed (mf=%p, mr=%p)\n", + ioc->name, mf, mr)); + + ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; + + if (!mr) + goto out; + + ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID; + memcpy(ioc->taskmgmt_cmds.reply, mr, + min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength)); + out: + if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) { + mpt_clear_taskmgmt_in_progress_flag(ioc); + ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING; + complete(&ioc->taskmgmt_cmds.done); + return 1; + } + return 0; } /* mptctl_bus_reset @@ -336,133 +348,150 @@ static void mptctl_timeout_expired (MPT_IOCTL *ioctl) * Bus reset code. * */ -static int mptctl_bus_reset(MPT_IOCTL *ioctl) +static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function) { MPT_FRAME_HDR *mf; SCSITaskMgmt_t *pScsiTm; - MPT_SCSI_HOST *hd; + SCSITaskMgmtReply_t *pScsiTmReply; int ii; - int retval=0; - - - ioctl->reset &= ~MPTCTL_RESET_OK; - - if (ioctl->ioc->sh == NULL) + int retval; + unsigned long timeout; + unsigned long time_count; + u16 iocstatus; + + /* bus reset is only good for SCSI IO, RAID PASSTHRU */ + if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) || + (function == MPI_FUNCTION_SCSI_IO_REQUEST)) { + dtmprintk(ioc, printk(MYIOC_s_WARN_FMT + "TaskMgmt, not SCSI_IO!!\n", ioc->name)); return -EPERM; + } - hd = shost_priv(ioctl->ioc->sh); - if (hd == NULL) + mutex_lock(&ioc->taskmgmt_cmds.mutex); + if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { + mutex_unlock(&ioc->taskmgmt_cmds.mutex); return -EPERM; + } - /* Single threading .... 
- */ - if (mptctl_set_tm_flags(hd) != 0) - return -EPERM; + retval = 0; /* Send request */ - if ((mf = mpt_get_msg_frame(mptctl_id, ioctl->ioc)) == NULL) { - dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt, no msg frames!!\n", - ioctl->ioc->name)); - - mptctl_free_tm_flags(ioctl->ioc); - return -ENOMEM; + mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc); + if (mf == NULL) { + dtmprintk(ioc, printk(MYIOC_s_WARN_FMT + "TaskMgmt, no msg frames!!\n", ioc->name)); + mpt_clear_taskmgmt_in_progress_flag(ioc); + retval = -ENOMEM; + goto mptctl_bus_reset_done; } - dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n", - ioctl->ioc->name, mf)); + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n", + ioc->name, mf)); pScsiTm = (SCSITaskMgmt_t *) mf; - pScsiTm->TargetID = ioctl->id; - pScsiTm->Bus = hd->port; /* 0 */ - pScsiTm->ChainOffset = 0; + memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t)); pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; - pScsiTm->Reserved = 0; pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; - pScsiTm->Reserved1 = 0; pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION; - + pScsiTm->TargetID = 0; + pScsiTm->Bus = 0; + pScsiTm->ChainOffset = 0; + pScsiTm->Reserved = 0; + pScsiTm->Reserved1 = 0; + pScsiTm->TaskMsgContext = 0; for (ii= 0; ii < 8; ii++) pScsiTm->LUN[ii] = 0; - for (ii=0; ii < 7; ii++) pScsiTm->Reserved2[ii] = 0; - pScsiTm->TaskMsgContext = 0; - dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT - "mptctl_bus_reset: issued.\n", ioctl->ioc->name)); - - DBG_DUMP_TM_REQUEST_FRAME(ioctl->ioc, (u32 *)mf); + switch (ioc->bus_type) { + case FC: + timeout = 40; + break; + case SAS: + timeout = 30; + break; + case SPI: + default: + timeout = 2; + break; + } - ioctl->wait_done=0; + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt type=%d timeout=%ld\n", + ioc->name, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, timeout)); - if ((ioctl->ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && - (ioctl->ioc->facts.MsgVersion >= MPI_VERSION_01_05)) - mpt_put_msg_frame_hi_pri(mptctl_id, ioctl->ioc, mf); + INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status) + CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status) + time_count = jiffies; + if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && + (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) + mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf); else { - retval = mpt_send_handshake_request(mptctl_id, ioctl->ioc, - sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP); + retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc, + sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP); if (retval != 0) { - dfailprintk(ioctl->ioc, printk(MYIOC_s_ERR_FMT "_send_handshake FAILED!" - " (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd, - hd->ioc, mf)); + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT + "TaskMgmt send_handshake FAILED!" 
+ " (ioc %p, mf %p, rc=%d) \n", ioc->name, + ioc, mf, retval)); + mpt_clear_taskmgmt_in_progress_flag(ioc); goto mptctl_bus_reset_done; } } /* Now wait for the command to complete */ - ii = wait_event_timeout(mptctl_wait, - ioctl->wait_done == 1, - HZ*5 /* 5 second timeout */); + ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ); + if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt failed\n", ioc->name)); + mpt_free_msg_frame(ioc, mf); + mpt_clear_taskmgmt_in_progress_flag(ioc); + if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) + retval = 0; + else + retval = -1; /* return failure */ + goto mptctl_bus_reset_done; + } - if(ii <=0 && (ioctl->wait_done != 1 )) { - mpt_free_msg_frame(hd->ioc, mf); - ioctl->wait_done = 0; + if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt failed\n", ioc->name)); + retval = -1; /* return failure */ + goto mptctl_bus_reset_done; + } + + pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply; + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, " + "iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, " + "term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus, + pScsiTmReply->TargetID, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, + le16_to_cpu(pScsiTmReply->IOCStatus), + le32_to_cpu(pScsiTmReply->IOCLogInfo), + pScsiTmReply->ResponseCode, + le32_to_cpu(pScsiTmReply->TerminationCount))); + + iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; + + if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED || + iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED || + iocstatus == MPI_IOCSTATUS_SUCCESS) + retval = 0; + else { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt failed\n", ioc->name)); retval = -1; /* return failure */ } -mptctl_bus_reset_done: - mptctl_free_tm_flags(ioctl->ioc); + mptctl_bus_reset_done: + mutex_unlock(&ioc->taskmgmt_cmds.mutex); + CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status) return retval; } -static int -mptctl_set_tm_flags(MPT_SCSI_HOST *hd) { - unsigned long flags; - - spin_lock_irqsave(&hd->ioc->FreeQlock, flags); - - if (hd->tmState == TM_STATE_NONE) { - hd->tmState = TM_STATE_IN_PROGRESS; - hd->tmPending = 1; - spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); - } else { - spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); - return -EBUSY; - } - - return 0; -} - -static void -mptctl_free_tm_flags(MPT_ADAPTER *ioc) -{ - MPT_SCSI_HOST * hd; - unsigned long flags; - - hd = shost_priv(ioc->sh); - if (hd == NULL) - return; - - spin_lock_irqsave(&ioc->FreeQlock, flags); - - hd->tmState = TM_STATE_NONE; - hd->tmPending = 0; - spin_unlock_irqrestore(&ioc->FreeQlock, flags); - - return; -} /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* mptctl_ioc_reset @@ -474,22 +503,23 @@ mptctl_free_tm_flags(MPT_ADAPTER *ioc) static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) { - MPT_IOCTL *ioctl = ioc->ioctl; - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOC %s_reset routed to IOCTL driver!\n", ioc->name, - reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( - reset_phase==MPT_IOC_PRE_RESET ? 
"pre" : "post"))); - - if(ioctl == NULL) - return 1; - switch(reset_phase) { case MPT_IOC_SETUP_RESET: - ioctl->status |= MPT_MGMT_STATUS_DID_IOCRESET; + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__)); + break; + case MPT_IOC_PRE_RESET: + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__)); break; case MPT_IOC_POST_RESET: - ioctl->status &= ~MPT_MGMT_STATUS_DID_IOCRESET; + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__)); + if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) { + ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_DID_IOCRESET; + complete(&ioc->ioctl_cmds.done); + } break; - case MPT_IOC_PRE_RESET: default: break; } @@ -643,7 +673,7 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) else ret = -EINVAL; - mutex_unlock(&iocp->ioctl->ioctl_mutex); + mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; } @@ -759,6 +789,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) int sge_offset = 0; u16 iocstat; pFWDownloadReply_t ReplyMsg = NULL; + unsigned long timeleft; if (mpt_verify_adapter(ioc, &iocp) < 0) { printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", @@ -893,16 +924,30 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) * Finally, perform firmware download. */ ReplyMsg = NULL; + SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, dlmsg->MsgContext); + INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status) mpt_put_msg_frame(mptctl_id, iocp, mf); /* Now wait for the command to complete */ - ret = wait_event_timeout(mptctl_wait, - iocp->ioctl->wait_done == 1, - HZ*60); +retry_wait: + timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60); + if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { + ret = -ETIME; + printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__); + if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { + mpt_free_msg_frame(iocp, mf); + goto fwdl_out; + } + if (!timeleft) + mptctl_timeout_expired(iocp, mf); + else + goto retry_wait; + goto fwdl_out; + } - if(ret <=0 && (iocp->ioctl->wait_done != 1 )) { - /* Now we need to reset the board */ - mptctl_timeout_expired(iocp->ioctl); + if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { + printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__); + mpt_free_msg_frame(iocp, mf); ret = -ENODATA; goto fwdl_out; } @@ -910,7 +955,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) if (sgl) kfree_sgl(sgl, sgl_dma, buflist, iocp); - ReplyMsg = (pFWDownloadReply_t)iocp->ioctl->ReplyFrame; + ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply; iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK; if (iocstat == MPI_IOCSTATUS_SUCCESS) { printk(MYIOC_s_INFO_FMT "F/W update successfull!\n", iocp->name); @@ -934,6 +979,9 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) return 0; fwdl_out: + + CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status); + SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0); kfree_sgl(sgl, sgl_dma, buflist, iocp); return ret; } @@ -1774,7 +1822,10 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) int msgContext; u16 req_idx; ulong timeout; + unsigned long timeleft; struct scsi_device *sdev; + unsigned long flags; + u8 function; /* bufIn and bufOut are used for user to kernel space transfers */ @@ -1787,16 +1838,15 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) __FILE__, 
__LINE__, iocnum); return -ENODEV; } - if (!ioc->ioctl) { - printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " - "No memory available during driver init.\n", - __FILE__, __LINE__); - return -ENOMEM; - } else if (ioc->ioctl->status & MPT_MGMT_STATUS_DID_IOCRESET) { + + spin_lock_irqsave(&ioc->taskmgmt_lock, flags); + if (ioc->ioc_reset_in_progress) { + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " - "Busy with IOC Reset \n", __FILE__, __LINE__); + "Busy with diagnostic reset\n", __FILE__, __LINE__); return -EBUSY; } + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); /* Verify that the final request frame will not be too large. */ @@ -1830,10 +1880,12 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Unable to read MF from mpt_ioctl_command struct @ %p\n", ioc->name, __FILE__, __LINE__, mfPtr); + function = -1; rc = -EFAULT; goto done_free_mem; } hdr->MsgContext = cpu_to_le32(msgContext); + function = hdr->Function; /* Verify that this request is allowed. @@ -1841,7 +1893,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n", ioc->name, hdr->Function, mf)); - switch (hdr->Function) { + switch (function) { case MPI_FUNCTION_IOC_FACTS: case MPI_FUNCTION_PORT_FACTS: karg.dataOutSize = karg.dataInSize = 0; @@ -1938,8 +1990,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) pScsiReq->Control = cpu_to_le32(scsidir | qtag); pScsiReq->DataLength = cpu_to_le32(dataSize); - ioc->ioctl->reset = MPTCTL_RESET_OK; - ioc->ioctl->id = pScsiReq->TargetID; } else { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " @@ -2017,8 +2067,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) pScsiReq->Control = cpu_to_le32(scsidir | qtag); pScsiReq->DataLength = cpu_to_le32(dataSize); - ioc->ioctl->reset = MPTCTL_RESET_OK; - ioc->ioctl->id = pScsiReq->TargetID; } else { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "SCSI driver is not loaded. \n", @@ -2029,20 +2077,17 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) break; case MPI_FUNCTION_SCSI_TASK_MGMT: - { - MPT_SCSI_HOST *hd = NULL; - if ((ioc->sh == NULL) || ((hd = shost_priv(ioc->sh)) == NULL)) { - printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " - "SCSI driver not loaded or SCSI host not found. 
\n", - ioc->name, __FILE__, __LINE__); - rc = -EFAULT; - goto done_free_mem; - } else if (mptctl_set_tm_flags(hd) != 0) { - rc = -EPERM; - goto done_free_mem; - } - } + { + SCSITaskMgmt_t *pScsiTm; + pScsiTm = (SCSITaskMgmt_t *)mf; + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "\tTaskType=0x%x MsgFlags=0x%x " + "TaskMsgContext=0x%x id=%d channel=%d\n", + ioc->name, pScsiTm->TaskType, le32_to_cpu + (pScsiTm->TaskMsgContext), pScsiTm->MsgFlags, + pScsiTm->TargetID, pScsiTm->Bus)); break; + } case MPI_FUNCTION_IOC_INIT: { @@ -2186,9 +2231,16 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) ioc->add_sge(psge, flagsLength, (dma_addr_t) -1); } - ioc->ioctl->wait_done = 0; + SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext); + INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) { + mutex_lock(&ioc->taskmgmt_cmds.mutex); + if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { + mutex_unlock(&ioc->taskmgmt_cmds.mutex); + goto done_free_mem; + } + DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && @@ -2199,10 +2251,11 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP); if (rc != 0) { dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "_send_handshake FAILED! (ioc %p, mf %p)\n", + "send_handshake FAILED! (ioc %p, mf %p)\n", ioc->name, ioc, mf)); - mptctl_free_tm_flags(ioc); + mpt_clear_taskmgmt_in_progress_flag(ioc); rc = -ENODATA; + mutex_unlock(&ioc->taskmgmt_cmds.mutex); goto done_free_mem; } } @@ -2212,36 +2265,47 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) /* Now wait for the command to complete */ timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; - timeout = wait_event_timeout(mptctl_wait, - ioc->ioctl->wait_done == 1, - HZ*timeout); - - if(timeout <=0 && (ioc->ioctl->wait_done != 1 )) { - /* Now we need to reset the board */ - - if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) - mptctl_free_tm_flags(ioc); - - mptctl_timeout_expired(ioc->ioctl); - rc = -ENODATA; +retry_wait: + timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, + HZ*timeout); + if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { + rc = -ETIME; + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n", + ioc->name, __func__)); + if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { + if (function == MPI_FUNCTION_SCSI_TASK_MGMT) + mutex_unlock(&ioc->taskmgmt_cmds.mutex); + goto done_free_mem; + } + if (!timeleft) { + if (function == MPI_FUNCTION_SCSI_TASK_MGMT) + mutex_unlock(&ioc->taskmgmt_cmds.mutex); + mptctl_timeout_expired(ioc, mf); + mf = NULL; + } else + goto retry_wait; goto done_free_mem; } + if (function == MPI_FUNCTION_SCSI_TASK_MGMT) + mutex_unlock(&ioc->taskmgmt_cmds.mutex); + + mf = NULL; /* If a valid reply frame, copy to the user. 
* Offset 2: reply length in U32's */ - if (ioc->ioctl->status & MPT_MGMT_STATUS_RF_VALID) { + if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) { if (karg.maxReplyBytes < ioc->reply_sz) { - sz = min(karg.maxReplyBytes, 4*ioc->ioctl->ReplyFrame[2]); + sz = min(karg.maxReplyBytes, + 4*ioc->ioctl_cmds.reply[2]); } else { - sz = min(ioc->reply_sz, 4*ioc->ioctl->ReplyFrame[2]); + sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]); } - if (sz > 0) { if (copy_to_user(karg.replyFrameBufPtr, - &ioc->ioctl->ReplyFrame, sz)){ + ioc->ioctl_cmds.reply, sz)){ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Unable to write out reply frame %p\n", @@ -2254,10 +2318,11 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) /* If valid sense data, copy to user. */ - if (ioc->ioctl->status & MPT_MGMT_STATUS_SENSE_VALID) { + if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) { sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE); if (sz > 0) { - if (copy_to_user(karg.senseDataPtr, ioc->ioctl->sense, sz)) { + if (copy_to_user(karg.senseDataPtr, + ioc->ioctl_cmds.sense, sz)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Unable to write sense data to user %p\n", ioc->name, __FILE__, __LINE__, @@ -2271,7 +2336,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) /* If the overall status is _GOOD and data in, copy data * to user. */ - if ((ioc->ioctl->status & MPT_MGMT_STATUS_COMMAND_GOOD) && + if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) && (karg.dataInSize > 0) && (bufIn.kptr)) { if (copy_to_user(karg.dataInBufPtr, @@ -2286,9 +2351,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) done_free_mem: - ioc->ioctl->status &= ~(MPT_MGMT_STATUS_COMMAND_GOOD | - MPT_MGMT_STATUS_SENSE_VALID | - MPT_MGMT_STATUS_RF_VALID); + CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status) + SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); /* Free the allocated memory. */ @@ -2338,6 +2402,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; MPT_FRAME_HDR *mf = NULL; MPIHeader_t *mpi_hdr; + unsigned long timeleft; + int retval; /* Reset long to int. 
Should affect IA64 and SPARC only */ @@ -2478,8 +2544,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) * Gather ISTWI(Industry Standard Two Wire Interface) Data */ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { - dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", - ioc->name,__func__)); + dfailprintk(ioc, printk(MYIOC_s_WARN_FMT + "%s, no msg frames!!\n", ioc->name, __func__)); goto out; } @@ -2503,19 +2569,26 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) ioc->add_sge((char *)&IstwiRWRequest->SGL, (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma); - ioc->ioctl->wait_done = 0; + retval = 0; + SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, + IstwiRWRequest->MsgContext); + INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) mpt_put_msg_frame(mptctl_id, ioc, mf); - rc = wait_event_timeout(mptctl_wait, - ioc->ioctl->wait_done == 1, - HZ*MPT_IOCTL_DEFAULT_TIMEOUT /* 10 sec */); - - if(rc <=0 && (ioc->ioctl->wait_done != 1 )) { - /* - * Now we need to reset the board - */ - mpt_free_msg_frame(ioc, mf); - mptctl_timeout_expired(ioc->ioctl); +retry_wait: + timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, + HZ*MPT_IOCTL_DEFAULT_TIMEOUT); + if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { + retval = -ETIME; + printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__); + if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { + mpt_free_msg_frame(ioc, mf); + goto out; + } + if (!timeleft) + mptctl_timeout_expired(ioc, mf); + else + goto retry_wait; goto out; } @@ -2528,10 +2601,13 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) * bays have drives in them * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3) */ - if (ioc->ioctl->status & MPT_MGMT_STATUS_RF_VALID) + if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) karg.rsvd = *(u32 *)pbuf; out: + CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status) + SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); + if (pbuf) pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); @@ -2755,7 +2831,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd, ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); - mutex_unlock(&iocp->ioctl->ioctl_mutex); + mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; } @@ -2809,7 +2885,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd, */ ret = mptctl_do_mpt_command (karg, &uarg->MF); - mutex_unlock(&iocp->ioctl->ioctl_mutex); + mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; } @@ -2861,21 +2937,10 @@ static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long a static int mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - MPT_IOCTL *mem; MPT_ADAPTER *ioc = pci_get_drvdata(pdev); - /* - * Allocate and inite a MPT_IOCTL structure - */ - mem = kzalloc(sizeof(MPT_IOCTL), GFP_KERNEL); - if (!mem) { - mptctl_remove(pdev); - return -ENOMEM; - } - - ioc->ioctl = mem; - ioc->ioctl->ioc = ioc; - mutex_init(&ioc->ioctl->ioctl_mutex); + mutex_init(&ioc->ioctl_cmds.mutex); + init_completion(&ioc->ioctl_cmds.done); return 0; } @@ -2889,9 +2954,6 @@ mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id) static void mptctl_remove(struct pci_dev *pdev) { - MPT_ADAPTER *ioc = pci_get_drvdata(pdev); - - kfree ( ioc->ioctl ); } static struct mpt_pci_driver mptctl_driver = { @@ -2931,6 +2993,7 @@ static int __init mptctl_init(void) goto out_fail; } + mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER); mpt_reset_register(mptctl_id, mptctl_ioc_reset); 
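
The retry_wait idiom used above in mptctl_do_fw_download(), mptctl_do_mpt_command() and mptctl_hp_hostinfo() has three outcomes to separate: the reply handler completed us, an IOC reset completed us, or nobody did and the wait timed out. A self-contained sketch of that control flow, with hypothetical names (my_mgmt, MY_STATUS_*) standing in for the driver's MPT_MGMT bookkeeping:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define MY_STATUS_GOOD		(1 << 0)	/* reply handler saw success */
#define MY_STATUS_DID_RESET	(1 << 1)	/* IOC reset completed us */

struct my_mgmt {
	struct completion done;
	u32 status;
};

/* 0 on success, -ERESTARTSYS after an IOC reset, -ETIME on timeout. */
static int my_wait_done(struct my_mgmt *m, unsigned long timeout_secs)
{
	unsigned long timeleft;

retry_wait:
	timeleft = wait_for_completion_timeout(&m->done, timeout_secs * HZ);
	if (m->status & MY_STATUS_GOOD)
		return 0;			/* reply arrived in time */
	if (m->status & MY_STATUS_DID_RESET)
		return -ERESTARTSYS;		/* reset handler woke us */
	if (!timeleft)
		return -ETIME;			/* nobody completed us */
	goto retry_wait;			/* spurious wakeup: wait again */
}

A wakeup with time left but no status bit set is treated as spurious and the wait simply resumes; only a true timeout escalates to mptctl_timeout_expired() in the driver.
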
mpt_event_register(mptctl_id, mptctl_event_process); @@ -2955,6 +3018,7 @@ static void mptctl_exit(void) /* De-register callback handler from base module */ mpt_deregister(mptctl_id); + mpt_reset_deregister(mptctl_taskmgmt_id); mpt_device_driver_deregister(MPTCTL_DRIVER); diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index a53b33214cde..e61df133a59e 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c @@ -1290,8 +1290,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Clear the TM flags */ - hd->tmPending = 0; - hd->tmState = TM_STATE_NONE; hd->abortSCpnt = NULL; /* Clear the pointer used to store diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 3efa728fc590..eb6b10eb11d2 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -553,15 +553,21 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id) { MPT_FRAME_HDR *mf; SCSITaskMgmt_t *pScsiTm; + if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) + return 0; + mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc); if (mf == NULL) { dfailprintk(ioc, printk(MYIOC_s_WARN_FMT - "%s, no msg frames @%d!!\n", - ioc->name, __func__, __LINE__)); - return 0; + "%s, no msg frames @%d!!\n", ioc->name, + __func__, __LINE__)); + goto out_fail; } + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n", + ioc->name, mf)); + /* Format the Request */ pScsiTm = (SCSITaskMgmt_t *) mf; @@ -574,9 +580,18 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id) DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt type=%d (sas device delete) fw_channel = %d fw_id = %d)\n", + ioc->name, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, channel, id)); + mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf); return 1; + + out_fail: + + mpt_clear_taskmgmt_in_progress_flag(ioc); + return 0; } /** @@ -719,9 +734,12 @@ mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) if (!ev) { dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n", ioc->name,__func__, __LINE__)); - return 0; + goto out_fail; } + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n", + ioc->name, mf)); + INIT_WORK(&ev->work, mptsas_hotplug_work); ev->ioc = ioc; ev->handle = le16_to_cpu(sas_event_data->DevHandle); @@ -734,10 +752,19 @@ mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) sizeof(__le64)); ev->sas_address = le64_to_cpu(sas_address); ev->device_info = le32_to_cpu(sas_event_data->DeviceInfo); + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt type=%d (sas device delete) fw_channel = %d fw_id = %d)\n", + ioc->name, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, channel, id)); + ev->event_type = MPTSAS_DEL_DEVICE; schedule_work(&ev->work); kfree(target_reset_list); + out_fail: + + mpt_clear_taskmgmt_in_progress_flag(ioc); + return 0; + /* * issue target reset to next device in the queue @@ -3291,8 +3318,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Clear the TM flags */ - hd->tmPending = 0; - hd->tmState = TM_STATE_NONE; hd->abortSCpnt = NULL; /* Clear the pointer used to store diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index a6a2bbda2f18..477f6f8251e5 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -1895,8 +1895,6 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt) * NONE. 
	 */
 		retval = 0;
-		hd->tmPending = 0;
-		hd->tmState = TM_STATE_NONE;
 	}
 
 	printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n",
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 8b940740292e..c5b808fd55ba 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -1474,8 +1474,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	/* Clear the TM flags
 	 */
-	hd->tmPending = 0;
-	hd->tmState = TM_STATE_NONE;
 	hd->abortSCpnt = NULL;
 
 	/* Clear the pointer used to store
-- cgit v1.2.3


From 3eb0822c6740c5564c37a2fe56449cdb4f3d800c Mon Sep 17 00:00:00 2001
From: "Kashyap, Desai"
Date: Fri, 29 May 2009 16:47:26 +0530
Subject: [SCSI] mpt fusion: Firmware event implementation using separate WorkQueue

Firmware events are now handled by a dedicated firmware event queue.
Previously they were handled in interrupt context or on the shared
Linux work queue. Firmware event handling is restructured and
optimized.

Signed-off-by: Kashyap Desai
Signed-off-by: James Bottomley
---
 drivers/message/fusion/mptbase.c |   10 +
 drivers/message/fusion/mptbase.h |    9 +
 drivers/message/fusion/mptsas.c  | 1378 ++++++++++++++++++++++++++------------
 drivers/message/fusion/mptsas.h  |   33 +-
 4 files changed, 984 insertions(+), 446 deletions(-)

(limited to 'drivers')

diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index ae203eca831f..d67b26378a52 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1931,6 +1931,11 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 	 */
 	mpt_detect_bound_ports(ioc, pdev);
 
+	INIT_LIST_HEAD(&ioc->fw_event_list);
+	spin_lock_init(&ioc->fw_event_lock);
+	snprintf(ioc->fw_event_q_name, 20, "mpt/%d", ioc->id);
+	ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
+
 	if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
 	    CAN_SLEEP)) != 0){
 		printk(MYIOC_s_ERR_FMT "didn't initialize properly!

(%d)\n", @@ -2010,6 +2015,11 @@ mpt_detach(struct pci_dev *pdev) cancel_delayed_work(&ioc->fault_reset_work); destroy_workqueue(wq); + spin_lock_irqsave(&ioc->fw_event_lock, flags); + wq = ioc->fw_event_q; + ioc->fw_event_q = NULL; + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + destroy_workqueue(wq); sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name); remove_proc_entry(pname, NULL); diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 24d60128bfe6..b6efc64e8264 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -694,9 +694,18 @@ typedef struct _MPT_ADAPTER struct net_device *netdev; struct list_head sas_topology; struct mutex sas_topology_mutex; + + struct workqueue_struct *fw_event_q; + struct list_head fw_event_list; + spinlock_t fw_event_lock; + u8 fw_events_off; /* if '1', then ignore events */ + char fw_event_q_name[20]; + struct mutex sas_discovery_mutex; u8 sas_discovery_runtime; u8 sas_discovery_ignore_events; + struct list_head sas_device_info_list; + struct mutex sas_device_info_mutex; u8 sas_discovery_quiesce_io; int sas_index; /* index refrencing */ MPT_MGMT sas_mgmt; diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index eb6b10eb11d2..22a027ec9e5d 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -95,7 +95,24 @@ static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for interna static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS; static u8 mptsasDeviceResetCtx = MPT_MAX_PROTOCOL_DRIVERS; -static void mptsas_hotplug_work(struct work_struct *work); +static void mptsas_firmware_event_work(struct work_struct *work); +static void mptsas_send_sas_event(struct fw_event_work *fw_event); +static void mptsas_send_raid_event(struct fw_event_work *fw_event); +static void mptsas_send_ir2_event(struct fw_event_work *fw_event); +static void mptsas_parse_device_info(struct sas_identify *identify, + struct mptsas_devinfo *device_info); +static inline void mptsas_set_rphy(MPT_ADAPTER *ioc, + struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy); +static struct mptsas_phyinfo *mptsas_find_phyinfo_by_sas_address + (MPT_ADAPTER *ioc, u64 sas_address); +static int mptsas_sas_device_pg0(MPT_ADAPTER *ioc, + struct mptsas_devinfo *device_info, u32 form, u32 form_specific); +static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, + struct mptsas_enclosure *enclosure, u32 form, u32 form_specific); +static int mptsas_add_end_device(MPT_ADAPTER *ioc, + struct mptsas_phyinfo *phy_info); +static void mptsas_del_end_device(MPT_ADAPTER *ioc, + struct mptsas_phyinfo *phy_info); static void mptsas_print_phy_data(MPT_ADAPTER *ioc, MPI_SAS_IO_UNIT0_PHY_DATA *phy_data) @@ -219,6 +236,100 @@ static void mptsas_print_expander_pg1(MPT_ADAPTER *ioc, SasExpanderPage1_t *pg1) le16_to_cpu(pg1->AttachedDevHandle))); } +/* inhibit sas firmware event handling */ +static void +mptsas_fw_event_off(MPT_ADAPTER *ioc) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + ioc->fw_events_off = 1; + ioc->sas_discovery_quiesce_io = 0; + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + +} + +/* enable sas firmware event handling */ +static void +mptsas_fw_event_on(MPT_ADAPTER *ioc) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + ioc->fw_events_off = 0; + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +/* queue a sas firmware event */ +static void +mptsas_add_fw_event(MPT_ADAPTER *ioc, struct 
fw_event_work *fw_event, + unsigned long delay) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + list_add_tail(&fw_event->list, &ioc->fw_event_list); + INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work); + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)\n", + ioc->name, __func__, fw_event)); + queue_delayed_work(ioc->fw_event_q, &fw_event->work, + delay); + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +/* free memory assoicated to a sas firmware event */ +static void +mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n", + ioc->name, __func__, fw_event)); + list_del(&fw_event->list); + kfree(fw_event); + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +/* walk the firmware event queue, and either stop or wait for + * outstanding events to complete */ +static void +mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc) +{ + struct fw_event_work *fw_event, *next; + struct mptsas_target_reset_event *target_reset_list, *n; + u8 flush_q; + MPT_SCSI_HOST *hd = shost_priv(ioc->sh); + + /* flush the target_reset_list */ + if (!list_empty(&hd->target_reset_list)) { + list_for_each_entry_safe(target_reset_list, n, + &hd->target_reset_list, list) { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: removing target reset for id=%d\n", + ioc->name, __func__, + target_reset_list->sas_event_data.TargetID)); + list_del(&target_reset_list->list); + kfree(target_reset_list); + } + } + + if (list_empty(&ioc->fw_event_list) || + !ioc->fw_event_q || in_interrupt()) + return; + + flush_q = 0; + list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) { + if (cancel_delayed_work(&fw_event->work)) + mptsas_free_fw_event(ioc, fw_event); + else + flush_q = 1; + } + if (flush_q) + flush_workqueue(ioc->fw_event_q); +} + + static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy) { struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); @@ -309,6 +420,7 @@ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_detai if(phy_info->port_details != port_details) continue; memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); + mptsas_set_rphy(ioc, phy_info, NULL); phy_info->port_details = NULL; } kfree(port_details); @@ -380,6 +492,157 @@ starget) phy_info->port_details->starget = starget; } +/** + * mptsas_add_device_component - + * @ioc: Pointer to MPT_ADAPTER structure + * @channel: fw mapped id's + * @id: + * @sas_address: + * @device_info: + * + **/ +static void +mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id, + u64 sas_address, u32 device_info, u16 slot, u64 enclosure_logical_id) +{ + struct mptsas_device_info *sas_info, *next; + struct scsi_device *sdev; + struct scsi_target *starget; + struct sas_rphy *rphy; + + /* + * Delete all matching devices out of the list + */ + mutex_lock(&ioc->sas_device_info_mutex); + list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list, + list) { + if ((sas_info->sas_address == sas_address || + (sas_info->fw.channel == channel && + sas_info->fw.id == id))) { + list_del(&sas_info->list); + kfree(sas_info); + } + } + + sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL); + if (!sas_info) + goto out; + + /* + * Set Firmware mapping + */ + sas_info->fw.id = id; + sas_info->fw.channel = channel; + + sas_info->sas_address = sas_address; + sas_info->device_info = device_info; 
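
Events queued through mptsas_add_fw_event() above carry their MPI event data in a buffer trailing the fw_event_work header, so one allocation covers both. A sketch of that allocation pattern under the assumption of a flexible array member (the real struct fw_event_work lives in mptsas.h and may differ in detail):

#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_fw_event {
	struct list_head list;
	struct delayed_work work;
	u32 event;
	char data[];			/* event-specific payload */
};

static struct my_fw_event *my_alloc_fw_event(u32 event,
		const void *payload, size_t payload_sz)
{
	struct my_fw_event *fw_event;

	/* one allocation covers the header and the trailing payload */
	fw_event = kzalloc(sizeof(*fw_event) + payload_sz, GFP_ATOMIC);
	if (!fw_event)
		return NULL;
	fw_event->event = event;
	memcpy(fw_event->data, payload, payload_sz);
	return fw_event;
}

mptsas_queue_device_delete() later in this patch builds its work item the same way, sized with offsetof() plus the event payload, and hands it to mptsas_add_fw_event() with a small delay; the single-threaded fw_event_q then processes events strictly in order.
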
+ sas_info->slot = slot; + sas_info->enclosure_logical_id = enclosure_logical_id; + INIT_LIST_HEAD(&sas_info->list); + list_add_tail(&sas_info->list, &ioc->sas_device_info_list); + + /* + * Set OS mapping + */ + shost_for_each_device(sdev, ioc->sh) { + starget = scsi_target(sdev); + rphy = dev_to_rphy(starget->dev.parent); + if (rphy->identify.sas_address == sas_address) { + sas_info->os.id = starget->id; + sas_info->os.channel = starget->channel; + } + } + + out: + mutex_unlock(&ioc->sas_device_info_mutex); + return; +} + +/** + * mptsas_add_device_component_by_fw - + * @ioc: Pointer to MPT_ADAPTER structure + * @channel: fw mapped id's + * @id: + * + **/ +static void +mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id) +{ + struct mptsas_devinfo sas_device; + struct mptsas_enclosure enclosure_info; + int rc; + + rc = mptsas_sas_device_pg0(ioc, &sas_device, + (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << + MPI_SAS_DEVICE_PGAD_FORM_SHIFT), + (channel << 8) + id); + if (rc) + return; + + memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure)); + mptsas_sas_enclosure_pg0(ioc, &enclosure_info, + (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE << + MPI_SAS_ENCLOS_PGAD_FORM_SHIFT), + sas_device.handle_enclosure); + + mptsas_add_device_component(ioc, sas_device.channel, + sas_device.id, sas_device.sas_address, sas_device.device_info, + sas_device.slot, enclosure_info.enclosure_logical_id); +} + +/** + * mptsas_add_device_component_starget - + * @ioc: Pointer to MPT_ADAPTER structure + * @starget: + * + **/ +static void +mptsas_add_device_component_starget(MPT_ADAPTER *ioc, + struct scsi_target *starget) +{ + VirtTarget *vtarget; + struct sas_rphy *rphy; + struct mptsas_phyinfo *phy_info = NULL; + struct mptsas_enclosure enclosure_info; + + rphy = dev_to_rphy(starget->dev.parent); + vtarget = starget->hostdata; + phy_info = mptsas_find_phyinfo_by_sas_address(ioc, + rphy->identify.sas_address); + if (!phy_info) + return; + + memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure)); + mptsas_sas_enclosure_pg0(ioc, &enclosure_info, + (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE << + MPI_SAS_ENCLOS_PGAD_FORM_SHIFT), + phy_info->attached.handle_enclosure); + + mptsas_add_device_component(ioc, phy_info->attached.channel, + phy_info->attached.id, phy_info->attached.sas_address, + phy_info->attached.device_info, + phy_info->attached.slot, enclosure_info.enclosure_logical_id); +} + +/** + * mptsas_del_device_components - Cleaning the list + * @ioc: Pointer to MPT_ADAPTER structure + * + **/ +static void +mptsas_del_device_components(MPT_ADAPTER *ioc) +{ + struct mptsas_device_info *sas_info, *next; + + mutex_lock(&ioc->sas_device_info_mutex); + list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list, + list) { + list_del(&sas_info->list); + kfree(sas_info); + } + mutex_unlock(&ioc->sas_device_info_mutex); +} + /* * mptsas_setup_wide_ports @@ -535,6 +798,29 @@ mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id) return vtarget; } +static void +mptsas_queue_device_delete(MPT_ADAPTER *ioc, + MpiEventDataSasDeviceStatusChange_t *sas_event_data) +{ + struct fw_event_work *fw_event; + int sz; + + sz = offsetof(struct fw_event_work, event_data) + + sizeof(MpiEventDataSasDeviceStatusChange_t); + fw_event = kzalloc(sz, GFP_ATOMIC); + if (!fw_event) { + printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", + ioc->name, __func__, __LINE__); + return; + } + memcpy(fw_event->event_data, sas_event_data, + sizeof(MpiEventDataSasDeviceStatusChange_t)); + fw_event->event = MPI_EVENT_SAS_DEVICE_STATUS_CHANGE; + 
fw_event->ioc = ioc; + mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1)); +} + + /** * mptsas_target_reset * @@ -654,10 +940,8 @@ mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) { MPT_SCSI_HOST *hd = shost_priv(ioc->sh); struct list_head *head = &hd->target_reset_list; - struct mptsas_hotplug_event *ev; EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data; u8 id, channel; - __le64 sas_address; struct mptsas_target_reset_event *target_reset_list; SCSITaskMgmtReply_t *pScsiTmReply; @@ -729,41 +1013,9 @@ mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) * enable work queue to remove device from upper layers */ list_del(&target_reset_list->list); - - ev = kzalloc(sizeof(*ev), GFP_ATOMIC); - if (!ev) { - dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n", - ioc->name,__func__, __LINE__)); - goto out_fail; - } - - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n", - ioc->name, mf)); - - INIT_WORK(&ev->work, mptsas_hotplug_work); - ev->ioc = ioc; - ev->handle = le16_to_cpu(sas_event_data->DevHandle); - ev->parent_handle = - le16_to_cpu(sas_event_data->ParentDevHandle); - ev->channel = channel; - ev->id =id; - ev->phy_id = sas_event_data->PhyNum; - memcpy(&sas_address, &sas_event_data->SASAddress, - sizeof(__le64)); - ev->sas_address = le64_to_cpu(sas_address); - ev->device_info = le32_to_cpu(sas_event_data->DeviceInfo); - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "TaskMgmt type=%d (sas device delete) fw_channel = %d fw_id = %d)\n", - ioc->name, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, channel, id)); - - ev->event_type = MPTSAS_DEL_DEVICE; - schedule_work(&ev->work); - kfree(target_reset_list); - - out_fail: - - mpt_clear_taskmgmt_in_progress_flag(ioc); - return 0; + if ((mptsas_find_vtarget(ioc, channel, id)) && !ioc->fw_events_off) + mptsas_queue_device_delete(ioc, + &target_reset_list->sas_event_data); /* @@ -798,37 +1050,58 @@ static int mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) { MPT_SCSI_HOST *hd; - struct mptsas_target_reset_event *target_reset_list, *n; int rc; rc = mptscsih_ioc_reset(ioc, reset_phase); + if ((ioc->bus_type != SAS) || (!rc)) + return rc; - if (ioc->bus_type != SAS) - goto out; - - if (reset_phase != MPT_IOC_POST_RESET) - goto out; - - if (!ioc->sh || !ioc->sh->hostdata) - goto out; hd = shost_priv(ioc->sh); if (!hd->ioc) goto out; - if (list_empty(&hd->target_reset_list)) - goto out; - - /* flush the target_reset_list */ - list_for_each_entry_safe(target_reset_list, n, - &hd->target_reset_list, list) { - list_del(&target_reset_list->list); - kfree(target_reset_list); + switch (reset_phase) { + case MPT_IOC_SETUP_RESET: + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__)); + mptsas_fw_event_off(ioc); + break; + case MPT_IOC_PRE_RESET: + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__)); + break; + case MPT_IOC_POST_RESET: + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__)); + if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) { + ioc->sas_mgmt.status |= MPT_MGMT_STATUS_DID_IOCRESET; + complete(&ioc->sas_mgmt.done); + } + mptsas_cleanup_fw_event_q(ioc); + mptsas_fw_event_on(ioc); + break; + default: + break; } out: return rc; } + +/** + * enum device_state - + * @DEVICE_RETRY: need to retry the TUR + * @DEVICE_ERROR: TUR return error, don't add device + * @DEVICE_READY: device can be added + * + */ +enum device_state{ + 
DEVICE_RETRY, + DEVICE_ERROR, + DEVICE_READY, +}; + static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, u32 form, u32 form_specific) @@ -894,15 +1167,268 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, return error; } +/** + * mptsas_add_end_device - report a new end device to sas transport layer + * @ioc: Pointer to MPT_ADAPTER structure + * @phy_info: decribes attached device + * + * return (0) success (1) failure + * + **/ +static int +mptsas_add_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info) +{ + struct sas_rphy *rphy; + struct sas_port *port; + struct sas_identify identify; + char *ds = NULL; + u8 fw_id; + + if (!phy_info) { + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT + "%s: exit at line=%d\n", ioc->name, + __func__, __LINE__)); + return 1; + } + + fw_id = phy_info->attached.id; + + if (mptsas_get_rphy(phy_info)) { + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT + "%s: fw_id=%d exit at line=%d\n", ioc->name, + __func__, fw_id, __LINE__)); + return 2; + } + + port = mptsas_get_port(phy_info); + if (!port) { + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT + "%s: fw_id=%d exit at line=%d\n", ioc->name, + __func__, fw_id, __LINE__)); + return 3; + } + + if (phy_info->attached.device_info & + MPI_SAS_DEVICE_INFO_SSP_TARGET) + ds = "ssp"; + if (phy_info->attached.device_info & + MPI_SAS_DEVICE_INFO_STP_TARGET) + ds = "stp"; + if (phy_info->attached.device_info & + MPI_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "sata"; + + printk(MYIOC_s_INFO_FMT "attaching %s device: fw_channel %d, fw_id %d," + " phy %d, sas_addr 0x%llx\n", ioc->name, ds, + phy_info->attached.channel, phy_info->attached.id, + phy_info->attached.phy_id, (unsigned long long) + phy_info->attached.sas_address); + + mptsas_parse_device_info(&identify, &phy_info->attached); + rphy = sas_end_device_alloc(port); + if (!rphy) { + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT + "%s: fw_id=%d exit at line=%d\n", ioc->name, + __func__, fw_id, __LINE__)); + return 5; /* non-fatal: an rphy can be added later */ + } + + rphy->identify = identify; + if (sas_rphy_add(rphy)) { + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT + "%s: fw_id=%d exit at line=%d\n", ioc->name, + __func__, fw_id, __LINE__)); + sas_rphy_free(rphy); + return 6; + } + mptsas_set_rphy(ioc, phy_info, rphy); + return 0; +} + +/** + * mptsas_del_end_device - report a deleted end device to sas transport + * layer + * @ioc: Pointer to MPT_ADAPTER structure + * @phy_info: decribes attached device + * + **/ +static void +mptsas_del_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info) +{ + struct sas_rphy *rphy; + struct sas_port *port; + struct mptsas_portinfo *port_info; + struct mptsas_phyinfo *phy_info_parent; + int i; + char *ds = NULL; + u8 fw_id; + u64 sas_address; + + if (!phy_info) + return; + + fw_id = phy_info->attached.id; + sas_address = phy_info->attached.sas_address; + + if (!phy_info->port_details) { + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT + "%s: fw_id=%d exit at line=%d\n", ioc->name, + __func__, fw_id, __LINE__)); + return; + } + rphy = mptsas_get_rphy(phy_info); + if (!rphy) { + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT + "%s: fw_id=%d exit at line=%d\n", ioc->name, + __func__, fw_id, __LINE__)); + return; + } + + if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_INITIATOR + || phy_info->attached.device_info + & MPI_SAS_DEVICE_INFO_SMP_INITIATOR + || phy_info->attached.device_info + & MPI_SAS_DEVICE_INFO_STP_INITIATOR) + ds = "initiator"; + if (phy_info->attached.device_info & + 
MPI_SAS_DEVICE_INFO_SSP_TARGET) + ds = "ssp"; + if (phy_info->attached.device_info & + MPI_SAS_DEVICE_INFO_STP_TARGET) + ds = "stp"; + if (phy_info->attached.device_info & + MPI_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "sata"; + + dev_printk(KERN_DEBUG, &rphy->dev, MYIOC_s_FMT + "removing %s device: fw_channel %d, fw_id %d, phy %d," + "sas_addr 0x%llx\n", ioc->name, ds, phy_info->attached.channel, + phy_info->attached.id, phy_info->attached.phy_id, + (unsigned long long) sas_address); + + port = mptsas_get_port(phy_info); + if (!port) { + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT + "%s: fw_id=%d exit at line=%d\n", ioc->name, + __func__, fw_id, __LINE__)); + return; + } + port_info = phy_info->portinfo; + phy_info_parent = port_info->phy_info; + for (i = 0; i < port_info->num_phys; i++, phy_info_parent++) { + if (!phy_info_parent->phy) + continue; + if (phy_info_parent->attached.sas_address != + sas_address) + continue; + dev_printk(KERN_DEBUG, &phy_info_parent->phy->dev, + MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n", + ioc->name, phy_info_parent->phy_id, + phy_info_parent->phy); + sas_port_delete_phy(port, phy_info_parent->phy); + } + + dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT + "delete port %d, sas_addr (0x%llx)\n", ioc->name, + port->port_identifier, (unsigned long long)sas_address); + sas_port_delete(port); + mptsas_set_port(ioc, phy_info, NULL); + mptsas_port_delete(ioc, phy_info->port_details); +} + +struct mptsas_phyinfo * +mptsas_refreshing_device_handles(MPT_ADAPTER *ioc, + struct mptsas_devinfo *sas_device) +{ + struct mptsas_phyinfo *phy_info; + struct mptsas_portinfo *port_info; + int i; + + phy_info = mptsas_find_phyinfo_by_sas_address(ioc, + sas_device->sas_address); + if (!phy_info) + goto out; + port_info = phy_info->portinfo; + if (!port_info) + goto out; + mutex_lock(&ioc->sas_topology_mutex); + for (i = 0; i < port_info->num_phys; i++) { + if (port_info->phy_info[i].attached.sas_address != + sas_device->sas_address) + continue; + port_info->phy_info[i].attached.channel = sas_device->channel; + port_info->phy_info[i].attached.id = sas_device->id; + port_info->phy_info[i].attached.sas_address = + sas_device->sas_address; + port_info->phy_info[i].attached.handle = sas_device->handle; + port_info->phy_info[i].attached.handle_parent = + sas_device->handle_parent; + port_info->phy_info[i].attached.handle_enclosure = + sas_device->handle_enclosure; + } + mutex_unlock(&ioc->sas_topology_mutex); + out: + return phy_info; +} + +/** + * mptsas_firmware_event_work - work thread for processing fw events + * @work: work queue payload containing info describing the event + * Context: user + * + */ +static void +mptsas_firmware_event_work(struct work_struct *work) +{ + struct fw_event_work *fw_event = + container_of(work, struct fw_event_work, work.work); + MPT_ADAPTER *ioc = fw_event->ioc; + + + /* events handling turned off during host reset */ + if (ioc->fw_events_off) { + mptsas_free_fw_event(ioc, fw_event); + return; + } + + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: fw_event=(0x%p), " + "event = (0x%02x)\n", ioc->name, __func__, fw_event, + (fw_event->event & 0xFF))); + + switch (fw_event->event) { + case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: + mptsas_send_sas_event(fw_event); + break; + case MPI_EVENT_INTEGRATED_RAID: + mptsas_send_raid_event(fw_event); + break; + case MPI_EVENT_IR2: + mptsas_send_ir2_event(fw_event); + break; + case MPI_EVENT_PERSISTENT_TABLE_FULL: + mptbase_sas_persist_operation(ioc, + MPI_SAS_OP_CLEAR_NOT_PRESENT); + mptsas_free_fw_event(ioc, fw_event); + 
break; + } +} + + + static int mptsas_slave_configure(struct scsi_device *sdev) { + struct Scsi_Host *host = sdev->host; + MPT_SCSI_HOST *hd = shost_priv(host); + MPT_ADAPTER *ioc = hd->ioc; if (sdev->channel == MPTSAS_RAID_CHANNEL) goto out; sas_read_port_mode_page(sdev); + mptsas_add_device_component_starget(ioc, scsi_target(sdev)); + out: return mptscsih_slave_configure(sdev); } @@ -984,11 +1510,15 @@ mptsas_target_destroy(struct scsi_target *starget) struct sas_rphy *rphy; struct mptsas_portinfo *p; int i; - MPT_ADAPTER *ioc = hd->ioc; + MPT_ADAPTER *ioc = hd->ioc; + VirtTarget *vtarget; if (!starget->hostdata) return; + vtarget = starget->hostdata; + + if (starget->channel == MPTSAS_RAID_CHANNEL) goto out; @@ -998,12 +1528,21 @@ mptsas_target_destroy(struct scsi_target *starget) if (p->phy_info[i].attached.sas_address != rphy->identify.sas_address) continue; + + starget_printk(KERN_INFO, starget, MYIOC_s_FMT + "delete device: fw_channel %d, fw_id %d, phy %d, " + "sas_addr 0x%llx\n", ioc->name, + p->phy_info[i].attached.channel, + p->phy_info[i].attached.id, + p->phy_info[i].attached.phy_id, (unsigned long long) + p->phy_info[i].attached.sas_address); + mptsas_set_starget(&p->phy_info[i], NULL); - goto out; } } out: + vtarget->starget = NULL; kfree(starget->hostdata); starget->hostdata = NULL; } @@ -2471,6 +3010,7 @@ mptsas_discovery_work(struct work_struct *work) kfree(ev); } + static struct mptsas_phyinfo * mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address) { @@ -2495,30 +3035,6 @@ mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address) return phy_info; } -static struct mptsas_phyinfo * -mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u8 channel, u8 id) -{ - struct mptsas_portinfo *port_info; - struct mptsas_phyinfo *phy_info = NULL; - int i; - - mutex_lock(&ioc->sas_topology_mutex); - list_for_each_entry(port_info, &ioc->sas_topology, list) { - for (i = 0; i < port_info->num_phys; i++) { - if (!mptsas_is_end_device( - &port_info->phy_info[i].attached)) - continue; - if (port_info->phy_info[i].attached.id != id) - continue; - if (port_info->phy_info[i].attached.channel != channel) - continue; - phy_info = &port_info->phy_info[i]; - break; - } - } - mutex_unlock(&ioc->sas_topology_mutex); - return phy_info; -} static struct mptsas_phyinfo * mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 channel, u8 id) @@ -2547,17 +3063,6 @@ mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 channel, u8 id) return phy_info; } -/* - * Work queue thread to clear the persitency table - */ -static void -mptsas_persist_clear_table(struct work_struct *work) -{ - MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task); - - mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT); -} - static void mptsas_reprobe_lun(struct scsi_device *sdev, void *data) { @@ -2583,7 +3088,8 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id) pRaidVolumePage0_t buffer = NULL; RaidPhysDiskPage0_t phys_disk; int i; - struct mptsas_hotplug_event *ev; + struct mptsas_phyinfo *phy_info; + struct mptsas_devinfo sas_device; memset(&cfg, 0 , sizeof(CONFIGPARMS)); memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); @@ -2623,20 +3129,16 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id) buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0) continue; - ev = kzalloc(sizeof(*ev), GFP_ATOMIC); - if (!ev) { - printk(MYIOC_s_WARN_FMT "mptsas: lost hotplug event\n", ioc->name); - goto out; - } + if (mptsas_sas_device_pg0(ioc, 
&sas_device, + (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << + MPI_SAS_DEVICE_PGAD_FORM_SHIFT), + (phys_disk.PhysDiskBus << 8) + + phys_disk.PhysDiskID)) + continue; - INIT_WORK(&ev->work, mptsas_hotplug_work); - ev->ioc = ioc; - ev->id = phys_disk.PhysDiskID; - ev->channel = phys_disk.PhysDiskBus; - ev->phys_disk_num_valid = 1; - ev->phys_disk_num = phys_disk.PhysDiskNum; - ev->event_type = MPTSAS_ADD_DEVICE; - schedule_work(&ev->work); + phy_info = mptsas_find_phyinfo_by_sas_address(ioc, + sas_device.sas_address); + mptsas_add_end_device(ioc, phy_info); } out: @@ -2648,417 +3150,385 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id) * Work queue thread to handle SAS hotplug events */ static void -mptsas_hotplug_work(struct work_struct *work) +mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event, + struct mptsas_hotplug_event *hot_plug_info) { - struct mptsas_hotplug_event *ev = - container_of(work, struct mptsas_hotplug_event, work); - - MPT_ADAPTER *ioc = ev->ioc; struct mptsas_phyinfo *phy_info; - struct sas_rphy *rphy; - struct sas_port *port; - struct scsi_device *sdev; struct scsi_target * starget; - struct sas_identify identify; - char *ds = NULL; struct mptsas_devinfo sas_device; VirtTarget *vtarget; - VirtDevice *vdevice; + int i; - mutex_lock(&ioc->sas_discovery_mutex); - switch (ev->event_type) { - case MPTSAS_DEL_DEVICE: + switch (hot_plug_info->event_type) { - phy_info = NULL; - if (ev->phys_disk_num_valid) { - if (ev->hidden_raid_component){ - if (mptsas_sas_device_pg0(ioc, &sas_device, - (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << - MPI_SAS_DEVICE_PGAD_FORM_SHIFT), - (ev->channel << 8) + ev->id)) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: exit at line=%d\n", ioc->name, - __func__, __LINE__)); - break; - } - phy_info = mptsas_find_phyinfo_by_sas_address( - ioc, sas_device.sas_address); - }else - phy_info = mptsas_find_phyinfo_by_phys_disk_num( - ioc, ev->channel, ev->phys_disk_num); + case MPTSAS_ADD_PHYSDISK: + + if (!ioc->raid_data.pIocPg2) + break; + + for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { + if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == + hot_plug_info->id) { + printk(MYIOC_s_WARN_FMT "firmware bug: unable " + "to add hidden disk - target_id matchs " + "volume_id\n", ioc->name); + mptsas_free_fw_event(ioc, fw_event); + return; + } } + mpt_findImVolumes(ioc); + case MPTSAS_ADD_DEVICE: + memset(&sas_device, 0, sizeof(struct mptsas_devinfo)); + mptsas_sas_device_pg0(ioc, &sas_device, + (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << + MPI_SAS_DEVICE_PGAD_FORM_SHIFT), + (hot_plug_info->channel << 8) + + hot_plug_info->id); + + if (!sas_device.handle) + return; + + phy_info = mptsas_refreshing_device_handles(ioc, &sas_device); if (!phy_info) - phy_info = mptsas_find_phyinfo_by_target(ioc, - ev->channel, ev->id); + break; - /* - * Sanity checks, for non-existing phys and remote rphys. 
- */ - if (!phy_info){ + if (mptsas_get_rphy(phy_info)) + break; + + mptsas_add_end_device(ioc, phy_info); + break; + + case MPTSAS_DEL_DEVICE: + phy_info = mptsas_find_phyinfo_by_sas_address(ioc, + hot_plug_info->sas_address); + mptsas_del_end_device(ioc, phy_info); + break; + + case MPTSAS_DEL_PHYSDISK: + + mpt_findImVolumes(ioc); + + phy_info = mptsas_find_phyinfo_by_phys_disk_num( + ioc, hot_plug_info->channel, + hot_plug_info->phys_disk_num); + mptsas_del_end_device(ioc, phy_info); + break; + + case MPTSAS_ADD_PHYSDISK_REPROBE: + + if (mptsas_sas_device_pg0(ioc, &sas_device, + (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << + MPI_SAS_DEVICE_PGAD_FORM_SHIFT), + (hot_plug_info->channel << 8) + hot_plug_info->id)) { dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: exit at line=%d\n", ioc->name, - __func__, __LINE__)); + "%s: fw_id=%d exit at line=%d\n", ioc->name, + __func__, hot_plug_info->id, __LINE__)); break; } - if (!phy_info->port_details) { + + phy_info = mptsas_find_phyinfo_by_sas_address( + ioc, sas_device.sas_address); + + if (!phy_info) { dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: exit at line=%d\n", ioc->name, - __func__, __LINE__)); + "%s: fw_id=%d exit at line=%d\n", ioc->name, + __func__, hot_plug_info->id, __LINE__)); break; } - rphy = mptsas_get_rphy(phy_info); - if (!rphy) { + + starget = mptsas_get_starget(phy_info); + if (!starget) { dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: exit at line=%d\n", ioc->name, - __func__, __LINE__)); + "%s: fw_id=%d exit at line=%d\n", ioc->name, + __func__, hot_plug_info->id, __LINE__)); break; } - port = mptsas_get_port(phy_info); - if (!port) { + vtarget = starget->hostdata; + if (!vtarget) { dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: exit at line=%d\n", ioc->name, - __func__, __LINE__)); + "%s: fw_id=%d exit at line=%d\n", ioc->name, + __func__, hot_plug_info->id, __LINE__)); break; } - starget = mptsas_get_starget(phy_info); - if (starget) { - vtarget = starget->hostdata; - - if (!vtarget) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: exit at line=%d\n", ioc->name, - __func__, __LINE__)); - break; - } + mpt_findImVolumes(ioc); - /* - * Handling RAID components - */ - if (ev->phys_disk_num_valid && - ev->hidden_raid_component) { - printk(MYIOC_s_INFO_FMT - "RAID Hidding: channel=%d, id=%d, " - "physdsk %d \n", ioc->name, ev->channel, - ev->id, ev->phys_disk_num); - vtarget->id = ev->phys_disk_num; - vtarget->tflags |= - MPT_TARGET_FLAGS_RAID_COMPONENT; - mptsas_reprobe_target(starget, 1); - phy_info->attached.phys_disk_num = - ev->phys_disk_num; - break; - } - } + starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Hidding: " + "fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n", + ioc->name, hot_plug_info->channel, hot_plug_info->id, + hot_plug_info->phys_disk_num, (unsigned long long) + sas_device.sas_address); - if (phy_info->attached.device_info & - MPI_SAS_DEVICE_INFO_SSP_TARGET) - ds = "ssp"; - if (phy_info->attached.device_info & - MPI_SAS_DEVICE_INFO_STP_TARGET) - ds = "stp"; - if (phy_info->attached.device_info & - MPI_SAS_DEVICE_INFO_SATA_DEVICE) - ds = "sata"; - - printk(MYIOC_s_INFO_FMT - "removing %s device, channel %d, id %d, phy %d\n", - ioc->name, ds, ev->channel, ev->id, phy_info->phy_id); - dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT - "delete port (%d)\n", ioc->name, port->port_identifier); - sas_port_delete(port); - mptsas_port_delete(ioc, phy_info->port_details); + vtarget->id = hot_plug_info->phys_disk_num; + vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT; + phy_info->attached.phys_disk_num 
= hot_plug_info->phys_disk_num; + mptsas_reprobe_target(starget, 1); break; - case MPTSAS_ADD_DEVICE: - if (ev->phys_disk_num_valid) - mpt_findImVolumes(ioc); + case MPTSAS_DEL_PHYSDISK_REPROBE: - /* - * Refresh sas device pg0 data - */ if (mptsas_sas_device_pg0(ioc, &sas_device, (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << MPI_SAS_DEVICE_PGAD_FORM_SHIFT), - (ev->channel << 8) + ev->id)) { + (hot_plug_info->channel << 8) + hot_plug_info->id)) { dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: exit at line=%d\n", ioc->name, - __func__, __LINE__)); + "%s: fw_id=%d exit at line=%d\n", + ioc->name, __func__, + hot_plug_info->id, __LINE__)); break; } - __mptsas_discovery_work(ioc); - phy_info = mptsas_find_phyinfo_by_sas_address(ioc, sas_device.sas_address); - - if (!phy_info || !phy_info->port_details) { + if (!phy_info) { dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: exit at line=%d\n", ioc->name, - __func__, __LINE__)); + "%s: fw_id=%d exit at line=%d\n", ioc->name, + __func__, hot_plug_info->id, __LINE__)); break; } starget = mptsas_get_starget(phy_info); - if (starget && (!ev->hidden_raid_component)){ - - vtarget = starget->hostdata; - - if (!vtarget) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: exit at line=%d\n", ioc->name, - __func__, __LINE__)); - break; - } - /* - * Handling RAID components - */ - if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) { - printk(MYIOC_s_INFO_FMT - "RAID Exposing: channel=%d, id=%d, " - "physdsk %d \n", ioc->name, ev->channel, - ev->id, ev->phys_disk_num); - vtarget->tflags &= - ~MPT_TARGET_FLAGS_RAID_COMPONENT; - vtarget->id = ev->id; - mptsas_reprobe_target(starget, 0); - phy_info->attached.phys_disk_num = ~0; - } + if (!starget) { + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT + "%s: fw_id=%d exit at line=%d\n", ioc->name, + __func__, hot_plug_info->id, __LINE__)); break; } - if (mptsas_get_rphy(phy_info)) { + vtarget = starget->hostdata; + if (!vtarget) { dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: exit at line=%d\n", ioc->name, - __func__, __LINE__)); - if (ev->channel) printk("%d\n", __LINE__); + "%s: fw_id=%d exit at line=%d\n", ioc->name, + __func__, hot_plug_info->id, __LINE__)); break; } - port = mptsas_get_port(phy_info); - if (!port) { + if (!(vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)) { dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: exit at line=%d\n", ioc->name, - __func__, __LINE__)); + "%s: fw_id=%d exit at line=%d\n", ioc->name, + __func__, hot_plug_info->id, __LINE__)); break; } - memcpy(&phy_info->attached, &sas_device, - sizeof(struct mptsas_devinfo)); - - if (phy_info->attached.device_info & - MPI_SAS_DEVICE_INFO_SSP_TARGET) - ds = "ssp"; - if (phy_info->attached.device_info & - MPI_SAS_DEVICE_INFO_STP_TARGET) - ds = "stp"; - if (phy_info->attached.device_info & - MPI_SAS_DEVICE_INFO_SATA_DEVICE) - ds = "sata"; - - printk(MYIOC_s_INFO_FMT - "attaching %s device, channel %d, id %d, phy %d\n", - ioc->name, ds, ev->channel, ev->id, ev->phy_id); - mptsas_parse_device_info(&identify, &phy_info->attached); - rphy = sas_end_device_alloc(port); - if (!rphy) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: exit at line=%d\n", ioc->name, - __func__, __LINE__)); - break; /* non-fatal: an rphy can be added later */ - } + mpt_findImVolumes(ioc); - rphy->identify = identify; - if (sas_rphy_add(rphy)) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: exit at line=%d\n", ioc->name, - __func__, __LINE__)); - sas_rphy_free(rphy); - break; - } - mptsas_set_rphy(ioc, phy_info, rphy); + starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID 
Exposing:" + " fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n", + ioc->name, hot_plug_info->channel, hot_plug_info->id, + hot_plug_info->phys_disk_num, (unsigned long long) + sas_device.sas_address); + + vtarget->tflags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT; + vtarget->id = hot_plug_info->id; + phy_info->attached.phys_disk_num = ~0; + mptsas_reprobe_target(starget, 0); + mptsas_add_device_component_by_fw(ioc, + hot_plug_info->channel, hot_plug_info->id); break; + case MPTSAS_ADD_RAID: - sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, - ev->id, 0); - if (sdev) { - scsi_device_put(sdev); - break; - } - printk(MYIOC_s_INFO_FMT - "attaching raid volume, channel %d, id %d\n", - ioc->name, MPTSAS_RAID_CHANNEL, ev->id); - scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ev->id, 0); + mpt_findImVolumes(ioc); + printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, " + "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, + hot_plug_info->id); + scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, + hot_plug_info->id, 0); break; + case MPTSAS_DEL_RAID: - sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, - ev->id, 0); - if (!sdev) - break; - printk(MYIOC_s_INFO_FMT - "removing raid volume, channel %d, id %d\n", - ioc->name, MPTSAS_RAID_CHANNEL, ev->id); - vdevice = sdev->hostdata; - scsi_remove_device(sdev); - scsi_device_put(sdev); + mpt_findImVolumes(ioc); + printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, " + "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, + hot_plug_info->id); + scsi_remove_device(hot_plug_info->sdev); + scsi_device_put(hot_plug_info->sdev); break; + case MPTSAS_ADD_INACTIVE_VOLUME: + + mpt_findImVolumes(ioc); mptsas_adding_inactive_raid_components(ioc, - ev->channel, ev->id); + hot_plug_info->channel, hot_plug_info->id); break; - case MPTSAS_IGNORE_EVENT: + default: break; } - mutex_unlock(&ioc->sas_discovery_mutex); - kfree(ev); + mptsas_free_fw_event(ioc, fw_event); } static void -mptsas_send_sas_event(MPT_ADAPTER *ioc, - EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data) +mptsas_send_sas_event(struct fw_event_work *fw_event) { - struct mptsas_hotplug_event *ev; - u32 device_info = le32_to_cpu(sas_event_data->DeviceInfo); - __le64 sas_address; + MPT_ADAPTER *ioc; + struct mptsas_hotplug_event hot_plug_info; + EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data; + u32 device_info; + u64 sas_address; + + ioc = fw_event->ioc; + sas_event_data = (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *) + fw_event->event_data; + device_info = le32_to_cpu(sas_event_data->DeviceInfo); if ((device_info & - (MPI_SAS_DEVICE_INFO_SSP_TARGET | - MPI_SAS_DEVICE_INFO_STP_TARGET | - MPI_SAS_DEVICE_INFO_SATA_DEVICE )) == 0) + (MPI_SAS_DEVICE_INFO_SSP_TARGET | + MPI_SAS_DEVICE_INFO_STP_TARGET | + MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0) { + mptsas_free_fw_event(ioc, fw_event); + return; + } + + if (sas_event_data->ReasonCode == + MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED) { + mptbase_sas_persist_operation(ioc, + MPI_SAS_OP_CLEAR_NOT_PRESENT); + mptsas_free_fw_event(ioc, fw_event); return; + } switch (sas_event_data->ReasonCode) { case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: - - mptsas_target_reset_queue(ioc, sas_event_data); - break; - case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: - ev = kzalloc(sizeof(*ev), GFP_ATOMIC); - if (!ev) { - printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name); - break; - } - - INIT_WORK(&ev->work, mptsas_hotplug_work); - ev->ioc = ioc; - ev->handle = le16_to_cpu(sas_event_data->DevHandle); - ev->parent_handle = - le16_to_cpu(sas_event_data->ParentDevHandle); - 
ev->channel = sas_event_data->Bus; - ev->id = sas_event_data->TargetID; - ev->phy_id = sas_event_data->PhyNum; + memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event)); + hot_plug_info.handle = le16_to_cpu(sas_event_data->DevHandle); + hot_plug_info.channel = sas_event_data->Bus; + hot_plug_info.id = sas_event_data->TargetID; + hot_plug_info.phy_id = sas_event_data->PhyNum; memcpy(&sas_address, &sas_event_data->SASAddress, - sizeof(__le64)); - ev->sas_address = le64_to_cpu(sas_address); - ev->device_info = device_info; - + sizeof(u64)); + hot_plug_info.sas_address = le64_to_cpu(sas_address); + hot_plug_info.device_info = device_info; if (sas_event_data->ReasonCode & MPI_EVENT_SAS_DEV_STAT_RC_ADDED) - ev->event_type = MPTSAS_ADD_DEVICE; + hot_plug_info.event_type = MPTSAS_ADD_DEVICE; else - ev->event_type = MPTSAS_DEL_DEVICE; - schedule_work(&ev->work); + hot_plug_info.event_type = MPTSAS_DEL_DEVICE; + mptsas_hotplug_work(ioc, fw_event, &hot_plug_info); break; + case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED: - /* - * Persistent table is full. - */ - INIT_WORK(&ioc->sas_persist_task, - mptsas_persist_clear_table); - schedule_work(&ioc->sas_persist_task); + mptbase_sas_persist_operation(ioc, + MPI_SAS_OP_CLEAR_NOT_PRESENT); + mptsas_free_fw_event(ioc, fw_event); break; - /* - * TODO, handle other events - */ + case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: - case MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: + /* TODO */ case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: - case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: - case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: - case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: - case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: + /* TODO */ default: + mptsas_free_fw_event(ioc, fw_event); break; } } + static void -mptsas_send_raid_event(MPT_ADAPTER *ioc, - EVENT_DATA_RAID *raid_event_data) +mptsas_send_raid_event(struct fw_event_work *fw_event) { - struct mptsas_hotplug_event *ev; - int status = le32_to_cpu(raid_event_data->SettingsStatus); - int state = (status >> 8) & 0xff; - - if (ioc->bus_type != SAS) - return; - - ev = kzalloc(sizeof(*ev), GFP_ATOMIC); - if (!ev) { - printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name); - return; + MPT_ADAPTER *ioc; + EVENT_DATA_RAID *raid_event_data; + struct mptsas_hotplug_event hot_plug_info; + int status; + int state; + struct scsi_device *sdev = NULL; + VirtDevice *vdevice = NULL; + RaidPhysDiskPage0_t phys_disk; + + ioc = fw_event->ioc; + raid_event_data = (EVENT_DATA_RAID *)fw_event->event_data; + status = le32_to_cpu(raid_event_data->SettingsStatus); + state = (status >> 8) & 0xff; + + memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event)); + hot_plug_info.id = raid_event_data->VolumeID; + hot_plug_info.channel = raid_event_data->VolumeBus; + hot_plug_info.phys_disk_num = raid_event_data->PhysDiskNum; + + if (raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_DELETED || + raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_CREATED || + raid_event_data->ReasonCode == + MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED) { + sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, + hot_plug_info.id, 0); + hot_plug_info.sdev = sdev; + if (sdev) + vdevice = sdev->hostdata; } - INIT_WORK(&ev->work, mptsas_hotplug_work); - ev->ioc = ioc; - ev->id = raid_event_data->VolumeID; - ev->channel = raid_event_data->VolumeBus; - ev->event_type = MPTSAS_IGNORE_EVENT; + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: " + "ReasonCode=%02x\n", ioc->name, __func__, + 
raid_event_data->ReasonCode)); switch (raid_event_data->ReasonCode) { case MPI_EVENT_RAID_RC_PHYSDISK_DELETED: - ev->phys_disk_num_valid = 1; - ev->phys_disk_num = raid_event_data->PhysDiskNum; - ev->event_type = MPTSAS_ADD_DEVICE; + hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK_REPROBE; break; case MPI_EVENT_RAID_RC_PHYSDISK_CREATED: - ev->phys_disk_num_valid = 1; - ev->phys_disk_num = raid_event_data->PhysDiskNum; - ev->hidden_raid_component = 1; - ev->event_type = MPTSAS_DEL_DEVICE; + hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK_REPROBE; break; case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED: switch (state) { case MPI_PD_STATE_ONLINE: case MPI_PD_STATE_NOT_COMPATIBLE: - ev->phys_disk_num_valid = 1; - ev->phys_disk_num = raid_event_data->PhysDiskNum; - ev->hidden_raid_component = 1; - ev->event_type = MPTSAS_ADD_DEVICE; + mpt_raid_phys_disk_pg0(ioc, + raid_event_data->PhysDiskNum, &phys_disk); + hot_plug_info.id = phys_disk.PhysDiskID; + hot_plug_info.channel = phys_disk.PhysDiskBus; + hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK; break; + case MPI_PD_STATE_FAILED: case MPI_PD_STATE_MISSING: case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST: case MPI_PD_STATE_FAILED_AT_HOST_REQUEST: case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON: - ev->phys_disk_num_valid = 1; - ev->phys_disk_num = raid_event_data->PhysDiskNum; - ev->event_type = MPTSAS_DEL_DEVICE; + hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK; break; default: break; } break; case MPI_EVENT_RAID_RC_VOLUME_DELETED: - ev->event_type = MPTSAS_DEL_RAID; + if (!sdev) + break; + vdevice->vtarget->deleted = 1; /* block IO */ + hot_plug_info.event_type = MPTSAS_DEL_RAID; break; case MPI_EVENT_RAID_RC_VOLUME_CREATED: - ev->event_type = MPTSAS_ADD_RAID; + if (sdev) { + scsi_device_put(sdev); + break; + } + hot_plug_info.event_type = MPTSAS_ADD_RAID; break; case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED: + if (!(status & MPI_RAIDVOL0_STATUS_FLAG_ENABLED)) { + if (!sdev) + break; + vdevice->vtarget->deleted = 1; /* block IO */ + hot_plug_info.event_type = MPTSAS_DEL_RAID; + break; + } switch (state) { case MPI_RAIDVOL0_STATUS_STATE_FAILED: case MPI_RAIDVOL0_STATUS_STATE_MISSING: - ev->event_type = MPTSAS_DEL_RAID; + if (!sdev) + break; + vdevice->vtarget->deleted = 1; /* block IO */ + hot_plug_info.event_type = MPTSAS_DEL_RAID; break; case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL: case MPI_RAIDVOL0_STATUS_STATE_DEGRADED: - ev->event_type = MPTSAS_ADD_RAID; + if (sdev) { + scsi_device_put(sdev); + break; + } + hot_plug_info.event_type = MPTSAS_ADD_RAID; break; default: break; @@ -3067,7 +3537,11 @@ mptsas_send_raid_event(MPT_ADAPTER *ioc, default: break; } - schedule_work(&ev->work); + + if (hot_plug_info.event_type != MPTSAS_IGNORE_EVENT) + mptsas_hotplug_work(ioc, fw_event, &hot_plug_info); + else + mptsas_free_fw_event(ioc, fw_event); } static void @@ -3106,76 +3580,88 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc, * */ static void -mptsas_send_ir2_event(MPT_ADAPTER *ioc, PTR_MPI_EVENT_DATA_IR2 ir2_data) +mptsas_send_ir2_event(struct fw_event_work *fw_event) { - struct mptsas_hotplug_event *ev; - - if (ir2_data->ReasonCode != - MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED) - return; - - ev = kzalloc(sizeof(*ev), GFP_ATOMIC); - if (!ev) + MPT_ADAPTER *ioc; + struct mptsas_hotplug_event hot_plug_info; + MPI_EVENT_DATA_IR2 *ir2_data; + u8 reasonCode; + + ioc = fw_event->ioc; + ir2_data = (MPI_EVENT_DATA_IR2 *)fw_event->event_data; + reasonCode = ir2_data->ReasonCode; + + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: " + "ReasonCode=%02x\n", ioc->name, 
__func__, reasonCode)); + + memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event)); + hot_plug_info.id = ir2_data->TargetID; + hot_plug_info.channel = ir2_data->Bus; + switch (reasonCode) { + case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED: + hot_plug_info.event_type = MPTSAS_ADD_INACTIVE_VOLUME; + break; + default: + mptsas_free_fw_event(ioc, fw_event); return; - - INIT_WORK(&ev->work, mptsas_hotplug_work); - ev->ioc = ioc; - ev->id = ir2_data->TargetID; - ev->channel = ir2_data->Bus; - ev->event_type = MPTSAS_ADD_INACTIVE_VOLUME; - - schedule_work(&ev->work); -}; + } + mptsas_hotplug_work(ioc, fw_event, &hot_plug_info); +} static int mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) { - int rc=1; - u8 event = le32_to_cpu(reply->Event) & 0xFF; - - if (!ioc->sh) - goto out; + u32 event = le32_to_cpu(reply->Event); + int sz, event_data_sz; + struct fw_event_work *fw_event; + unsigned long delay; - /* - * sas_discovery_ignore_events - * - * This flag is to prevent anymore processing of - * sas events once mptsas_remove function is called. - */ - if (ioc->sas_discovery_ignore_events) { - rc = mptscsih_event_process(ioc, reply); - goto out; - } + /* events turned off due to host reset or driver unloading */ + if (ioc->fw_events_off) + return 0; + delay = msecs_to_jiffies(1); switch (event) { case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: - mptsas_send_sas_event(ioc, - (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data); - break; - case MPI_EVENT_INTEGRATED_RAID: - mptsas_send_raid_event(ioc, - (EVENT_DATA_RAID *)reply->Data); - break; - case MPI_EVENT_PERSISTENT_TABLE_FULL: - INIT_WORK(&ioc->sas_persist_task, - mptsas_persist_clear_table); - schedule_work(&ioc->sas_persist_task); + { + EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data = + (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data; + + if (sas_event_data->ReasonCode == + MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) { + mptsas_target_reset_queue(ioc, sas_event_data); + return 0; + } break; - case MPI_EVENT_SAS_DISCOVERY: + } + case MPI_EVENT_SAS_DISCOVERY: mptsas_send_discovery_event(ioc, (EVENT_DATA_SAS_DISCOVERY *)reply->Data); break; + case MPI_EVENT_INTEGRATED_RAID: + case MPI_EVENT_PERSISTENT_TABLE_FULL: case MPI_EVENT_IR2: - mptsas_send_ir2_event(ioc, - (PTR_MPI_EVENT_DATA_IR2)reply->Data); + case MPI_EVENT_SAS_PHY_LINK_STATUS: + case MPI_EVENT_QUEUE_FULL: break; default: - rc = mptscsih_event_process(ioc, reply); - break; + return 0; } - out: - return rc; + event_data_sz = ((reply->MsgLength * 4) - + offsetof(EventNotificationReply_t, Data)); + sz = offsetof(struct fw_event_work, event_data) + event_data_sz; + fw_event = kzalloc(sz, GFP_ATOMIC); + if (!fw_event) { + printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", ioc->name, + __func__, __LINE__); + return 0; + } + memcpy(fw_event->event_data, reply->Data, event_data_sz); + fw_event->event = event; + fw_event->ioc = ioc; + mptsas_add_fw_event(ioc, fw_event, delay); + return 0; } static int @@ -3197,6 +3683,7 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) return r; ioc = pci_get_drvdata(pdev); + mptsas_fw_event_off(ioc); ioc->DoneCtx = mptsasDoneCtx; ioc->TaskCtx = mptsasTaskCtx; ioc->InternalCtx = mptsasInternalCtx; @@ -3339,6 +3826,9 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) hd->last_queue_full = 0; INIT_LIST_HEAD(&hd->target_reset_list); + INIT_LIST_HEAD(&ioc->sas_device_info_list); + mutex_init(&ioc->sas_device_info_mutex); + spin_unlock_irqrestore(&ioc->FreeQlock, flags); if (ioc->sas_data.ptClear==1) { 
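A quick sketch of the deferral idiom introduced above: the rewritten mptsas_event_process() no longer interprets firmware events in the reply path. It measures the reply's variable-length payload, copies it into a struct fw_event_work whose trailing event_data[] is sized at allocation time, and queues the item for later handling. The userspace sketch below models only that copy-and-defer step; fw_event, queue_fw_event and the FIFO link are illustrative names, not the driver's own symbols.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for the driver's fw_event_work: a fixed
 * header plus a flexible trailing array holding a copy of the
 * variable-length event data. */
struct fw_event {
	struct fw_event *next;   /* simple FIFO link for the sketch */
	unsigned int event;      /* event code from the reply frame */
	size_t data_len;         /* payload size, varies per event */
	unsigned char data[];    /* copied event payload */
};

/* Copy the payload and hand back a queueable work item; the caller
 * (the interrupt reply path in the real driver) never parses it. */
static struct fw_event *queue_fw_event(unsigned int event,
				       const void *payload, size_t len)
{
	struct fw_event *fw = malloc(sizeof(*fw) + len);

	if (!fw)
		return NULL;     /* the driver logs and drops the event */
	fw->next = NULL;
	fw->event = event;
	fw->data_len = len;
	memcpy(fw->data, payload, len);
	return fw;
}

int main(void)
{
	unsigned char reply[4] = { 0x0f, 0x01, 0x02, 0x03 };
	struct fw_event *fw = queue_fw_event(0x0f, reply, sizeof(reply));

	if (fw) {
		printf("queued event 0x%02x, %zu payload bytes\n",
		       fw->event, fw->data_len);
		free(fw);
	}
	return 0;
}

The driver's version allocates with GFP_ATOMIC because it runs from the reply path, and it queues onto the single-threaded workqueue created earlier, so events are processed strictly in arrival order.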
@@ -3354,7 +3844,7 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 
 	mptsas_scan_sas_topology(ioc);
-
+	mptsas_fw_event_on(ioc);
 	return 0;
 
  out_mptsas_probe:
@@ -3368,7 +3858,8 @@ mptsas_shutdown(struct pci_dev *pdev)
 {
 	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
 
-	ioc->sas_discovery_quiesce_io = 0;
+	mptsas_fw_event_off(ioc);
+	mptsas_cleanup_fw_event_q(ioc);
 }
 
 static void __devexit mptsas_remove(struct pci_dev *pdev)
@@ -3379,6 +3870,8 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
 
 	mptsas_shutdown(pdev);
 
+	mptsas_del_device_components(ioc);
+
 	ioc->sas_discovery_ignore_events = 1;
 	sas_remove_host(ioc->sh);
 
@@ -3387,6 +3880,7 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
 		list_del(&p->list);
 		for (i = 0 ; i < p->num_phys ; i++)
 			mptsas_port_delete(ioc, p->phy_info[i].port_details);
+		kfree(p->phy_info);
 		kfree(p);
 	}
 
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index bf528a5b59b1..9e0885a86d23 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -61,12 +61,30 @@ enum mptsas_hotplug_action {
 	MPTSAS_DEL_DEVICE,
 	MPTSAS_ADD_RAID,
 	MPTSAS_DEL_RAID,
+	MPTSAS_ADD_PHYSDISK,
+	MPTSAS_ADD_PHYSDISK_REPROBE,
+	MPTSAS_DEL_PHYSDISK,
+	MPTSAS_DEL_PHYSDISK_REPROBE,
 	MPTSAS_ADD_INACTIVE_VOLUME,
 	MPTSAS_IGNORE_EVENT,
 };
 
+struct mptsas_mapping{
+	u8	id;
+	u8	channel;
+};
+
+struct mptsas_device_info {
+	struct list_head	list;
+	struct mptsas_mapping	os;	/* operating system mapping*/
+	struct mptsas_mapping	fw;	/* firmware mapping */
+	u64	sas_address;
+	u32	device_info;	/* specific bits for devices */
+	u16	slot;		/* enclosure slot id */
+	u64	enclosure_logical_id;	/*enclosure address */
+};
+
 struct mptsas_hotplug_event {
-	struct work_struct	work;
 	MPT_ADAPTER		*ioc;
 	enum mptsas_hotplug_action event_type;
 	u64			sas_address;
@@ -74,11 +92,18 @@ struct mptsas_hotplug_event {
 	u8			id;
 	u32			device_info;
 	u16			handle;
-	u16			parent_handle;
 	u8			phy_id;
-	u8			phys_disk_num_valid;	/* hrc (hidden raid component) */
 	u8			phys_disk_num;	/* hrc - unique index*/
-	u8			hidden_raid_component;	/* hrc - don't expose*/
+	struct scsi_device	*sdev;
+};
+
+struct fw_event_work {
+	struct list_head	list;
+	struct delayed_work	work;
+	MPT_ADAPTER	*ioc;
+	u32	event;
+	u8	retries;
+	u8	event_data[1];
 };
 
 struct mptsas_discovery_event {
-- cgit v1.2.3

From f9c34022eae9c76465dc2ec8805b9905e171ef40 Mon Sep 17 00:00:00 2001
From: "Kashyap, Desai"
Date: Fri, 29 May 2009 16:49:36 +0530
Subject: [SCSI] mpt fusion: SAS topology scan changes, expander events

The SAS topology scan is restructured. HBA firmware now generates more
events: expander status-change events are added, and link status events
are handled as part of optimizing the SAS topology scan.
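A sketch of the discovery idiom this commit leans on, ahead of the diff: mptsas_probe_expanders() and mptsas_probe_devices() both walk firmware objects by handle, starting from 0xFFFF (the MPI "give me the first object" convention used with the GET_NEXT_HANDLE page-address form) and re-reading config pages until a read fails. The stub read_next_handle() below stands in for mptsas_sas_expander_pg0()/mptsas_sas_device_pg0(), and the three-object firmware is invented purely for illustration.

#include <stdio.h>

/* Stand-in for a GET_NEXT_HANDLE config-page read: pretend the
 * firmware knows three objects with handles 1..3.  Returns 0 and the
 * next handle on success, -1 when nothing follows 'handle'. */
static int read_next_handle(unsigned int handle, unsigned int *next)
{
	unsigned int cur = (handle == 0xFFFF) ? 0 : handle;

	if (cur >= 3)
		return -1;       /* no more objects: the walk terminates */
	*next = cur + 1;
	return 0;
}

int main(void)
{
	unsigned int handle = 0xFFFF;    /* MPI convention: start of walk */
	unsigned int next;

	while (!read_next_handle(handle, &next)) {
		handle = next;   /* advance, exactly like the driver loops */
		printf("discovered object, handle 0x%04x\n", handle);
	}
	return 0;
}

Keeping the walk stateless on the host side is the point of the restructure: after a reset the same loop simply re-enumerates whatever the firmware currently reports, which the rescan patch that follows builds on.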
Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.h | 5 + drivers/message/fusion/mptsas.c | 746 ++++++++++++++++++++++++++------------- 2 files changed, 515 insertions(+), 236 deletions(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index b6efc64e8264..8d1aadb6b4ac 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -704,6 +704,11 @@ typedef struct _MPT_ADAPTER struct mutex sas_discovery_mutex; u8 sas_discovery_runtime; u8 sas_discovery_ignore_events; + + /* port_info object for the host */ + struct mptsas_portinfo *hba_port_info; + u64 hba_port_sas_addr; + u16 hba_port_num_phy; struct list_head sas_device_info_list; struct mutex sas_device_info_mutex; u8 sas_discovery_quiesce_io; diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 22a027ec9e5d..7f2f76f0db3f 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -113,6 +113,12 @@ static int mptsas_add_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info); static void mptsas_del_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info); +static void mptsas_send_link_status_event(struct fw_event_work *fw_event); +static struct mptsas_portinfo *mptsas_find_portinfo_by_sas_address + (MPT_ADAPTER *ioc, u64 sas_address); +static void mptsas_expander_delete(MPT_ADAPTER *ioc, + struct mptsas_portinfo *port_info, u8 force); +static void mptsas_send_expander_event(struct fw_event_work *fw_event); static void mptsas_print_phy_data(MPT_ADAPTER *ioc, MPI_SAS_IO_UNIT0_PHY_DATA *phy_data) @@ -342,20 +348,6 @@ static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy) return ((MPT_SCSI_HOST *)shost->hostdata)->ioc; } -static struct mptsas_portinfo * -mptsas_get_hba_portinfo(MPT_ADAPTER *ioc) -{ - struct list_head *head = &ioc->sas_topology; - struct mptsas_portinfo *pi = NULL; - - /* always the first entry on sas_topology list */ - - if (!list_empty(head)) - pi = list_entry(head->next, struct mptsas_portinfo, list); - - return pi; -} - /* * mptsas_find_portinfo_by_handle * @@ -377,6 +369,38 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle) return rc; } +/** + * mptsas_find_portinfo_by_sas_address - + * @ioc: Pointer to MPT_ADAPTER structure + * @handle: + * + * This function should be called with the sas_topology_mutex already held + * + **/ +static struct mptsas_portinfo * +mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address) +{ + struct mptsas_portinfo *port_info, *rc = NULL; + int i; + + if (sas_address >= ioc->hba_port_sas_addr && + sas_address < (ioc->hba_port_sas_addr + + ioc->hba_port_num_phy)) + return ioc->hba_port_info; + + mutex_lock(&ioc->sas_topology_mutex); + list_for_each_entry(port_info, &ioc->sas_topology, list) + for (i = 0; i < port_info->num_phys; i++) + if (port_info->phy_info[i].identify.sas_address == + sas_address) { + rc = port_info; + goto out; + } + out: + mutex_unlock(&ioc->sas_topology_mutex); + return rc; +} + /* * Returns true if there is a scsi end device */ @@ -940,7 +964,6 @@ mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) { MPT_SCSI_HOST *hd = shost_priv(ioc->sh); struct list_head *head = &hd->target_reset_list; - EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data; u8 id, channel; struct mptsas_target_reset_event *target_reset_list; SCSITaskMgmtReply_t *pScsiTmReply; @@ -995,7 +1018,6 @@ mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, 
MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) ioc->name, jiffies_to_msecs(jiffies - target_reset_list->time_count)/1000)); - sas_event_data = &target_reset_list->sas_event_data; id = pScsiTmReply->TargetID; channel = pScsiTmReply->Bus; target_reset_list->time_count = jiffies; @@ -1410,6 +1432,12 @@ mptsas_firmware_event_work(struct work_struct *work) MPI_SAS_OP_CLEAR_NOT_PRESENT); mptsas_free_fw_event(ioc, fw_event); break; + case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE: + mptsas_send_expander_event(fw_event); + break; + case MPI_EVENT_SAS_PHY_LINK_STATUS: + mptsas_send_link_status_event(fw_event); + break; } } @@ -1909,7 +1937,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, struct mptsas_portinfo *port_info; mutex_lock(&ioc->sas_topology_mutex); - port_info = mptsas_get_hba_portinfo(ioc); + port_info = ioc->hba_port_info; if (port_info && port_info->phy_info) sas_address = port_info->phy_info[0].phy->identify.sas_address; @@ -2646,9 +2674,7 @@ static int mptsas_probe_one_phy(struct device *dev, struct mptsas_portinfo *port_info; int i; - mutex_lock(&ioc->sas_topology_mutex); - port_info = mptsas_get_hba_portinfo(ioc); - mutex_unlock(&ioc->sas_topology_mutex); + port_info = ioc->hba_port_info; for (i = 0; i < port_info->num_phys; i++) if (port_info->phy_info[i].identify.sas_address == @@ -2707,7 +2733,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc) struct mptsas_portinfo *port_info, *hba; int error = -ENOMEM, i; - hba = kzalloc(sizeof(*port_info), GFP_KERNEL); + hba = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL); if (! hba) goto out; @@ -2717,9 +2743,10 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc) mptsas_sas_io_unit_pg1(ioc); mutex_lock(&ioc->sas_topology_mutex); - port_info = mptsas_get_hba_portinfo(ioc); + port_info = ioc->hba_port_info; if (!port_info) { - port_info = hba; + ioc->hba_port_info = port_info = hba; + ioc->hba_port_num_phy = port_info->num_phys; list_add_tail(&port_info->list, &ioc->sas_topology); } else { for (i = 0; i < hba->num_phys; i++) { @@ -2735,15 +2762,22 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc) hba = NULL; } mutex_unlock(&ioc->sas_topology_mutex); +#if defined(CPQ_CIM) + ioc->num_ports = port_info->num_phys; +#endif for (i = 0; i < port_info->num_phys; i++) { mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i], (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER << MPI_SAS_PHY_PGAD_FORM_SHIFT), i); - + port_info->phy_info[i].identify.handle = + port_info->phy_info[i].handle; mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify, (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << MPI_SAS_DEVICE_PGAD_FORM_SHIFT), - port_info->phy_info[i].handle); + port_info->phy_info[i].identify.handle); + if (!ioc->hba_port_sas_addr) + ioc->hba_port_sas_addr = + port_info->phy_info[i].identify.sas_address; port_info->phy_info[i].identify.phy_id = port_info->phy_info[i].phy_id = i; if (port_info->phy_info[i].attached.handle) @@ -2768,248 +2802,497 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc) return error; } -static int -mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle) +static void +mptsas_expander_refresh(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info) { - struct mptsas_portinfo *port_info, *p, *ex; - struct device *parent; - struct sas_rphy *rphy; - int error = -ENOMEM, i, j; - - ex = kzalloc(sizeof(*port_info), GFP_KERNEL); - if (!ex) - goto out; - - error = mptsas_sas_expander_pg0(ioc, ex, - (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE << - MPI_SAS_EXPAND_PGAD_FORM_SHIFT), *handle); - if (error) - goto out_free_port_info; - - *handle = ex->phy_info[0].handle; - - 
mutex_lock(&ioc->sas_topology_mutex); - port_info = mptsas_find_portinfo_by_handle(ioc, *handle); - if (!port_info) { - port_info = ex; - list_add_tail(&port_info->list, &ioc->sas_topology); - } else { - for (i = 0; i < ex->num_phys; i++) { - port_info->phy_info[i].handle = - ex->phy_info[i].handle; - port_info->phy_info[i].port_id = - ex->phy_info[i].port_id; - } - kfree(ex->phy_info); - kfree(ex); - ex = NULL; - } - mutex_unlock(&ioc->sas_topology_mutex); + struct mptsas_portinfo *parent; + struct device *parent_dev; + struct sas_rphy *rphy; + int i; + u64 sas_address; /* expander sas address */ + u32 handle; + handle = port_info->phy_info[0].handle; + sas_address = port_info->phy_info[0].identify.sas_address; for (i = 0; i < port_info->num_phys; i++) { mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i], - (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM << - MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle); + (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM << + MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + handle); - if (port_info->phy_info[i].identify.handle) { - mptsas_sas_device_pg0(ioc, - &port_info->phy_info[i].identify, - (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << - MPI_SAS_DEVICE_PGAD_FORM_SHIFT), - port_info->phy_info[i].identify.handle); - port_info->phy_info[i].identify.phy_id = - port_info->phy_info[i].phy_id; - } + mptsas_sas_device_pg0(ioc, + &port_info->phy_info[i].identify, + (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << + MPI_SAS_DEVICE_PGAD_FORM_SHIFT), + port_info->phy_info[i].identify.handle); + port_info->phy_info[i].identify.phy_id = + port_info->phy_info[i].phy_id; if (port_info->phy_info[i].attached.handle) { mptsas_sas_device_pg0(ioc, - &port_info->phy_info[i].attached, - (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << - MPI_SAS_DEVICE_PGAD_FORM_SHIFT), - port_info->phy_info[i].attached.handle); + &port_info->phy_info[i].attached, + (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << + MPI_SAS_DEVICE_PGAD_FORM_SHIFT), + port_info->phy_info[i].attached.handle); port_info->phy_info[i].attached.phy_id = port_info->phy_info[i].phy_id; } } - parent = &ioc->sh->shost_gendev; - for (i = 0; i < port_info->num_phys; i++) { - mutex_lock(&ioc->sas_topology_mutex); - list_for_each_entry(p, &ioc->sas_topology, list) { - for (j = 0; j < p->num_phys; j++) { - if (port_info->phy_info[i].identify.handle != - p->phy_info[j].attached.handle) - continue; - rphy = mptsas_get_rphy(&p->phy_info[j]); - parent = &rphy->dev; - } - } + mutex_lock(&ioc->sas_topology_mutex); + parent = mptsas_find_portinfo_by_handle(ioc, + port_info->phy_info[0].identify.handle_parent); + if (!parent) { mutex_unlock(&ioc->sas_topology_mutex); + return; + } + for (i = 0, parent_dev = NULL; i < parent->num_phys && !parent_dev; + i++) { + if (parent->phy_info[i].attached.sas_address == sas_address) { + rphy = mptsas_get_rphy(&parent->phy_info[i]); + parent_dev = &rphy->dev; + } } + mutex_unlock(&ioc->sas_topology_mutex); mptsas_setup_wide_ports(ioc, port_info); - for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++) - mptsas_probe_one_phy(parent, &port_info->phy_info[i], + mptsas_probe_one_phy(parent_dev, &port_info->phy_info[i], ioc->sas_index, 0); +} - return 0; +static void +mptsas_expander_event_add(MPT_ADAPTER *ioc, + MpiEventDataSasExpanderStatusChange_t *expander_data) +{ + struct mptsas_portinfo *port_info; + int i; + __le64 sas_address; - out_free_port_info: - if (ex) { - kfree(ex->phy_info); - kfree(ex); + port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL); + if (!port_info) + BUG(); + port_info->num_phys = (expander_data->NumPhys) ? 
+ expander_data->NumPhys : 1; + port_info->phy_info = kcalloc(port_info->num_phys, + sizeof(struct mptsas_phyinfo), GFP_KERNEL); + if (!port_info->phy_info) + BUG(); + memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64)); + for (i = 0; i < port_info->num_phys; i++) { + port_info->phy_info[i].portinfo = port_info; + port_info->phy_info[i].handle = + le16_to_cpu(expander_data->DevHandle); + port_info->phy_info[i].identify.sas_address = + le64_to_cpu(sas_address); + port_info->phy_info[i].identify.handle_parent = + le16_to_cpu(expander_data->ParentDevHandle); } - out: - return error; + + mutex_lock(&ioc->sas_topology_mutex); + list_add_tail(&port_info->list, &ioc->sas_topology); + mutex_unlock(&ioc->sas_topology_mutex); + + printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, " + "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys, + (unsigned long long)sas_address); + + mptsas_expander_refresh(ioc, port_info); } -/* - * mptsas_delete_expander_phys - * - * - * This will traverse topology, and remove expanders - * that are no longer present - */ +/** + * mptsas_delete_expander_siblings - remove siblings attached to expander + * @ioc: Pointer to MPT_ADAPTER structure + * @parent: the parent port_info object + * @expander: the expander port_info object + **/ static void -mptsas_delete_expander_phys(MPT_ADAPTER *ioc) +mptsas_delete_expander_siblings(MPT_ADAPTER *ioc, struct mptsas_portinfo + *parent, struct mptsas_portinfo *expander) { - struct mptsas_portinfo buffer; - struct mptsas_portinfo *port_info, *n, *parent; struct mptsas_phyinfo *phy_info; - struct sas_port * port; + struct mptsas_portinfo *port_info; + struct sas_rphy *rphy; int i; - u64 expander_sas_address; - mutex_lock(&ioc->sas_topology_mutex); - list_for_each_entry_safe(port_info, n, &ioc->sas_topology, list) { + phy_info = expander->phy_info; + for (i = 0; i < expander->num_phys; i++, phy_info++) { + rphy = mptsas_get_rphy(phy_info); + if (!rphy) + continue; + if (rphy->identify.device_type == SAS_END_DEVICE) + mptsas_del_end_device(ioc, phy_info); + } - if (!(port_info->phy_info[0].identify.device_info & - MPI_SAS_DEVICE_INFO_SMP_TARGET)) + phy_info = expander->phy_info; + for (i = 0; i < expander->num_phys; i++, phy_info++) { + rphy = mptsas_get_rphy(phy_info); + if (!rphy) continue; + if (rphy->identify.device_type == + MPI_SAS_DEVICE_INFO_EDGE_EXPANDER || + rphy->identify.device_type == + MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER) { + port_info = mptsas_find_portinfo_by_sas_address(ioc, + rphy->identify.sas_address); + if (!port_info) + continue; + if (port_info == parent) /* backlink rphy */ + continue; + /* + Delete this expander even if the expdevpage is exists + because the parent expander is already deleted + */ + mptsas_expander_delete(ioc, port_info, 1); + } + } +} - if (mptsas_sas_expander_pg0(ioc, &buffer, - (MPI_SAS_EXPAND_PGAD_FORM_HANDLE << - MPI_SAS_EXPAND_PGAD_FORM_SHIFT), - port_info->phy_info[0].handle)) { - /* - * Obtain the port_info instance to the parent port - */ - parent = mptsas_find_portinfo_by_handle(ioc, - port_info->phy_info[0].identify.handle_parent); +/** + * mptsas_expander_delete - remove this expander + * @ioc: Pointer to MPT_ADAPTER structure + * @port_info: expander port_info struct + * @force: Flag to forcefully delete the expander + * + **/ - if (!parent) - goto next_port; +static void mptsas_expander_delete(MPT_ADAPTER *ioc, + struct mptsas_portinfo *port_info, u8 force) +{ - expander_sas_address = - port_info->phy_info[0].identify.sas_address; + struct mptsas_portinfo *parent; + 
int i; + u64 expander_sas_address; + struct mptsas_phyinfo *phy_info; + struct mptsas_portinfo buffer; + struct mptsas_portinfo_details *port_details; + struct sas_port *port; - /* - * Delete rphys in the parent that point - * to this expander. The transport layer will - * cleanup all the children. - */ - phy_info = parent->phy_info; - for (i = 0; i < parent->num_phys; i++, phy_info++) { - port = mptsas_get_port(phy_info); - if (!port) - continue; - if (phy_info->attached.sas_address != - expander_sas_address) - continue; - dsaswideprintk(ioc, - dev_printk(KERN_DEBUG, &port->dev, - MYIOC_s_FMT "delete port (%d)\n", ioc->name, - port->port_identifier)); - sas_port_delete(port); - mptsas_port_delete(ioc, phy_info->port_details); - } - next_port: + if (!port_info) + return; - phy_info = port_info->phy_info; - for (i = 0; i < port_info->num_phys; i++, phy_info++) - mptsas_port_delete(ioc, phy_info->port_details); + /* see if expander is still there before deleting */ + mptsas_sas_expander_pg0(ioc, &buffer, + (MPI_SAS_EXPAND_PGAD_FORM_HANDLE << + MPI_SAS_EXPAND_PGAD_FORM_SHIFT), + port_info->phy_info[0].identify.handle); - list_del(&port_info->list); - kfree(port_info->phy_info); - kfree(port_info); - } - /* - * Free this memory allocated from inside - * mptsas_sas_expander_pg0 - */ + if (buffer.num_phys) { kfree(buffer.phy_info); + if (!force) + return; } - mutex_unlock(&ioc->sas_topology_mutex); + + + /* + * Obtain the port_info instance to the parent port + */ + port_details = NULL; + expander_sas_address = + port_info->phy_info[0].identify.sas_address; + parent = mptsas_find_portinfo_by_handle(ioc, + port_info->phy_info[0].identify.handle_parent); + mptsas_delete_expander_siblings(ioc, parent, port_info); + if (!parent) + goto out; + + /* + * Delete rphys in the parent that point + * to this expander. + */ + phy_info = parent->phy_info; + port = NULL; + for (i = 0; i < parent->num_phys; i++, phy_info++) { + if (!phy_info->phy) + continue; + if (phy_info->attached.sas_address != + expander_sas_address) + continue; + if (!port) { + port = mptsas_get_port(phy_info); + port_details = phy_info->port_details; + } + dev_printk(KERN_DEBUG, &phy_info->phy->dev, + MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n", ioc->name, + phy_info->phy_id, phy_info->phy); + sas_port_delete_phy(port, phy_info->phy); + } + if (port) { + dev_printk(KERN_DEBUG, &port->dev, + MYIOC_s_FMT "delete port %d, sas_addr (0x%llx)\n", + ioc->name, port->port_identifier, + (unsigned long long)expander_sas_address); + sas_port_delete(port); + mptsas_port_delete(ioc, port_details); + } + out: + + printk(MYIOC_s_INFO_FMT "delete expander: num_phys %d, " + "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys, + (unsigned long long)expander_sas_address); + + /* + * free link + */ + list_del(&port_info->list); + kfree(port_info->phy_info); + kfree(port_info); } -/* - * Start of day discovery + +/** + * mptsas_send_expander_event - expanders events + * @ioc: Pointer to MPT_ADAPTER structure + * @expander_data: event data + * + * + * This function handles adding, removing, and refreshing + * device handles within the expander objects. 
*/ static void -mptsas_scan_sas_topology(MPT_ADAPTER *ioc) +mptsas_send_expander_event(struct fw_event_work *fw_event) { - u32 handle = 0xFFFF; + MPT_ADAPTER *ioc; + MpiEventDataSasExpanderStatusChange_t *expander_data; + struct mptsas_portinfo *port_info; + __le64 sas_address; int i; - mutex_lock(&ioc->sas_discovery_mutex); - mptsas_probe_hba_phys(ioc); - while (!mptsas_probe_expander_phys(ioc, &handle)) - ; - /* - Reporting RAID volumes. - */ - if (!ioc->ir_firmware) - goto out; - if (!ioc->raid_data.pIocPg2) - goto out; - if (!ioc->raid_data.pIocPg2->NumActiveVolumes) - goto out; - for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { - scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, - ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0); + ioc = fw_event->ioc; + expander_data = (MpiEventDataSasExpanderStatusChange_t *) + fw_event->event_data; + memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64)); + port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address); + + if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) { + if (port_info) { + for (i = 0; i < port_info->num_phys; i++) { + port_info->phy_info[i].portinfo = port_info; + port_info->phy_info[i].handle = + le16_to_cpu(expander_data->DevHandle); + port_info->phy_info[i].identify.sas_address = + le64_to_cpu(sas_address); + port_info->phy_info[i].identify.handle_parent = + le16_to_cpu(expander_data->ParentDevHandle); + } + mptsas_expander_refresh(ioc, port_info); + } else if (!port_info && expander_data->NumPhys) + mptsas_expander_event_add(ioc, expander_data); + } else if (expander_data->ReasonCode == + MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING) + mptsas_expander_delete(ioc, port_info, 0); + + mptsas_free_fw_event(ioc, fw_event); +} + + +/** + * mptsas_expander_add - + * @ioc: Pointer to MPT_ADAPTER structure + * @handle: + * + */ +struct mptsas_portinfo * +mptsas_expander_add(MPT_ADAPTER *ioc, u16 handle) +{ + struct mptsas_portinfo buffer, *port_info; + int i; + + if ((mptsas_sas_expander_pg0(ioc, &buffer, + (MPI_SAS_EXPAND_PGAD_FORM_HANDLE << + MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle))) + return NULL; + + port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_ATOMIC); + if (!port_info) { + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT + "%s: exit at line=%d\n", ioc->name, + __func__, __LINE__)); + return NULL; + } + port_info->num_phys = buffer.num_phys; + port_info->phy_info = buffer.phy_info; + for (i = 0; i < port_info->num_phys; i++) + port_info->phy_info[i].portinfo = port_info; + mutex_lock(&ioc->sas_topology_mutex); + list_add_tail(&port_info->list, &ioc->sas_topology); + mutex_unlock(&ioc->sas_topology_mutex); + printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, " + "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys, + (unsigned long long)buffer.phy_info[0].identify.sas_address); + mptsas_expander_refresh(ioc, port_info); + return port_info; +} + +static void +mptsas_send_link_status_event(struct fw_event_work *fw_event) +{ + MPT_ADAPTER *ioc; + MpiEventDataSasPhyLinkStatus_t *link_data; + struct mptsas_portinfo *port_info; + struct mptsas_phyinfo *phy_info = NULL; + __le64 sas_address; + u8 phy_num; + u8 link_rate; + + ioc = fw_event->ioc; + link_data = (MpiEventDataSasPhyLinkStatus_t *)fw_event->event_data; + + memcpy(&sas_address, &link_data->SASAddress, sizeof(__le64)); + sas_address = le64_to_cpu(sas_address); + link_rate = link_data->LinkRates >> 4; + phy_num = link_data->PhyNum; + + port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address); + if (port_info) { + phy_info = 
&port_info->phy_info[phy_num]; + if (phy_info) + phy_info->negotiated_link_rate = link_rate; + } + + if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 || + link_rate == MPI_SAS_IOUNIT0_RATE_3_0) { + + if (!port_info) + goto out; + + if (port_info == ioc->hba_port_info) + mptsas_probe_hba_phys(ioc); + else + mptsas_expander_refresh(ioc, port_info); + } else if (phy_info && phy_info->phy) { + if (link_rate == MPI_SAS_IOUNIT0_RATE_PHY_DISABLED) + phy_info->phy->negotiated_linkrate = + SAS_PHY_DISABLED; + else if (link_rate == + MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION) + phy_info->phy->negotiated_linkrate = + SAS_LINK_RATE_FAILED; + else + phy_info->phy->negotiated_linkrate = + SAS_LINK_RATE_UNKNOWN; } out: - mutex_unlock(&ioc->sas_discovery_mutex); + mptsas_free_fw_event(ioc, fw_event); } -/* - * Work queue thread to handle Runtime discovery - * Mere purpose is the hot add/delete of expanders - *(Mutex UNLOCKED) - */ +/** + * mptsas_probe_expanders - adding expanders + * @ioc: Pointer to MPT_ADAPTER structure + * + **/ static void -__mptsas_discovery_work(MPT_ADAPTER *ioc) +mptsas_probe_expanders(MPT_ADAPTER *ioc) { - u32 handle = 0xFFFF; + struct mptsas_portinfo buffer, *port_info; + u32 handle; + int i; - ioc->sas_discovery_runtime=1; - mptsas_delete_expander_phys(ioc); - mptsas_probe_hba_phys(ioc); - while (!mptsas_probe_expander_phys(ioc, &handle)) - ; - ioc->sas_discovery_runtime=0; + handle = 0xFFFF; + while (!mptsas_sas_expander_pg0(ioc, &buffer, + (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE << + MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)) { + + handle = buffer.phy_info[0].handle; + port_info = mptsas_find_portinfo_by_sas_address(ioc, + buffer.phy_info[0].identify.sas_address); + + if (port_info) { + /* refreshing handles */ + for (i = 0; i < buffer.num_phys; i++) { + port_info->phy_info[i].handle = handle; + port_info->phy_info[i].identify.handle_parent = + buffer.phy_info[0].identify.handle_parent; + } + mptsas_expander_refresh(ioc, port_info); + kfree(buffer.phy_info); + continue; + } + + port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL); + if (!port_info) { + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT + "%s: exit at line=%d\n", ioc->name, + __func__, __LINE__)); + return; + } + port_info->num_phys = buffer.num_phys; + port_info->phy_info = buffer.phy_info; + for (i = 0; i < port_info->num_phys; i++) + port_info->phy_info[i].portinfo = port_info; + mutex_lock(&ioc->sas_topology_mutex); + list_add_tail(&port_info->list, &ioc->sas_topology); + mutex_unlock(&ioc->sas_topology_mutex); + printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, " + "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys, + (unsigned long long)buffer.phy_info[0].identify.sas_address); + mptsas_expander_refresh(ioc, port_info); + } +} + +static void +mptsas_probe_devices(MPT_ADAPTER *ioc) +{ + u16 handle; + struct mptsas_devinfo sas_device; + struct mptsas_phyinfo *phy_info; + + handle = 0xFFFF; + while (!(mptsas_sas_device_pg0(ioc, &sas_device, + MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + + handle = sas_device.handle; + + if ((sas_device.device_info & + (MPI_SAS_DEVICE_INFO_SSP_TARGET | + MPI_SAS_DEVICE_INFO_STP_TARGET | + MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0) + continue; + + phy_info = mptsas_refreshing_device_handles(ioc, &sas_device); + if (!phy_info) + continue; + + if (mptsas_get_rphy(phy_info)) + continue; + + mptsas_add_end_device(ioc, phy_info); + } } /* - * Work queue thread to handle Runtime discovery - * Mere purpose is the hot add/delete of expanders - *(Mutex LOCKED) + * Start of 
day discovery */ static void -mptsas_discovery_work(struct work_struct *work) +mptsas_scan_sas_topology(MPT_ADAPTER *ioc) { - struct mptsas_discovery_event *ev = - container_of(work, struct mptsas_discovery_event, work); - MPT_ADAPTER *ioc = ev->ioc; + struct scsi_device *sdev; + int i; - mutex_lock(&ioc->sas_discovery_mutex); - __mptsas_discovery_work(ioc); - mutex_unlock(&ioc->sas_discovery_mutex); - kfree(ev); -} + mptsas_probe_hba_phys(ioc); + mptsas_probe_expanders(ioc); + mptsas_probe_devices(ioc); + /* + Reporting RAID volumes. + */ + if (!ioc->ir_firmware || !ioc->raid_data.pIocPg2 || + !ioc->raid_data.pIocPg2->NumActiveVolumes) + return; + for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { + sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, + ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0); + if (sdev) { + scsi_device_put(sdev); + continue; + } + printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, " + "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, + ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID); + scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, + ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0); + } +} static struct mptsas_phyinfo * mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address) @@ -3544,33 +3827,6 @@ mptsas_send_raid_event(struct fw_event_work *fw_event) mptsas_free_fw_event(ioc, fw_event); } -static void -mptsas_send_discovery_event(MPT_ADAPTER *ioc, - EVENT_DATA_SAS_DISCOVERY *discovery_data) -{ - struct mptsas_discovery_event *ev; - u32 discovery_status; - - /* - * DiscoveryStatus - * - * This flag will be non-zero when firmware - * kicks off discovery, and return to zero - * once its completed. - */ - discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus); - ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0; - if (discovery_status) - return; - - ev = kzalloc(sizeof(*ev), GFP_ATOMIC); - if (!ev) - return; - INIT_WORK(&ev->work, mptsas_discovery_work); - ev->ioc = ioc; - schedule_work(&ev->work); -}; - /* * mptsas_send_ir2_event - handle exposing hidden disk when * an inactive raid volume is added @@ -3634,10 +3890,28 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) } break; } - case MPI_EVENT_SAS_DISCOVERY: - mptsas_send_discovery_event(ioc, - (EVENT_DATA_SAS_DISCOVERY *)reply->Data); + case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE: + { + MpiEventDataSasExpanderStatusChange_t *expander_data = + (MpiEventDataSasExpanderStatusChange_t *)reply->Data; + + + if (expander_data->ReasonCode == + MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING && + ioc->device_missing_delay) + delay = HZ * ioc->device_missing_delay; break; + } + case MPI_EVENT_SAS_DISCOVERY: + { + u32 discovery_status; + EventDataSasDiscovery_t *discovery_data = + (EventDataSasDiscovery_t *)reply->Data; + + discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus); + ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0; + return 0; + } case MPI_EVENT_INTEGRATED_RAID: case MPI_EVENT_PERSISTENT_TABLE_FULL: case MPI_EVENT_IR2: @@ -3885,7 +4159,7 @@ static void __devexit mptsas_remove(struct pci_dev *pdev) kfree(p); } mutex_unlock(&ioc->sas_topology_mutex); - + ioc->hba_port_info = NULL; mptscsih_remove(pdev); } -- cgit v1.2.3 From eedf92b99806aeff18dc21c9dfb28999ce374413 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Fri, 29 May 2009 16:51:32 +0530 Subject: [SCSI] mpt fusion: Rescan SAS topology added 1.) SAS topology Rescan is added. 
If Firmware is doing Reset and we get Device add interrupt from Firmware, we will not receive it as part of Reset is going ON. After Reset we will do special Rescan of SAS topology. 2.) Driver version changed from 3.04.08 to 3.04.09. Added proper lock/unlock in mptsas_not_responding_devices() as per James' comment. Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.h | 6 +- drivers/message/fusion/mptsas.c | 169 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 172 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 8d1aadb6b4ac..11fc8f3960a6 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -76,8 +76,8 @@ #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "3.04.08" -#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.08" +#define MPT_LINUX_VERSION_COMMON "3.04.09" +#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.09" #define WHAT_MAGIC_STRING "@" "(" "#" ")" #define show_mptmod_ver(s,ver) \ @@ -711,6 +711,7 @@ typedef struct _MPT_ADAPTER u16 hba_port_num_phy; struct list_head sas_device_info_list; struct mutex sas_device_info_mutex; + u8 old_sas_discovery_protocal; u8 sas_discovery_quiesce_io; int sas_index; /* index refrencing */ MPT_MGMT sas_mgmt; @@ -741,6 +742,7 @@ typedef struct _MPT_ADAPTER spinlock_t fault_reset_work_lock; u8 sg_addr_size; + u8 in_rescan; u8 SGE_size; } MPT_ADAPTER; diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 7f2f76f0db3f..6aa91268afe9 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -119,6 +119,8 @@ static struct mptsas_portinfo *mptsas_find_portinfo_by_sas_address static void mptsas_expander_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info, u8 force); static void mptsas_send_expander_event(struct fw_event_work *fw_event); +static void mptsas_not_responding_devices(MPT_ADAPTER *ioc); +static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc); static void mptsas_print_phy_data(MPT_ADAPTER *ioc, MPI_SAS_IO_UNIT0_PHY_DATA *phy_data) @@ -844,6 +846,24 @@ mptsas_queue_device_delete(MPT_ADAPTER *ioc, mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1)); } +static void +mptsas_queue_rescan(MPT_ADAPTER *ioc) +{ + struct fw_event_work *fw_event; + int sz; + + sz = offsetof(struct fw_event_work, event_data); + fw_event = kzalloc(sz, GFP_ATOMIC); + if (!fw_event) { + printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", + ioc->name, __func__, __LINE__); + return; + } + fw_event->event = -1; + fw_event->ioc = ioc; + mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1)); +} + /** * mptsas_target_reset @@ -1100,6 +1120,7 @@ mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) complete(&ioc->sas_mgmt.done); } mptsas_cleanup_fw_event_q(ioc); + mptsas_queue_rescan(ioc); mptsas_fw_event_on(ioc); break; default: @@ -1406,6 +1427,23 @@ mptsas_firmware_event_work(struct work_struct *work) container_of(work, struct fw_event_work, work.work); MPT_ADAPTER *ioc = fw_event->ioc; + /* special rescan topology handling */ + if (fw_event->event == -1) { + if (ioc->in_rescan) { + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: rescan ignored as it is in progress\n", + ioc->name, __func__)); + return; + } + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: rescan after " + "reset\n", ioc->name, __func__)); + ioc->in_rescan = 1; + mptsas_not_responding_devices(ioc); + 
mptsas_scan_sas_topology(ioc); + ioc->in_rescan = 0; + mptsas_free_fw_event(ioc, fw_event); + return; + } /* events handling turned off during host reset */ if (ioc->fw_events_off) { @@ -3153,8 +3191,15 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event) if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 || link_rate == MPI_SAS_IOUNIT0_RATE_3_0) { - if (!port_info) + if (!port_info) { + if (ioc->old_sas_discovery_protocal) { + port_info = mptsas_expander_add(ioc, + le16_to_cpu(link_data->DevHandle)); + if (port_info) + goto out; + } goto out; + } if (port_info == ioc->hba_port_info) mptsas_probe_hba_phys(ioc); @@ -3176,6 +3221,121 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event) mptsas_free_fw_event(ioc, fw_event); } +static void +mptsas_not_responding_devices(MPT_ADAPTER *ioc) +{ + struct mptsas_portinfo buffer, *port_info; + struct mptsas_device_info *sas_info; + struct mptsas_devinfo sas_device; + u32 handle; + VirtTarget *vtarget = NULL; + struct mptsas_phyinfo *phy_info; + u8 found_expander; + int retval, retry_count; + unsigned long flags; + + mpt_findImVolumes(ioc); + + spin_lock_irqsave(&ioc->taskmgmt_lock, flags); + if (ioc->ioc_reset_in_progress) { + dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: exiting due to a parallel reset \n", ioc->name, + __func__)); + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + return; + } + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + + /* devices, logical volumes */ + mutex_lock(&ioc->sas_device_info_mutex); + redo_device_scan: + list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) { + sas_device.handle = 0; + retry_count = 0; +retry_page: + retval = mptsas_sas_device_pg0(ioc, &sas_device, + (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID + << MPI_SAS_DEVICE_PGAD_FORM_SHIFT), + (sas_info->fw.channel << 8) + + sas_info->fw.id); + + if (sas_device.handle) + continue; + if (retval == -EBUSY) { + spin_lock_irqsave(&ioc->taskmgmt_lock, flags); + if (ioc->ioc_reset_in_progress) { + dfailprintk(ioc, + printk(MYIOC_s_DEBUG_FMT + "%s: exiting due to reset\n", + ioc->name, __func__)); + spin_unlock_irqrestore + (&ioc->taskmgmt_lock, flags); + mutex_unlock(&ioc->sas_device_info_mutex); + return; + } + spin_unlock_irqrestore(&ioc->taskmgmt_lock, + flags); + } + + if (retval && (retval != -ENODEV)) { + if (retry_count < 10) { + retry_count++; + goto retry_page; + } else { + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: Config page retry exceeded retry " + "count deleting device 0x%llx\n", + ioc->name, __func__, + sas_info->sas_address)); + } + } + + /* delete device */ + vtarget = mptsas_find_vtarget(ioc, + sas_info->fw.channel, sas_info->fw.id); + if (vtarget) + vtarget->deleted = 1; + phy_info = mptsas_find_phyinfo_by_sas_address(ioc, + sas_info->sas_address); + if (phy_info) { + mptsas_del_end_device(ioc, phy_info); + goto redo_device_scan; + } + } + mutex_unlock(&ioc->sas_device_info_mutex); + + /* expanders */ + mutex_lock(&ioc->sas_topology_mutex); + redo_expander_scan: + list_for_each_entry(port_info, &ioc->sas_topology, list) { + + if (port_info->phy_info && + (!(port_info->phy_info[0].identify.device_info & + MPI_SAS_DEVICE_INFO_SMP_TARGET))) + continue; + found_expander = 0; + handle = 0xFFFF; + while (!mptsas_sas_expander_pg0(ioc, &buffer, + (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE << + MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle) && + !found_expander) { + + handle = buffer.phy_info[0].handle; + if (buffer.phy_info[0].identify.sas_address == + port_info->phy_info[0].identify.sas_address) { + found_expander = 1; + 
} + kfree(buffer.phy_info); + } + + if (!found_expander) { + mptsas_expander_delete(ioc, port_info, 0); + goto redo_expander_scan; + } + } + mutex_lock(&ioc->sas_topology_mutex); +} + /** * mptsas_probe_expanders - adding expanders * @ioc: Pointer to MPT_ADAPTER structure @@ -3895,6 +4055,8 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) MpiEventDataSasExpanderStatusChange_t *expander_data = (MpiEventDataSasExpanderStatusChange_t *)reply->Data; + if (ioc->old_sas_discovery_protocal) + return 0; if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING && @@ -3910,6 +4072,8 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus); ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0; + if (ioc->old_sas_discovery_protocal && !discovery_status) + mptsas_queue_rescan(ioc); return 0; } case MPI_EVENT_INTEGRATED_RAID: @@ -4117,6 +4281,9 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_mptsas_probe; } + /* older firmware doesn't support expander events */ + if ((ioc->facts.HeaderVersion >> 8) < 0xE) + ioc->old_sas_discovery_protocal = 1; mptsas_scan_sas_topology(ioc); mptsas_fw_event_on(ioc); return 0; -- cgit v1.2.3 From 2f187862e579f1f5e883188cab6bd867cb60387f Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Fri, 29 May 2009 16:52:37 +0530 Subject: [SCSI] mpt fusion: Code Cleanup patch Resending patch considering Grants G's code review. Main goal to submit this patch is code cleaup. 1. Better driver debug prints and code indentation. 2. fault_reset_work_lock is not used anywhere. driver is using taskmgmt_lock instead of fault_reset_work_lock. 3. setting pci_set_drvdata properly. 4. Ingore config request when IOC is in reset state.( ioc_reset_in_progress is set). 5. 
Init/clear managment frame proprely.(INITIALIZE_MGMT_STATUS and CLEAR_MGMT_STATUS) Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.c | 416 ++++++++++++++++++++++++++------------ drivers/message/fusion/mptbase.h | 46 +++-- drivers/message/fusion/mptctl.c | 6 +- drivers/message/fusion/mptsas.c | 94 ++++++--- drivers/message/fusion/mptscsih.c | 174 ++++++---------- drivers/message/fusion/mptscsih.h | 1 + 6 files changed, 443 insertions(+), 294 deletions(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index d67b26378a52..8f04d37fb359 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -207,7 +207,6 @@ static int procmpt_iocinfo_read(char *buf, char **start, off_t offset, #endif static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc); -//int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); static int ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply, int *evHandlers); static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf); @@ -374,11 +373,11 @@ mpt_fault_reset_work(struct work_struct *work) ioc = ioc->alt_ioc; /* rearm the timer */ - spin_lock_irqsave(&ioc->fault_reset_work_lock, flags); + spin_lock_irqsave(&ioc->taskmgmt_lock, flags); if (ioc->reset_work_q) queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work, msecs_to_jiffies(MPT_POLLING_INTERVAL)); - spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags); + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); } @@ -972,11 +971,15 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf) /* Put Request back on FreeQ! */ spin_lock_irqsave(&ioc->FreeQlock, flags); - mf->u.frame.linkage.arg1 = 0xdeadbeaf; /* signature to know if this mf is freed */ + if (cpu_to_le32(mf->u.frame.linkage.arg1) == 0xdeadbeaf) + goto out; + /* signature to know if this mf is freed */ + mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf); list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ); #ifdef MFCNT ioc->mfcnt--; #endif + out: spin_unlock_irqrestore(&ioc->FreeQlock, flags); } @@ -1731,6 +1734,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) ioc->id = mpt_ids++; sprintf(ioc->name, "ioc%d", ioc->id); + dinitprintk(ioc, printk(KERN_WARNING MYNAM ": mpt_adapter_install\n")); /* * set initial debug level @@ -1771,7 +1775,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) ioc->reply_sz = MPT_REPLY_FRAME_SIZE; ioc->pcidev = pdev; - spin_lock_init(&ioc->initializing_hba_lock); spin_lock_init(&ioc->taskmgmt_lock); mutex_init(&ioc->internal_cmds.mutex); @@ -1792,6 +1795,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) ioc->mfcnt = 0; #endif + ioc->sh = NULL; ioc->cached_fw = NULL; /* Initilize SCSI Config Data structure @@ -1808,9 +1812,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) /* Initialize workqueue */ INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work); - spin_lock_init(&ioc->fault_reset_work_lock); - snprintf(ioc->reset_work_q_name, sizeof(ioc->reset_work_q_name), + snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN, "mpt_poll_%d", ioc->id); ioc->reset_work_q = create_singlethread_workqueue(ioc->reset_work_q_name); @@ -1885,11 +1888,14 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) case MPI_MANUFACTPAGE_DEVID_SAS1064: case MPI_MANUFACTPAGE_DEVID_SAS1068: ioc->errata_flag_1064 = 1; + ioc->bus_type = SAS; + break; case 
MPI_MANUFACTPAGE_DEVID_SAS1064E: case MPI_MANUFACTPAGE_DEVID_SAS1068E: case MPI_MANUFACTPAGE_DEVID_SAS1078: ioc->bus_type = SAS; + break; } @@ -1933,7 +1939,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&ioc->fw_event_list); spin_lock_init(&ioc->fw_event_lock); - snprintf(ioc->fw_event_q_name, 20, "mpt/%d", ioc->id); + snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id); ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name); if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP, @@ -2008,10 +2014,10 @@ mpt_detach(struct pci_dev *pdev) /* * Stop polling ioc for fault condition */ - spin_lock_irqsave(&ioc->fault_reset_work_lock, flags); + spin_lock_irqsave(&ioc->taskmgmt_lock, flags); wq = ioc->reset_work_q; ioc->reset_work_q = NULL; - spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags); + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); cancel_delayed_work(&ioc->fault_reset_work); destroy_workqueue(wq); @@ -2234,12 +2240,16 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) ioc->active = 0; if (ioc->alt_ioc) { - if (ioc->alt_ioc->active) + if (ioc->alt_ioc->active || + reason == MPT_HOSTEVENT_IOC_RECOVER) { reset_alt_ioc_active = 1; - - /* Disable alt-IOC's reply interrupts (and FreeQ) for a bit ... */ - CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 0xFFFFFFFF); - ioc->alt_ioc->active = 0; + /* Disable alt-IOC's reply interrupts + * (and FreeQ) for a bit + **/ + CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, + 0xFFFFFFFF); + ioc->alt_ioc->active = 0; + } } hard = 1; @@ -2260,9 +2270,11 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) } } else { - printk(MYIOC_s_WARN_FMT "NOT READY!\n", ioc->name); + printk(MYIOC_s_WARN_FMT + "NOT READY WARNING!\n", ioc->name); } - return -1; + ret = -1; + goto out; } /* hard_reset_done = 0 if a soft reset was performed @@ -2272,7 +2284,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0) alt_ioc_ready = 1; else - printk(MYIOC_s_WARN_FMT "alt_ioc not ready!\n", ioc->alt_ioc->name); + printk(MYIOC_s_WARN_FMT + ": alt-ioc Not ready WARNING!\n", + ioc->alt_ioc->name); } for (ii=0; ii<5; ii++) { @@ -2293,7 +2307,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) if (alt_ioc_ready) { if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) { dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "Initial Alt IocFacts failed rc=%x\n", ioc->name, rc)); + "Initial Alt IocFacts failed rc=%x\n", + ioc->name, rc)); /* Retry - alt IOC was initialized once */ rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason); @@ -2337,16 +2352,20 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) IRQF_SHARED, ioc->name, ioc); if (rc < 0) { printk(MYIOC_s_ERR_FMT "Unable to allocate " - "interrupt %d!\n", ioc->name, ioc->pcidev->irq); + "interrupt %d!\n", + ioc->name, ioc->pcidev->irq); if (ioc->msi_enable) pci_disable_msi(ioc->pcidev); - return -EBUSY; + ret = -EBUSY; + goto out; } irq_allocated = 1; ioc->pci_irq = ioc->pcidev->irq; pci_set_master(ioc->pcidev); /* ?? */ - dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt " - "%d\n", ioc->name, ioc->pcidev->irq)); + pci_set_drvdata(ioc->pcidev, ioc); + dinitprintk(ioc, printk(MYIOC_s_INFO_FMT + "installed at interrupt %d\n", ioc->name, + ioc->pcidev->irq)); } } @@ -2355,17 +2374,22 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) * init as upper addresses are needed for init. 
* If fails, continue with alt-ioc processing */ + dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "PrimeIocFifos\n", + ioc->name)); if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0)) ret = -3; /* May need to check/upload firmware & data here! * If fails, continue with alt-ioc processing */ + dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "SendIocInit\n", + ioc->name)); if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0)) ret = -4; // NEW! if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) { - printk(MYIOC_s_WARN_FMT ": alt_ioc (%d) FIFO mgmt alloc!\n", + printk(MYIOC_s_WARN_FMT + ": alt-ioc (%d) FIFO mgmt alloc WARNING!\n", ioc->alt_ioc->name, rc); alt_ioc_ready = 0; reset_alt_ioc_active = 0; @@ -2375,8 +2399,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) { alt_ioc_ready = 0; reset_alt_ioc_active = 0; - printk(MYIOC_s_WARN_FMT "alt_ioc (%d) init failure!\n", - ioc->alt_ioc->name, rc); + printk(MYIOC_s_WARN_FMT + ": alt-ioc: (%d) init failure WARNING!\n", + ioc->alt_ioc->name, rc); } } @@ -2457,8 +2482,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) mutex_init(&ioc->raid_data.inactive_list_mutex); INIT_LIST_HEAD(&ioc->raid_data.inactive_list); - if (ioc->bus_type == SAS) { + switch (ioc->bus_type) { + case SAS: /* clear persistency table */ if(ioc->facts.IOCExceptions & MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) { @@ -2472,8 +2498,15 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) */ mpt_findImVolumes(ioc); - } else if (ioc->bus_type == FC) { - if ((ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) && + /* Check, and possibly reset, the coalescing value + */ + mpt_read_ioc_pg_1(ioc); + + break; + + case FC: + if ((ioc->pfacts[0].ProtocolFlags & + MPI_PORTFACTS_PROTOCOL_LAN) && (ioc->lan_cnfg_page0.Header.PageLength == 0)) { /* * Pre-fetch the ports LAN MAC address! @@ -2482,11 +2515,14 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) (void) GetLanConfigPages(ioc); a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; dprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n", - ioc->name, a[5], a[4], a[3], a[2], a[1], a[0])); - + "LanAddr = %02X:%02X:%02X" + ":%02X:%02X:%02X\n", + ioc->name, a[5], a[4], + a[3], a[2], a[1], a[0])); } - } else { + break; + + case SPI: /* Get NVRAM and adapter maximums from SPP 0 and 2 */ mpt_GetScsiPortSettings(ioc, 0); @@ -2505,6 +2541,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) mpt_read_ioc_pg_1(ioc); mpt_read_ioc_pg_4(ioc); + + break; } GetIoUnitPage2(ioc); @@ -2586,16 +2624,20 @@ mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev) if (_pcidev == peer) { /* Paranoia checks */ if (ioc->alt_ioc != NULL) { - printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n", - ioc->name, ioc->alt_ioc->name); + printk(MYIOC_s_WARN_FMT + "Oops, already bound (%s <==> %s)!\n", + ioc->name, ioc->name, ioc->alt_ioc->name); break; } else if (ioc_srch->alt_ioc != NULL) { - printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n", - ioc_srch->name, ioc_srch->alt_ioc->name); + printk(MYIOC_s_WARN_FMT + "Oops, already bound (%s <==> %s)!\n", + ioc_srch->name, ioc_srch->name, + ioc_srch->alt_ioc->name); break; } - dprintk(ioc, printk(MYIOC_s_INFO_FMT "FOUND! binding to %s\n", - ioc->name, ioc_srch->name)); + dprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "FOUND! 
binding %s <==> %s\n", + ioc->name, ioc->name, ioc_srch->name)); ioc_srch->alt_ioc = ioc; ioc->alt_ioc = ioc_srch; } @@ -2615,8 +2657,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc) int ret; if (ioc->cached_fw != NULL) { - ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto " - "adapter\n", __func__, ioc->name)); + ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: Pushing FW onto adapter\n", __func__, ioc->name)); if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *) ioc->cached_fw, CAN_SLEEP)) < 0) { printk(MYIOC_s_WARN_FMT @@ -2626,10 +2668,13 @@ mpt_adapter_disable(MPT_ADAPTER *ioc) } /* Disable adapter interrupts! */ + synchronize_irq(ioc->pcidev->irq); CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); ioc->active = 0; + /* Clear any lingering interrupt */ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); + CHIPREG_READ32(&ioc->chip->IntStatus); if (ioc->alloc != NULL) { sz = ioc->alloc_sz; @@ -2689,19 +2734,22 @@ mpt_adapter_disable(MPT_ADAPTER *ioc) if((ret = mpt_host_page_access_control(ioc, MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) { printk(MYIOC_s_ERR_FMT - "host page buffers free failed (%d)!\n", - ioc->name, ret); + ": %s: host page buffers free failed (%d)!\n", + ioc->name, __func__, ret); } - dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "HostPageBuffer free @ %p, sz=%d bytes\n", - ioc->name, ioc->HostPageBuffer, ioc->HostPageBuffer_sz)); + dexitprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "HostPageBuffer free @ %p, sz=%d bytes\n", + ioc->name, ioc->HostPageBuffer, + ioc->HostPageBuffer_sz)); pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz, ioc->HostPageBuffer, ioc->HostPageBuffer_dma); ioc->HostPageBuffer = NULL; ioc->HostPageBuffer_sz = 0; ioc->alloc_total -= ioc->HostPageBuffer_sz; } -} + pci_set_drvdata(ioc->pcidev, NULL); +} /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mpt_adapter_dispose - Free all resources associated with an MPT adapter @@ -2841,8 +2889,12 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag) } /* Is it already READY? */ - if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY) + if (!statefault && + ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)) { + dinitprintk(ioc, printk(MYIOC_s_INFO_FMT + "IOC is in READY state\n", ioc->name)); return 0; + } /* * Check to see if IOC is in FAULT state. @@ -2915,8 +2967,9 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag) ii++; cntdn--; if (!cntdn) { - printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n", - ioc->name, (int)((ii+5)/HZ)); + printk(MYIOC_s_ERR_FMT + "Wait IOC_READY state (0x%x) timeout(%d)!\n", + ioc->name, ioc_state, (int)((ii+5)/HZ)); return -ETIME; } @@ -2929,9 +2982,8 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag) } if (statefault < 3) { - printk(MYIOC_s_INFO_FMT "Recovered from %s\n", - ioc->name, - statefault==1 ? "stuck handshake" : "IOC FAULT"); + printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name, + statefault == 1 ? "stuck handshake" : "IOC FAULT"); } return hard_reset_done; @@ -2984,8 +3036,9 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason) /* IOC *must* NOT be in RESET state! */ if (ioc->last_state == MPI_IOC_STATE_RESET) { - printk(MYIOC_s_ERR_FMT "Can't get IOCFacts NOT READY! (%08x)\n", - ioc->name, ioc->last_state ); + printk(KERN_ERR MYNAM + ": ERROR - Can't get IOCFacts, %s NOT READY! 
(%08x)\n", + ioc->name, ioc->last_state); return -44; } @@ -3047,7 +3100,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason) * Old: u16{Major(4),Minor(4),SubMinor(8)} * New: u32{Major(8),Minor(8),Unit(8),Dev(8)} */ - if (facts->MsgVersion < 0x0102) { + if (facts->MsgVersion < MPI_VERSION_01_02) { /* * Handle old FC f/w style, convert to new... */ @@ -3059,9 +3112,11 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason) facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word); facts->ProductID = le16_to_cpu(facts->ProductID); + if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK) > MPI_FW_HEADER_PID_PROD_TARGET_SCSI) ioc->ir_firmware = 1; + facts->CurrentHostMfaHighAddr = le32_to_cpu(facts->CurrentHostMfaHighAddr); facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits); @@ -3077,7 +3132,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason) * to 14 in MPI-1.01.0x. */ if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 && - facts->MsgVersion > 0x0100) { + facts->MsgVersion > MPI_VERSION_01_00) { facts->FWImageSize = le32_to_cpu(facts->FWImageSize); } @@ -3259,6 +3314,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag) ioc_init.MaxDevices = (U8)ioc->devices_per_bus; ioc_init.MaxBuses = (U8)ioc->number_of_buses; + dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n", ioc->name, ioc->facts.MsgVersion)); if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) { @@ -3273,7 +3329,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag) } ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */ - if (sizeof(dma_addr_t) == sizeof(u64)) { + if (ioc->sg_addr_size == sizeof(u64)) { /* Save the upper 32-bits of the request * (reply) and sense buffers. */ @@ -3526,29 +3582,29 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag) ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest, reply_sz, (u16 *)preply, 65 /*seconds*/, sleepFlag); - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Upload completed rc=%x \n", ioc->name, ii)); + dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Upload completed " + "rc=%x \n", ioc->name, ii)); cmdStatus = -EFAULT; if (ii == 0) { /* Handshake transfer was complete and successful. * Check the Reply Frame. 
*/ - int status, transfer_sz; - status = le16_to_cpu(preply->IOCStatus); - if (status == MPI_IOCSTATUS_SUCCESS) { - transfer_sz = le32_to_cpu(preply->ActualImageSize); - if (transfer_sz == sz) + int status; + status = le16_to_cpu(preply->IOCStatus) & + MPI_IOCSTATUS_MASK; + if (status == MPI_IOCSTATUS_SUCCESS && + ioc->facts.FWImageSize == + le32_to_cpu(preply->ActualImageSize)) cmdStatus = 0; - } } dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n", ioc->name, cmdStatus)); if (cmdStatus) { - - ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": fw upload failed, freeing image \n", - ioc->name)); + ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed, " + "freeing image \n", ioc->name)); mpt_free_fw_memory(ioc); } kfree(prequest); @@ -3872,6 +3928,10 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) { + + if (!ignore) + return 0; + drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset " "address=%p\n", ioc->name, __func__, &ioc->chip->Doorbell, &ioc->chip->Reset_1078)); @@ -3889,6 +3949,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) "looking for READY STATE: doorbell=%x" " count=%d\n", ioc->name, doorbell, count)); + if (doorbell == MPI_IOC_STATE_READY) { return 1; } @@ -4039,6 +4100,10 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) doorbell = CHIPREG_READ32(&ioc->chip->Doorbell); doorbell &= MPI_IOC_STATE_MASK; + drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "looking for READY STATE: doorbell=%x" + " count=%d\n", ioc->name, doorbell, count)); + if (doorbell == MPI_IOC_STATE_READY) { break; } @@ -4050,6 +4115,11 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) mdelay (1000); } } + + if (doorbell != MPI_IOC_STATE_READY) + printk(MYIOC_s_ERR_FMT "Failed to come READY " + "after reset! 
IocState=%x", ioc->name, + doorbell); } } @@ -4168,8 +4238,9 @@ SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag) if (sleepFlag != CAN_SLEEP) count *= 10; - printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n", - ioc->name, (int)((count+5)/HZ)); + printk(MYIOC_s_ERR_FMT + "Wait IOC_READY state (0x%x) timeout(%d)!\n", + ioc->name, state, (int)((count+5)/HZ)); return -ETIME; } @@ -4255,8 +4326,13 @@ initChainBuffers(MPT_ADAPTER *ioc) dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n", ioc->name, num_sge, numSGE)); - if ( numSGE > MPT_SCSI_SG_DEPTH ) - numSGE = MPT_SCSI_SG_DEPTH; + if (ioc->bus_type == FC) { + if (numSGE > MPT_SCSI_FC_SG_DEPTH) + numSGE = MPT_SCSI_FC_SG_DEPTH; + } else { + if (numSGE > MPT_SCSI_SG_DEPTH) + numSGE = MPT_SCSI_SG_DEPTH; + } num_chain = 1; while (numSGE - num_sge > 0) { @@ -4493,6 +4569,7 @@ PrimeIocFifos(MPT_ADAPTER *ioc) return 0; out_fail: + if (ioc->alloc != NULL) { sz = ioc->alloc_sz; pci_free_consistent(ioc->pcidev, @@ -5610,17 +5687,20 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id) * -ENOMEM if pci_alloc failed **/ int -mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk) +mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, + RaidPhysDiskPage0_t *phys_disk) { - CONFIGPARMS cfg; - ConfigPageHeader_t hdr; + CONFIGPARMS cfg; + ConfigPageHeader_t hdr; dma_addr_t dma_handle; pRaidPhysDiskPage0_t buffer = NULL; int rc; memset(&cfg, 0 , sizeof(CONFIGPARMS)); memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); + memset(phys_disk, 0, sizeof(RaidPhysDiskPage0_t)); + hdr.PageVersion = MPI_RAIDPHYSDISKPAGE0_PAGEVERSION; hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK; cfg.cfghdr.hdr = &hdr; cfg.physAddr = -1; @@ -6074,7 +6154,8 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) int ret; u8 page_type = 0, extend_page; unsigned long timeleft; - int in_isr; + unsigned long flags; + int in_isr; u8 issue_hard_reset = 0; u8 retry_count = 0; @@ -6086,7 +6167,17 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n", ioc->name)); return -EPERM; + } + + /* don't send a config page during diag reset */ + spin_lock_irqsave(&ioc->taskmgmt_lock, flags); + if (ioc->ioc_reset_in_progress) { + dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: busy with host reset\n", ioc->name, __func__)); + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + return -EBUSY; } + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); /* don't send if no chance of success */ if (!ioc->active || @@ -6270,6 +6361,12 @@ mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) MPT_MGMT_STATUS_DID_IOCRESET; complete(&ioc->mptbase_cmds.done); } +/* wake up taskmgmt_cmds */ + if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) { + ioc->taskmgmt_cmds.status |= + MPT_MGMT_STATUS_DID_IOCRESET; + complete(&ioc->taskmgmt_cmds.done); + } break; default: break; @@ -6690,7 +6787,9 @@ int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) { int rc; + u8 cb_idx; unsigned long flags; + unsigned long time_count; dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name)); #ifdef MFCNT @@ -6721,30 +6820,24 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) * Prevents timeouts occurring during a diagnostic reset...very bad. * For all other protocol drivers, this is a no-op. 
*/ - { - u8 cb_idx; - int r = 0; - - for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { - if (MptResetHandlers[cb_idx]) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling IOC reset_setup handler #%d\n", - ioc->name, cb_idx)); - r += mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET); - if (ioc->alt_ioc) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling alt-%s setup reset handler #%d\n", - ioc->name, ioc->alt_ioc->name, cb_idx)); - r += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_SETUP_RESET); - } - } + for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { + if (MptResetHandlers[cb_idx]) { + mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET); + if (ioc->alt_ioc) + mpt_signal_reset(cb_idx, ioc->alt_ioc, + MPT_IOC_SETUP_RESET); } } - if ((rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag)) != 0) { - printk(MYIOC_s_WARN_FMT "Cannot recover rc = %d!\n", ioc->name, rc); + time_count = jiffies; + rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag); + if (rc != 0) { + printk(KERN_WARNING MYNAM + ": WARNING - (%d) Cannot recover %s\n", rc, ioc->name); + } else { + if (ioc->hard_resets < -1) + ioc->hard_resets++; } - ioc->reload_fw = 0; - if (ioc->alt_ioc) - ioc->alt_ioc->reload_fw = 0; spin_lock_irqsave(&ioc->taskmgmt_lock, flags); ioc->ioc_reset_in_progress = 0; @@ -6757,16 +6850,27 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) } spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler rc = %d!\n", ioc->name, rc)); + dtmprintk(ioc, + printk(MYIOC_s_DEBUG_FMT + "HardResetHandler: completed (%d seconds): %s\n", ioc->name, + jiffies_to_msecs(jiffies - time_count)/1000, ((rc == 0) ? + "SUCCESS" : "FAILED"))); return rc; } -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +#ifdef CONFIG_FUSION_LOGGING static void -EventDescriptionStr(u8 event, u32 evData0, char *evStr) +mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply) { char *ds = NULL; + u32 evData0; + int ii; + u8 event; + char *evStr = ioc->evStr; + + event = le32_to_cpu(pEventReply->Event) & 0xFF; + evData0 = le32_to_cpu(pEventReply->Data[0]); switch(event) { case MPI_EVENT_NONE: @@ -6800,9 +6904,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP) ds = "Loop State(LIP) Change"; else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE) - ds = "Loop State(LPE) Change"; /* ??? */ + ds = "Loop State(LPE) Change"; else - ds = "Loop State(LPB) Change"; /* ??? 
*/ + ds = "Loop State(LPB) Change"; break; case MPI_EVENT_LOGOUT: ds = "Logout"; @@ -7002,28 +7106,53 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) } case MPI_EVENT_IR2: { + u8 id = (u8)(evData0); + u8 channel = (u8)(evData0 >> 8); + u8 phys_num = (u8)(evData0 >> 24); u8 ReasonCode = (u8)(evData0 >> 16); + switch (ReasonCode) { case MPI_EVENT_IR2_RC_LD_STATE_CHANGED: - ds = "IR2: LD State Changed"; + snprintf(evStr, EVENT_DESCR_STR_SZ, + "IR2: LD State Changed: " + "id=%d channel=%d phys_num=%d", + id, channel, phys_num); break; case MPI_EVENT_IR2_RC_PD_STATE_CHANGED: - ds = "IR2: PD State Changed"; + snprintf(evStr, EVENT_DESCR_STR_SZ, + "IR2: PD State Changed " + "id=%d channel=%d phys_num=%d", + id, channel, phys_num); break; case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL: - ds = "IR2: Bad Block Table Full"; + snprintf(evStr, EVENT_DESCR_STR_SZ, + "IR2: Bad Block Table Full: " + "id=%d channel=%d phys_num=%d", + id, channel, phys_num); break; case MPI_EVENT_IR2_RC_PD_INSERTED: - ds = "IR2: PD Inserted"; + snprintf(evStr, EVENT_DESCR_STR_SZ, + "IR2: PD Inserted: " + "id=%d channel=%d phys_num=%d", + id, channel, phys_num); break; case MPI_EVENT_IR2_RC_PD_REMOVED: - ds = "IR2: PD Removed"; + snprintf(evStr, EVENT_DESCR_STR_SZ, + "IR2: PD Removed: " + "id=%d channel=%d phys_num=%d", + id, channel, phys_num); break; case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED: - ds = "IR2: Foreign CFG Detected"; + snprintf(evStr, EVENT_DESCR_STR_SZ, + "IR2: Foreign CFG Detected: " + "id=%d channel=%d phys_num=%d", + id, channel, phys_num); break; case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR: - ds = "IR2: Rebuild Medium Error"; + snprintf(evStr, EVENT_DESCR_STR_SZ, + "IR2: Rebuild Medium Error: " + "id=%d channel=%d phys_num=%d", + id, channel, phys_num); break; default: ds = "IR2"; @@ -7059,13 +7188,18 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: { u8 reason = (u8)(evData0); - u8 port_num = (u8)(evData0 >> 8); - u16 handle = le16_to_cpu(evData0 >> 16); - snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS Initiator Device Status Change: reason=0x%02x " - "port=%d handle=0x%04x", - reason, port_num, handle); + switch (reason) { + case MPI_EVENT_SAS_INIT_RC_ADDED: + ds = "SAS Initiator Status Change: Added"; + break; + case MPI_EVENT_SAS_INIT_RC_REMOVED: + ds = "SAS Initiator Status Change: Deleted"; + break; + default: + ds = "SAS Initiator Status Change"; + break; + } break; } @@ -7113,6 +7247,24 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) break; } + case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE: + { + u8 reason = (u8)(evData0); + + switch (reason) { + case MPI_EVENT_SAS_EXP_RC_ADDED: + ds = "Expander Status Change: Added"; + break; + case MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING: + ds = "Expander Status Change: Deleted"; + break; + default: + ds = "Expander Status Change"; + break; + } + break; + } + /* * MPT base "custom" events may be added here... 
*/ @@ -7122,8 +7274,20 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) } if (ds) strncpy(evStr, ds, EVENT_DESCR_STR_SZ); -} + + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "MPT event:(%02Xh) : %s\n", + ioc->name, event, evStr)); + + devtverboseprintk(ioc, printk(KERN_DEBUG MYNAM + ": Event data:\n")); + for (ii = 0; ii < le16_to_cpu(pEventReply->EventDataLength); ii++) + devtverboseprintk(ioc, printk(" %08x", + le32_to_cpu(pEventReply->Data[ii]))); + devtverboseprintk(ioc, printk(KERN_DEBUG "\n")); +} +#endif /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * ProcessEventNotification - Route EventNotificationReply to all event handlers @@ -7140,37 +7304,24 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply { u16 evDataLen; u32 evData0 = 0; -// u32 evCtx; int ii; u8 cb_idx; int r = 0; int handlers = 0; - char evStr[EVENT_DESCR_STR_SZ]; u8 event; /* * Do platform normalization of values */ event = le32_to_cpu(pEventReply->Event) & 0xFF; -// evCtx = le32_to_cpu(pEventReply->EventContext); evDataLen = le16_to_cpu(pEventReply->EventDataLength); if (evDataLen) { evData0 = le32_to_cpu(pEventReply->Data[0]); } - EventDescriptionStr(event, evData0, evStr); - devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event:(%02Xh) : %s\n", - ioc->name, - event, - evStr)); - #ifdef CONFIG_FUSION_LOGGING - devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT - ": Event data:\n", ioc->name)); - for (ii = 0; ii < evDataLen; ii++) - devtverboseprintk(ioc, printk(" %08x", - le32_to_cpu(pEventReply->Data[ii]))); - devtverboseprintk(ioc, printk("\n")); + if (evDataLen) + mpt_display_event_info(ioc, pEventReply); #endif /* @@ -7225,8 +7376,9 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply */ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { if (MptEvHandlers[cb_idx]) { - devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Routing Event to event handler #%d\n", - ioc->name, cb_idx)); + devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "Routing Event to event handler #%d\n", + ioc->name, cb_idx)); r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply); handlers++; } @@ -7310,8 +7462,6 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info) switch (info) { case 0x00010000: desc = "bug! MID not found"; - if (ioc->reload_fw == 0) - ioc->reload_fw++; break; case 0x00020000: diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 11fc8f3960a6..91499d1275c4 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -76,7 +76,7 @@ #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "3.04.09" +#define MPT_LINUX_VERSION_COMMON "3.04.10" #define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.09" #define WHAT_MAGIC_STRING "@" "(" "#" ")" @@ -104,6 +104,7 @@ #endif #define MPT_NAME_LENGTH 32 +#define MPT_KOBJ_NAME_LEN 20 #define MPT_PROCFS_MPTBASEDIR "mpt" /* chg it to "driver/fusion" ? */ @@ -162,10 +163,10 @@ /* * Set the MAX_SGE value based on user input. 
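/*
 * The #ifdef chains below clamp a user-supplied Kconfig value into a
 * supported window at compile time: CONFIG_FUSION_MAX_SGE into
 * [16, 128] and the new CONFIG_FUSION_MAX_FC_SGE into [16, 256], with
 * 40 as the default when the option is absent. The same clamp written
 * as a plain run-time helper (invented name, illustration only):
 */
static inline int clamp_sg_depth(int requested, int lo, int hi)
{
	if (requested < lo)
		return lo;
	if (requested > hi)
		return hi;
	return requested;
}
/* clamp_sg_depth(CONFIG_FUSION_MAX_FC_SGE, 16, 256) mirrors what the
 * preprocessor chain computes for MPT_SCSI_FC_SG_DEPTH. */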
*/ -#ifdef CONFIG_FUSION_MAX_SGE -#if CONFIG_FUSION_MAX_SGE < 16 +#ifdef CONFIG_FUSION_MAX_SGE +#if CONFIG_FUSION_MAX_SGE < 16 #define MPT_SCSI_SG_DEPTH 16 -#elif CONFIG_FUSION_MAX_SGE > 128 +#elif CONFIG_FUSION_MAX_SGE > 128 #define MPT_SCSI_SG_DEPTH 128 #else #define MPT_SCSI_SG_DEPTH CONFIG_FUSION_MAX_SGE @@ -174,6 +175,18 @@ #define MPT_SCSI_SG_DEPTH 40 #endif +#ifdef CONFIG_FUSION_MAX_FC_SGE +#if CONFIG_FUSION_MAX_FC_SGE < 16 +#define MPT_SCSI_FC_SG_DEPTH 16 +#elif CONFIG_FUSION_MAX_FC_SGE > 256 +#define MPT_SCSI_FC_SG_DEPTH 256 +#else +#define MPT_SCSI_FC_SG_DEPTH CONFIG_FUSION_MAX_FC_SGE +#endif +#else +#define MPT_SCSI_FC_SG_DEPTH 40 +#endif + /* debug print string length used for events and iocstatus */ # define EVENT_DESCR_STR_SZ 100 @@ -576,6 +589,10 @@ typedef struct _MPT_ADAPTER int pci_irq; /* This irq */ char name[MPT_NAME_LENGTH]; /* "iocN" */ char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */ +#ifdef CONFIG_FUSION_LOGGING + /* used in mpt_display_event_info */ + char evStr[EVENT_DESCR_STR_SZ]; +#endif char board_name[16]; char board_assembly[16]; char board_tracer[16]; @@ -682,14 +699,11 @@ typedef struct _MPT_ADAPTER int aen_event_read_flag; /* flag to indicate event log was read*/ u8 FirstWhoInit; u8 upload_fw; /* If set, do a fw upload */ - u8 reload_fw; /* Force a FW Reload on next reset */ u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */ u8 pad1[4]; u8 DoneCtx; u8 TaskCtx; u8 InternalCtx; - spinlock_t initializing_hba_lock; - int initializing_hba_lock_flag; struct list_head list; struct net_device *netdev; struct list_head sas_topology; @@ -699,7 +713,7 @@ typedef struct _MPT_ADAPTER struct list_head fw_event_list; spinlock_t fw_event_lock; u8 fw_events_off; /* if '1', then ignore events */ - char fw_event_q_name[20]; + char fw_event_q_name[MPT_KOBJ_NAME_LEN]; struct mutex sas_discovery_mutex; u8 sas_discovery_runtime; @@ -731,15 +745,22 @@ typedef struct _MPT_ADAPTER u8 fc_link_speed[2]; spinlock_t fc_rescan_work_lock; struct work_struct fc_rescan_work; - char fc_rescan_work_q_name[20]; + char fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN]; struct workqueue_struct *fc_rescan_work_q; + + /* driver forced bus resets count */ + unsigned long hard_resets; + /* fw/external bus resets count */ + unsigned long soft_resets; + /* cmd timeouts */ + unsigned long timeouts; + struct scsi_cmnd **ScsiLookup; spinlock_t scsi_lookup_lock; u64 dma_mask; - char reset_work_q_name[20]; + char reset_work_q_name[MPT_KOBJ_NAME_LEN]; struct workqueue_struct *reset_work_q; struct delayed_work fault_reset_work; - spinlock_t fault_reset_work_lock; u8 sg_addr_size; u8 in_rescan; @@ -870,9 +891,6 @@ typedef struct _MPT_SCSI_HOST { MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */ struct scsi_cmnd *abortSCpnt; MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */ - unsigned long hard_resets; /* driver forced bus resets count */ - unsigned long soft_resets; /* fw/external bus resets count */ - unsigned long timeouts; /* cmd timeouts */ ushort sel_timeout[MPT_MAX_FC_DEVICES]; char *info_kbuf; long last_queue_full; diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index ab620132d9a9..9b2e2198aee9 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -2534,9 +2534,9 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) MPT_SCSI_HOST *hd = shost_priv(ioc->sh); if (hd && (cim_rev == 1)) { - karg.hard_resets = hd->hard_resets; - karg.soft_resets = hd->soft_resets; - karg.timeouts = hd->timeouts; + karg.hard_resets 
= ioc->hard_resets; + karg.soft_resets = ioc->soft_resets; + karg.timeouts = ioc->timeouts; } } diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 6aa91268afe9..da22141152d7 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -724,8 +724,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info) * Forming a port */ if (!port_details) { - port_details = kzalloc(sizeof(*port_details), - GFP_KERNEL); + port_details = kzalloc(sizeof(struct + mptsas_portinfo_details), GFP_KERNEL); if (!port_details) goto out; port_details->num_phys = 1; @@ -952,7 +952,7 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc, vtarget->deleted = 1; /* block IO */ - target_reset_list = kzalloc(sizeof(*target_reset_list), + target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event), GFP_ATOMIC); if (!target_reset_list) { dfailprintk(ioc, printk(MYIOC_s_WARN_FMT @@ -1791,8 +1791,13 @@ static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, memcpy(ioc->sas_mgmt.reply, reply, min(ioc->reply_sz, 4 * reply->u.reply.MsgLength)); } - complete(&ioc->sas_mgmt.done); - return 1; + + if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) { + ioc->sas_mgmt.status &= ~MPT_MGMT_STATUS_PENDING; + complete(&ioc->sas_mgmt.done); + return 1; + } + return 0; } static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset) @@ -1831,6 +1836,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset) MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET; req->PhyNum = phy->identify.phy_identifier; + INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status) mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, @@ -1862,6 +1868,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset) error = 0; out_unlock: + CLEAR_MGMT_STATUS(ioc->sas_mgmt.status) mutex_unlock(&ioc->sas_mgmt.mutex); out: return error; @@ -1999,10 +2006,15 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, if (!dma_addr_out) goto put_mf; ioc->add_sge(psge, flagsLength, dma_addr_out); - psge += (sizeof(u32) + sizeof(dma_addr_t)); + psge += ioc->SGE_size; /* response */ - flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; + flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT | + MPI_SGE_FLAGS_SYSTEM_ADDRESS | + MPI_SGE_FLAGS_IOC_TO_HOST | + MPI_SGE_FLAGS_END_OF_BUFFER; + + flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT; flagsLength |= rsp->data_len + 4; dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), rsp->data_len, PCI_DMA_BIDIRECTIONAL); @@ -2010,6 +2022,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, goto unmap; ioc->add_sge(psge, flagsLength, dma_addr_in); + INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status) mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ); @@ -2031,7 +2044,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, req->data_len = 0; rsp->data_len -= smprep->ResponseDataLength; } else { - printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n", + printk(MYIOC_s_ERR_FMT + "%s: smp passthru reply failed to be returned\n", ioc->name, __func__); ret = -ENXIO; } @@ -2046,6 +2060,7 @@ put_mf: if (mf) mpt_free_msg_frame(ioc, mf); out_unlock: + CLEAR_MGMT_STATUS(ioc->sas_mgmt.status) mutex_unlock(&ioc->sas_mgmt.mutex); out: return ret; @@ -2109,7 +2124,7 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info) 
port_info->num_phys = buffer->NumPhys; port_info->phy_info = kcalloc(port_info->num_phys, - sizeof(*port_info->phy_info),GFP_KERNEL); + sizeof(struct mptsas_phyinfo), GFP_KERNEL); if (!port_info->phy_info) { error = -ENOMEM; goto out_free_consistent; @@ -2271,10 +2286,6 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info, __le64 sas_address; int error=0; - if (ioc->sas_discovery_runtime && - mptsas_is_end_device(device_info)) - goto out; - hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION; hdr.ExtPageLength = 0; hdr.PageNumber = 0; @@ -2315,6 +2326,7 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info, mptsas_print_device_pg0(ioc, buffer); + memset(device_info, 0, sizeof(struct mptsas_devinfo)); device_info->handle = le16_to_cpu(buffer->DevHandle); device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle); device_info->handle_enclosure = @@ -2346,7 +2358,9 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info, SasExpanderPage0_t *buffer; dma_addr_t dma_handle; int i, error; + __le64 sas_address; + memset(port_info, 0, sizeof(struct mptsas_portinfo)); hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION; hdr.ExtPageLength = 0; hdr.PageNumber = 0; @@ -2392,18 +2406,23 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info, } /* save config data */ - port_info->num_phys = buffer->NumPhys; + port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1; port_info->phy_info = kcalloc(port_info->num_phys, - sizeof(*port_info->phy_info),GFP_KERNEL); + sizeof(struct mptsas_phyinfo), GFP_KERNEL); if (!port_info->phy_info) { error = -ENOMEM; goto out_free_consistent; } + memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64)); for (i = 0; i < port_info->num_phys; i++) { port_info->phy_info[i].portinfo = port_info; port_info->phy_info[i].handle = le16_to_cpu(buffer->DevHandle); + port_info->phy_info[i].identify.sas_address = + le64_to_cpu(sas_address); + port_info->phy_info[i].identify.handle_parent = + le16_to_cpu(buffer->ParentDevHandle); } out_free_consistent: @@ -2423,11 +2442,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, dma_addr_t dma_handle; int error=0; - if (ioc->sas_discovery_runtime && - mptsas_is_end_device(&phy_info->attached)) - goto out; - - hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION; + hdr.PageVersion = MPI_SASEXPANDER1_PAGEVERSION; hdr.ExtPageLength = 0; hdr.PageNumber = 1; hdr.Reserved1 = 0; @@ -2462,6 +2477,12 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; error = mpt_config(ioc, &cfg); + + if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) { + error = -ENODEV; + goto out; + } + if (error) goto out_free_consistent; @@ -2681,16 +2702,21 @@ static int mptsas_probe_one_phy(struct device *dev, goto out; } mptsas_set_port(ioc, phy_info, port); - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "sas_port_alloc: port=%p dev=%p port_id=%d\n", - ioc->name, port, dev, port->port_identifier)); + devtprintk(ioc, dev_printk(KERN_DEBUG, &port->dev, + MYIOC_s_FMT "add port %d, sas_addr (0x%llx)\n", + ioc->name, port->port_identifier, + (unsigned long long)phy_info-> + attached.sas_address)); } - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_port_add_phy: phy_id=%d\n", - ioc->name, phy_info->phy_id)); + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "sas_port_add_phy: phy_id=%d\n", + ioc->name, phy_info->phy_id)); sas_port_add_phy(port, phy_info->phy); phy_info->sas_port_add_phy = 0; 
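/*
 * In the expander page-0 hunk above, firmware may report NumPhys == 0;
 * the patch forces a minimum of one before the kcalloc() and then fans
 * the expander's SAS address and parent handle out to every phy slot.
 * The allocate-then-populate shape as a standalone sketch (invented
 * names, user-space calloc() standing in for kcalloc()):
 */
#include <stdlib.h>

struct phy_slot {
	unsigned long long sas_address;
	unsigned short handle_parent;
};

static struct phy_slot *alloc_phy_slots(unsigned num_reported,
					unsigned long long sas_address,
					unsigned short handle_parent,
					unsigned *num_out)
{
	unsigned n = num_reported ? num_reported : 1; /* never zero */
	struct phy_slot *slots = calloc(n, sizeof(*slots));
	unsigned i;

	if (!slots)
		return NULL;
	for (i = 0; i < n; i++) { /* same identity copied per phy */
		slots[i].sas_address = sas_address;
		slots[i].handle_parent = handle_parent;
	}
	*num_out = n;
	return slots;
}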
+ devtprintk(ioc, dev_printk(KERN_DEBUG, &phy_info->phy->dev, + MYIOC_s_FMT "add phy %d, phy-obj (0x%p)\n", ioc->name, + phy_info->phy_id, phy_info->phy)); } - if (!mptsas_get_rphy(phy_info) && port && !port->rphy) { struct sas_rphy *rphy; @@ -2703,9 +2729,10 @@ static int mptsas_probe_one_phy(struct device *dev, * the adding/removing of devices that occur * after start of day. */ - if (ioc->sas_discovery_runtime && - mptsas_is_end_device(&phy_info->attached)) - goto out; + if (mptsas_is_end_device(&phy_info->attached) && + phy_info->attached.handle_parent) { + goto out; + } mptsas_parse_device_info(&identify, &phy_info->attached); if (scsi_is_host_device(parent)) { @@ -3420,9 +3447,12 @@ mptsas_probe_devices(MPT_ADAPTER *ioc) } } -/* - * Start of day discovery - */ +/** + * mptsas_scan_sas_topology - + * @ioc: Pointer to MPT_ADAPTER structure + * @sas_address: + * + **/ static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc) { diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 477f6f8251e5..6424dcbd5908 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -80,7 +80,6 @@ MODULE_VERSION(my_VERSION); /* * Other private/forward protos... */ -static struct scsi_cmnd * mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i); static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i); static void mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd); static int SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd); @@ -236,7 +235,8 @@ nextSGEset: for (ii=0; ii < (numSgeThisFrame-1); ii++) { thisxfer = sg_dma_len(sg); if (thisxfer == 0) { - sg = sg_next(sg); /* Get next SG element from the OS */ + /* Get next SG element from the OS */ + sg = sg_next(sg); sg_done++; continue; } @@ -244,7 +244,8 @@ nextSGEset: v2 = sg_dma_address(sg); ioc->add_sge(psge, sgflags | thisxfer, v2); - sg = sg_next(sg); /* Get next SG element from the OS */ + /* Get next SG element from the OS */ + sg = sg_next(sg); psge += ioc->SGE_size; sgeOffset += ioc->SGE_size; sg_done++; @@ -533,14 +534,15 @@ mptscsih_info_scsiio(MPT_ADAPTER *ioc, struct scsi_cmnd *sc, SCSIIOReply_t * pSc } scsi_print_command(sc); - printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d\n", - ioc->name, pScsiReply->Bus, pScsiReply->TargetID); + printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d, lun = %d\n", + ioc->name, pScsiReply->Bus, pScsiReply->TargetID, sc->device->lun); printk(MYIOC_s_DEBUG_FMT "\trequest_len = %d, underflow = %d, " "resid = %d\n", ioc->name, scsi_bufflen(sc), sc->underflow, scsi_get_resid(sc)); printk(MYIOC_s_DEBUG_FMT "\ttag = %d, transfer_count = %d, " "sc->result = %08X\n", ioc->name, le16_to_cpu(pScsiReply->TaskTag), le32_to_cpu(pScsiReply->TransferCount), sc->result); + printk(MYIOC_s_DEBUG_FMT "\tiocstatus = %s (0x%04x), " "scsi_status = %s (0x%02x), scsi_state = (0x%02x)\n", ioc->name, desc, ioc_status, desc1, pScsiReply->SCSIStatus, @@ -595,16 +597,14 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); req_idx_MR = (mr != NULL) ? le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx; + + /* Special case, where already freed message frame is received from + * Firmware. It happens with Resetting IOC. + * Return immediately. 
Do not care + */ if ((req_idx != req_idx_MR) || - (mf->u.frame.linkage.arg1 == 0xdeadbeaf)) { - printk(MYIOC_s_ERR_FMT "Received a mf that was already freed\n", - ioc->name); - printk (MYIOC_s_ERR_FMT - "req_idx=%x req_idx_MR=%x mf=%p mr=%p sc=%p\n", - ioc->name, req_idx, req_idx_MR, mf, mr, - mptscsih_get_scsi_lookup(ioc, req_idx_MR)); + (le32_to_cpu(mf->u.frame.linkage.arg1) == 0xdeadbeaf)) return 0; - } sc = mptscsih_getclear_scsi_lookup(ioc, req_idx); if (sc == NULL) { @@ -751,12 +751,16 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) */ case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ - case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ /* Linux handles an unsolicited DID_RESET better * than an unsolicited DID_ABORT. */ sc->result = DID_RESET << 16; + case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ + if (ioc->bus_type == FC) + sc->result = DID_ERROR << 16; + else + sc->result = DID_RESET << 16; break; case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */ @@ -933,9 +937,9 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd) scsi_dma_unmap(sc); sc->result = DID_RESET << 16; sc->host_scribble = NULL; - sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT - "completing cmds: fw_channel %d, fw_id %d, sc=%p," - " mf = %p, idx=%x\n", ioc->name, channel, id, sc, mf, ii); + dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT + "completing cmds: fw_channel %d, fw_id %d, sc=%p, mf = %p, " + "idx=%x\n", ioc->name, channel, id, sc, mf, ii)); sc->scsi_done(sc); } } @@ -994,9 +998,11 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice) scsi_dma_unmap(sc); sc->host_scribble = NULL; sc->result = DID_NO_CONNECT << 16; - sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT "completing cmds: fw_channel %d," - "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name, vdevice->vtarget->channel, - vdevice->vtarget->id, sc, mf, ii); + dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, + MYIOC_s_FMT "completing cmds: fw_channel %d, " + "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name, + vdevice->vtarget->channel, vdevice->vtarget->id, + sc, mf, ii)); sc->scsi_done(sc); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); } @@ -1287,7 +1293,6 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) MPT_FRAME_HDR *mf; SCSIIORequest_t *pScsiReq; VirtDevice *vdevice = SCpnt->device->hostdata; - int lun; u32 datalen; u32 scsictl; u32 scsidir; @@ -1298,7 +1303,6 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) hd = shost_priv(SCpnt->device->host); ioc = hd->ioc; - lun = SCpnt->device->lun; SCpnt->scsi_done = done; dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n", @@ -1709,8 +1713,8 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) goto out; } - if (hd->timeouts < -1) - hd->timeouts++; + if (ioc->timeouts < -1) + ioc->timeouts++; if (mpt_fwfault_debug) mpt_halt_firmware(ioc); @@ -1734,17 +1738,23 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) ctx2abort, mptscsih_get_tm_timeout(ioc)); if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx && - SCpnt->serial_number == sn) + SCpnt->serial_number == sn) { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "task abort: command still in active list! (sc=%p)\n", + ioc->name, SCpnt)); retval = FAILED; + } else { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "task abort: command cleared from active list! (sc=%p)\n", + ioc->name, SCpnt)); + retval = SUCCESS; + } out: printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n", - ioc->name, ((retval == 0) ? 
"SUCCESS" : "FAILED" ), SCpnt); + ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt); - if (retval == 0) - return SUCCESS; - else - return FAILED; + return retval; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -1779,7 +1789,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt) vdevice = SCpnt->device->hostdata; if (!vdevice || !vdevice->vtarget) { - retval = 0; + retval = SUCCESS; goto out; } @@ -1837,10 +1847,12 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt) ioc->name, SCpnt); scsi_print_command(SCpnt); - if (hd->timeouts < -1) - hd->timeouts++; + if (ioc->timeouts < -1) + ioc->timeouts++; vdevice = SCpnt->device->hostdata; + if (!vdevice || !vdevice->vtarget) + return SUCCESS; retval = mptscsih_IssueTaskMgmt(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, vdevice->vtarget->channel, 0, 0, 0, @@ -1868,8 +1880,9 @@ int mptscsih_host_reset(struct scsi_cmnd *SCpnt) { MPT_SCSI_HOST * hd; - int retval; + int status = SUCCESS; MPT_ADAPTER *ioc; + int retval; /* If we can't locate the host to reset, then we failed. */ if ((hd = shost_priv(SCpnt->device->host)) == NULL){ @@ -1888,19 +1901,16 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt) /* If our attempts to reset the host failed, then return a failed * status. The host will be taken off line by the SCSI mid-layer. */ - if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0) { - retval = FAILED; - } else { - /* Make sure TM pending is cleared and TM state is set to - * NONE. - */ - retval = 0; - } + retval = mpt_HardResetHandler(ioc, CAN_SLEEP); + if (retval < 0) + status = FAILED; + else + status = SUCCESS; printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n", ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); - return retval; + return status; } static int @@ -2244,7 +2254,6 @@ mptscsih_slave_configure(struct scsi_device *sdev) sdev->ppr, sdev->inquiry_len)); vdevice->configured_lun = 1; - mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH); dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Queue depth=%d, tflags=%x\n", @@ -2256,6 +2265,7 @@ mptscsih_slave_configure(struct scsi_device *sdev) ioc->name, vtarget->negoFlags, vtarget->maxOffset, vtarget->minSyncFactor)); + mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH); dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "tagged %d, simple %d, ordered %d\n", ioc->name,sdev->tagged_supported, sdev->simple_tags, @@ -2328,36 +2338,17 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR } } -/** - * mptscsih_get_scsi_lookup - * @ioc: Pointer to MPT_ADAPTER structure - * @i: index into the array - * - * retrieves scmd entry from ScsiLookup[] array list - * - * Returns the scsi_cmd pointer - **/ -static struct scsi_cmnd * -mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) -{ - unsigned long flags; - struct scsi_cmnd *scmd; - - spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); - scmd = ioc->ScsiLookup[i]; - spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); - - return scmd; -} /** * mptscsih_getclear_scsi_lookup - * @ioc: Pointer to MPT_ADAPTER structure - * @i: index into the array * * retrieves and clears scmd entry from ScsiLookup[] array list * + * @ioc: Pointer to MPT_ADAPTER structure + * @i: index into the array + * * Returns the scsi_cmd pointer + * **/ static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) @@ -2456,57 +2447,16 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) { - MPT_SCSI_HOST *hd; u8 event = 
le32_to_cpu(pEvReply->Event) & 0xFF; devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", ioc->name, event)); - if (ioc->sh == NULL || - ((hd = shost_priv(ioc->sh)) == NULL)) - return 1; - - switch (event) { - case MPI_EVENT_UNIT_ATTENTION: /* 03 */ - /* FIXME! */ - break; - case MPI_EVENT_IOC_BUS_RESET: /* 04 */ - case MPI_EVENT_EXT_BUS_RESET: /* 05 */ - if (hd && (ioc->bus_type == SPI) && (hd->soft_resets < -1)) - hd->soft_resets++; - break; - case MPI_EVENT_LOGOUT: /* 09 */ - /* FIXME! */ - break; - - case MPI_EVENT_RESCAN: /* 06 */ - break; - - /* - * CHECKME! Don't think we need to do - * anything for these, but... - */ - case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */ - case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */ - /* - * CHECKME! Falling thru... - */ - break; - - case MPI_EVENT_INTEGRATED_RAID: /* 0B */ - break; - - case MPI_EVENT_NONE: /* 00 */ - case MPI_EVENT_LOG_DATA: /* 01 */ - case MPI_EVENT_STATE_CHANGE: /* 02 */ - case MPI_EVENT_EVENT_CHANGE: /* 0A */ - default: - dprintk(ioc, printk(MYIOC_s_DEBUG_FMT - ": Ignoring event (=%02Xh)\n", - ioc->name, event)); - break; - } + if ((event == MPI_EVENT_IOC_BUS_RESET || + event == MPI_EVENT_EXT_BUS_RESET) && + (ioc->bus_type == SPI) && (ioc->soft_resets < -1)) + ioc->soft_resets++; return 1; /* currently means nothing really */ } diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h index 91e9e9fcd0e4..572739565f6a 100644 --- a/drivers/message/fusion/mptscsih.h +++ b/drivers/message/fusion/mptscsih.h @@ -90,6 +90,7 @@ #endif + typedef struct _internal_cmd { char *data; /* data pointer */ dma_addr_t data_dma; /* data dma address */ -- cgit v1.2.3 From 71278192a887d7da3e768809c6fe9979d172ff23 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Fri, 29 May 2009 16:53:14 +0530 Subject: [SCSI] mpt fusion: Put IOC into ready state if it not already in ready state Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 8f04d37fb359..9f6b315624aa 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -2667,6 +2667,22 @@ mpt_adapter_disable(MPT_ADAPTER *ioc) } } + /* + * Put the controller into ready state (if its not already) + */ + if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) { + if (!SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, + CAN_SLEEP)) { + if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) + printk(MYIOC_s_ERR_FMT "%s: IOC msg unit " + "reset failed to put ioc in ready state!\n", + ioc->name, __func__); + } else + printk(MYIOC_s_ERR_FMT "%s: IOC msg unit reset " + "failed!\n", ioc->name, __func__); + } + + /* Disable adapter interrupts! */ synchronize_irq(ioc->pcidev->irq); CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); -- cgit v1.2.3 From a7938b0bb3b458fe0723608be3db6c4ed8d79a8c Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Fri, 29 May 2009 16:53:56 +0530 Subject: [SCSI] mpt fusion: RAID device handling and Dual port Raid support is added 1. Handle integrated Raid device(Add/Delete) and error condition and check related to Raid device. is_logical_volume will represent logical volume device. 2. Raid device dual port support is added. Main functions to support this feature are mpt_raid_phys_disk_get_num_paths and mpt_raid_phys_disk_pg1. 
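As a hedged illustration of how the two new exports could fit together (this usage is not part of the patch; the buffer sizing and the RAID_PHYS_DISK1_PATH type name are assumptions based on the MPI headers): a caller first asks how many paths a hidden member disk has, then sizes and reads page 1 to inspect each path's WWID and flags.

	int num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);

	if (num_paths > 0) {
		/* page 1 carries a variable-length Path[] array */
		size_t sz = offsetof(RaidPhysDiskPage1_t, Path) +
		    num_paths * sizeof(RAID_PHYS_DISK1_PATH);
		RaidPhysDiskPage1_t *pg1 = kzalloc(sz, GFP_KERNEL);
		u64 wwid;
		int i;

		if (pg1 && mpt_raid_phys_disk_pg1(ioc, phys_disk_num, pg1) == 0) {
			for (i = 0; i < pg1->NumPhysDiskPaths; i++) {
				/* copied out as in the patch: memcpy, already
				 * byte-swapped to host order */
				memcpy(&wwid, &pg1->Path[i].WWID, sizeof(wwid));
				pr_info("path %d: wwid 0x%llx flags 0x%x\n",
					i, (unsigned long long)wwid,
					pg1->Path[i].Flags);
			}
		}
		kfree(pg1);
	}

mpt_raid_phys_disk_get_num_paths() folds every failure into a zero path count, while mpt_raid_phys_disk_pg1() distinguishes -EFAULT and -ENOMEM, so only the page read needs an explicit error check here.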
Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.c | 167 +++++++++++++++++++ drivers/message/fusion/mptbase.h | 4 + drivers/message/fusion/mptsas.c | 332 +++++++++++++++++++++++++++++++------- drivers/message/fusion/mptsas.h | 1 + drivers/message/fusion/mptscsih.c | 85 +++++++++- 5 files changed, 527 insertions(+), 62 deletions(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 9f6b315624aa..44b931504457 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -5762,6 +5762,161 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, return rc; } +/** + * mpt_raid_phys_disk_get_num_paths - returns number paths associated to this phys_num + * @ioc: Pointer to a Adapter Structure + * @phys_disk_num: io unit unique phys disk num generated by the ioc + * + * Return: + * returns number paths + **/ +int +mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num) +{ + CONFIGPARMS cfg; + ConfigPageHeader_t hdr; + dma_addr_t dma_handle; + pRaidPhysDiskPage1_t buffer = NULL; + int rc; + + memset(&cfg, 0 , sizeof(CONFIGPARMS)); + memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); + + hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION; + hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK; + hdr.PageNumber = 1; + cfg.cfghdr.hdr = &hdr; + cfg.physAddr = -1; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + + if (mpt_config(ioc, &cfg) != 0) { + rc = 0; + goto out; + } + + if (!hdr.PageLength) { + rc = 0; + goto out; + } + + buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, + &dma_handle); + + if (!buffer) { + rc = 0; + goto out; + } + + cfg.physAddr = dma_handle; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + cfg.pageAddr = phys_disk_num; + + if (mpt_config(ioc, &cfg) != 0) { + rc = 0; + goto out; + } + + rc = buffer->NumPhysDiskPaths; + out: + + if (buffer) + pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, + dma_handle); + + return rc; +} +EXPORT_SYMBOL(mpt_raid_phys_disk_get_num_paths); + +/** + * mpt_raid_phys_disk_pg1 - returns phys disk page 1 + * @ioc: Pointer to a Adapter Structure + * @phys_disk_num: io unit unique phys disk num generated by the ioc + * @phys_disk: requested payload data returned + * + * Return: + * 0 on success + * -EFAULT if read of config page header fails or data pointer not NULL + * -ENOMEM if pci_alloc failed + **/ +int +mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num, + RaidPhysDiskPage1_t *phys_disk) +{ + CONFIGPARMS cfg; + ConfigPageHeader_t hdr; + dma_addr_t dma_handle; + pRaidPhysDiskPage1_t buffer = NULL; + int rc; + int i; + __le64 sas_address; + + memset(&cfg, 0 , sizeof(CONFIGPARMS)); + memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); + rc = 0; + + hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION; + hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK; + hdr.PageNumber = 1; + cfg.cfghdr.hdr = &hdr; + cfg.physAddr = -1; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + + if (mpt_config(ioc, &cfg) != 0) { + rc = -EFAULT; + goto out; + } + + if (!hdr.PageLength) { + rc = -EFAULT; + goto out; + } + + buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, + &dma_handle); + + if (!buffer) { + rc = -ENOMEM; + goto out; + } + + cfg.physAddr = dma_handle; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + cfg.pageAddr = phys_disk_num; + + if (mpt_config(ioc, &cfg) != 0) { + rc = -EFAULT; + goto out; + } + + phys_disk->NumPhysDiskPaths = buffer->NumPhysDiskPaths; + 
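/*
 * The path-copy loop that follows moves each little-endian WWID out of
 * the DMA buffer with memcpy() and then byte-swaps it with
 * le64_to_cpu(). A self-contained demonstration of that
 * memcpy-then-swap pattern, with a user-space stand-in for the __le64
 * helper (example bytes invented):
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint64_t load_le64(const void *p)
{
	unsigned char b[8];
	uint64_t v = 0;
	int i;

	memcpy(b, p, sizeof(b)); /* safe for unaligned source fields */
	for (i = 7; i >= 0; i--)
		v = (v << 8) | b[i]; /* byte 0 is least significant */
	return v;
}

int main(void)
{
	const unsigned char raw[8] = {
		0xef, 0xcd, 0xab, 0x89, 0x67, 0x45, 0x23, 0x01
	};

	printf("wwid = 0x%016llx\n", (unsigned long long)load_le64(raw));
	return 0; /* prints wwid = 0x0123456789abcdef */
}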
phys_disk->PhysDiskNum = phys_disk_num; + for (i = 0; i < phys_disk->NumPhysDiskPaths; i++) { + phys_disk->Path[i].PhysDiskID = buffer->Path[i].PhysDiskID; + phys_disk->Path[i].PhysDiskBus = buffer->Path[i].PhysDiskBus; + phys_disk->Path[i].OwnerIdentifier = + buffer->Path[i].OwnerIdentifier; + phys_disk->Path[i].Flags = le16_to_cpu(buffer->Path[i].Flags); + memcpy(&sas_address, &buffer->Path[i].WWID, sizeof(__le64)); + sas_address = le64_to_cpu(sas_address); + memcpy(&phys_disk->Path[i].WWID, &sas_address, sizeof(__le64)); + memcpy(&sas_address, + &buffer->Path[i].OwnerWWID, sizeof(__le64)); + sas_address = le64_to_cpu(sas_address); + memcpy(&phys_disk->Path[i].OwnerWWID, + &sas_address, sizeof(__le64)); + } + + out: + + if (buffer) + pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, + dma_handle); + + return rc; +} +EXPORT_SYMBOL(mpt_raid_phys_disk_pg1); + + /** * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes * @ioc: Pointer to a Adapter Strucutre @@ -7170,6 +7325,18 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply) "id=%d channel=%d phys_num=%d", id, channel, phys_num); break; + case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED: + snprintf(evStr, EVENT_DESCR_STR_SZ, + "IR2: Dual Port Added: " + "id=%d channel=%d phys_num=%d", + id, channel, phys_num); + break; + case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED: + snprintf(evStr, EVENT_DESCR_STR_SZ, + "IR2: Dual Port Removed: " + "id=%d channel=%d phys_num=%d", + id, channel, phys_num); + break; default: ds = "IR2"; break; diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 91499d1275c4..4f3d4c34bcd8 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -958,6 +958,10 @@ extern void mpt_free_fw_memory(MPT_ADAPTER *ioc); extern int mpt_findImVolumes(MPT_ADAPTER *ioc); extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode); extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk); +extern int mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num, + pRaidPhysDiskPage1_t phys_disk); +extern int mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, + u8 phys_disk_num); extern int mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc); extern void mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc); extern void mpt_halt_firmware(MPT_ADAPTER *ioc); diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index da22141152d7..72158237f5e8 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -121,6 +121,7 @@ static void mptsas_expander_delete(MPT_ADAPTER *ioc, static void mptsas_send_expander_event(struct fw_event_work *fw_event); static void mptsas_not_responding_devices(MPT_ADAPTER *ioc); static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc); +static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id); static void mptsas_print_phy_data(MPT_ADAPTER *ioc, MPI_SAS_IO_UNIT0_PHY_DATA *phy_data) @@ -542,9 +543,10 @@ mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id, mutex_lock(&ioc->sas_device_info_mutex); list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list, list) { - if ((sas_info->sas_address == sas_address || - (sas_info->fw.channel == channel && - sas_info->fw.id == id))) { + if (!sas_info->is_logical_volume && + (sas_info->sas_address == sas_address || + (sas_info->fw.channel == channel && + sas_info->fw.id == id))) { list_del(&sas_info->list); kfree(sas_info); } @@ -616,6 +618,100 @@ 
mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id) sas_device.slot, enclosure_info.enclosure_logical_id); } +/** + * mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding + * each individual device to list + * @ioc: Pointer to MPT_ADAPTER structure + * @channel: fw mapped id's + * @id: + * + **/ +static void +mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc, + struct scsi_target *starget) +{ + CONFIGPARMS cfg; + ConfigPageHeader_t hdr; + dma_addr_t dma_handle; + pRaidVolumePage0_t buffer = NULL; + int i; + RaidPhysDiskPage0_t phys_disk; + struct mptsas_device_info *sas_info, *next; + + memset(&cfg, 0 , sizeof(CONFIGPARMS)); + memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); + hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME; + /* assumption that all volumes on channel = 0 */ + cfg.pageAddr = starget->id; + cfg.cfghdr.hdr = &hdr; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.timeout = 10; + + if (mpt_config(ioc, &cfg) != 0) + goto out; + + if (!hdr.PageLength) + goto out; + + buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, + &dma_handle); + + if (!buffer) + goto out; + + cfg.physAddr = dma_handle; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + if (mpt_config(ioc, &cfg) != 0) + goto out; + + if (!buffer->NumPhysDisks) + goto out; + + /* + * Adding entry for hidden components + */ + for (i = 0; i < buffer->NumPhysDisks; i++) { + + if (mpt_raid_phys_disk_pg0(ioc, + buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0) + continue; + + mptsas_add_device_component_by_fw(ioc, phys_disk.PhysDiskBus, + phys_disk.PhysDiskID); + + } + + /* + * Delete all matching devices out of the list + */ + mutex_lock(&ioc->sas_device_info_mutex); + list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list, + list) { + if (sas_info->is_logical_volume && sas_info->fw.id == + starget->id) { + list_del(&sas_info->list); + kfree(sas_info); + } + } + + sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL); + if (sas_info) { + sas_info->fw.id = starget->id; + sas_info->os.id = starget->id; + sas_info->os.channel = starget->channel; + sas_info->is_logical_volume = 1; + INIT_LIST_HEAD(&sas_info->list); + list_add_tail(&sas_info->list, &ioc->sas_device_info_list); + } + mutex_unlock(&ioc->sas_device_info_mutex); + + out: + if (buffer) + pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, + dma_handle); +} + /** * mptsas_add_device_component_starget - * @ioc: Pointer to MPT_ADAPTER structure @@ -817,6 +913,10 @@ mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id) if ((vdevice == NULL) || (vdevice->vtarget == NULL)) continue; + if ((vdevice->vtarget->tflags & + MPT_TARGET_FLAGS_RAID_COMPONENT || + vdevice->vtarget->raidVolume)) + continue; if (vdevice->vtarget->id == id && vdevice->vtarget->channel == channel) vtarget = vdevice->vtarget; @@ -1487,9 +1587,21 @@ mptsas_slave_configure(struct scsi_device *sdev) struct Scsi_Host *host = sdev->host; MPT_SCSI_HOST *hd = shost_priv(host); MPT_ADAPTER *ioc = hd->ioc; + VirtDevice *vdevice = sdev->hostdata; - if (sdev->channel == MPTSAS_RAID_CHANNEL) + if (vdevice->vtarget->deleted) { + sdev_printk(KERN_INFO, sdev, "clearing deleted flag\n"); + vdevice->vtarget->deleted = 0; + } + + /* + * RAID volumes placed beyond the last expected port. + * Ignore sending sas mode pages in that case.. 
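+ * (A RAID volume is not a SAS end device, so sas_read_port_mode_page() + * is skipped for MPTSAS_RAID_CHANNEL.)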
+ */ + if (sdev->channel == MPTSAS_RAID_CHANNEL) { + mptsas_add_device_component_starget_ir(ioc, scsi_target(sdev)); goto out; + } sas_read_port_mode_page(sdev); @@ -1525,9 +1637,18 @@ mptsas_target_alloc(struct scsi_target *starget) * RAID volumes placed beyond the last expected port. */ if (starget->channel == MPTSAS_RAID_CHANNEL) { - for (i=0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) - if (id == ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID) - channel = ioc->raid_data.pIocPg2->RaidVolume[i].VolumeBus; + if (!ioc->raid_data.pIocPg2) { + kfree(vtarget); + return -ENXIO; + } + for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { + if (id == ioc->raid_data.pIocPg2-> + RaidVolume[i].VolumeID) { + channel = ioc->raid_data.pIocPg2-> + RaidVolume[i].VolumeBus; + } + } + vtarget->raidVolume = 1; goto out; } @@ -3277,59 +3398,66 @@ mptsas_not_responding_devices(MPT_ADAPTER *ioc) mutex_lock(&ioc->sas_device_info_mutex); redo_device_scan: list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) { - sas_device.handle = 0; - retry_count = 0; + if (!sas_info->is_logical_volume) { + sas_device.handle = 0; + retry_count = 0; retry_page: - retval = mptsas_sas_device_pg0(ioc, &sas_device, + retval = mptsas_sas_device_pg0(ioc, &sas_device, (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << MPI_SAS_DEVICE_PGAD_FORM_SHIFT), (sas_info->fw.channel << 8) + sas_info->fw.id); - if (sas_device.handle) - continue; - if (retval == -EBUSY) { - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); - if (ioc->ioc_reset_in_progress) { - dfailprintk(ioc, - printk(MYIOC_s_DEBUG_FMT - "%s: exiting due to reset\n", - ioc->name, __func__)); - spin_unlock_irqrestore - (&ioc->taskmgmt_lock, flags); - mutex_unlock(&ioc->sas_device_info_mutex); - return; + if (sas_device.handle) + continue; + if (retval == -EBUSY) { + spin_lock_irqsave(&ioc->taskmgmt_lock, flags); + if (ioc->ioc_reset_in_progress) { + dfailprintk(ioc, + printk(MYIOC_s_DEBUG_FMT + "%s: exiting due to reset\n", + ioc->name, __func__)); + spin_unlock_irqrestore + (&ioc->taskmgmt_lock, flags); + mutex_unlock(&ioc-> + sas_device_info_mutex); + return; + } + spin_unlock_irqrestore(&ioc->taskmgmt_lock, + flags); } - spin_unlock_irqrestore(&ioc->taskmgmt_lock, - flags); - } - if (retval && (retval != -ENODEV)) { - if (retry_count < 10) { - retry_count++; - goto retry_page; - } else { - devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: Config page retry exceeded retry " - "count deleting device 0x%llx\n", - ioc->name, __func__, - sas_info->sas_address)); + if (retval && (retval != -ENODEV)) { + if (retry_count < 10) { + retry_count++; + goto retry_page; + } else { + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: Config page retry exceeded retry " + "count deleting device 0x%llx\n", + ioc->name, __func__, + sas_info->sas_address)); + } } - } - /* delete device */ - vtarget = mptsas_find_vtarget(ioc, + /* delete device */ + vtarget = mptsas_find_vtarget(ioc, sas_info->fw.channel, sas_info->fw.id); - if (vtarget) - vtarget->deleted = 1; - phy_info = mptsas_find_phyinfo_by_sas_address(ioc, - sas_info->sas_address); - if (phy_info) { - mptsas_del_end_device(ioc, phy_info); - goto redo_device_scan; - } + + if (vtarget) + vtarget->deleted = 1; + + phy_info = mptsas_find_phyinfo_by_sas_address(ioc, + sas_info->sas_address); + + if (phy_info) { + mptsas_del_end_device(ioc, phy_info); + goto redo_device_scan; + } + } else + mptsas_volume_delete(ioc, sas_info->fw.id); } - mutex_unlock(&ioc->sas_device_info_mutex); + mutex_lock(&ioc->sas_device_info_mutex); /* 
expanders */ mutex_lock(&ioc->sas_topology_mutex); @@ -3508,28 +3636,74 @@ mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address) return phy_info; } - +/** + * mptsas_find_phyinfo_by_phys_disk_num - + * @ioc: Pointer to MPT_ADAPTER structure + * @phys_disk_num: + * @channel: + * @id: + * + **/ static struct mptsas_phyinfo * -mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 channel, u8 id) +mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num, + u8 channel, u8 id) { - struct mptsas_portinfo *port_info; struct mptsas_phyinfo *phy_info = NULL; + struct mptsas_portinfo *port_info; + RaidPhysDiskPage1_t *phys_disk = NULL; + int num_paths; + u64 sas_address = 0; int i; + phy_info = NULL; + if (!ioc->raid_data.pIocPg3) + return NULL; + /* dual port support */ + num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num); + if (!num_paths) + goto out; + phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) + + (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL); + if (!phys_disk) + goto out; + mpt_raid_phys_disk_pg1(ioc, phys_disk_num, phys_disk); + for (i = 0; i < num_paths; i++) { + if ((phys_disk->Path[i].Flags & 1) != 0) + /* entry no longer valid */ + continue; + if ((id == phys_disk->Path[i].PhysDiskID) && + (channel == phys_disk->Path[i].PhysDiskBus)) { + memcpy(&sas_address, &phys_disk->Path[i].WWID, + sizeof(u64)); + phy_info = mptsas_find_phyinfo_by_sas_address(ioc, + sas_address); + goto out; + } + } + + out: + kfree(phys_disk); + if (phy_info) + return phy_info; + + /* + * Extra code to handle RAID0 case, where the sas_address is not updated + * in phys_disk_page_1 when hotswapped + */ mutex_lock(&ioc->sas_topology_mutex); list_for_each_entry(port_info, &ioc->sas_topology, list) { - for (i = 0; i < port_info->num_phys; i++) { + for (i = 0; i < port_info->num_phys && !phy_info; i++) { if (!mptsas_is_end_device( &port_info->phy_info[i].attached)) continue; if (port_info->phy_info[i].attached.phys_disk_num == ~0) continue; - if (port_info->phy_info[i].attached.phys_disk_num != id) - continue; - if (port_info->phy_info[i].attached.channel != channel) - continue; - phy_info = &port_info->phy_info[i]; - break; + if ((port_info->phy_info[i].attached.phys_disk_num == + phys_disk_num) && + (port_info->phy_info[i].attached.id == id) && + (port_info->phy_info[i].attached.channel == + channel)) + phy_info = &port_info->phy_info[i]; } } mutex_unlock(&ioc->sas_topology_mutex); @@ -3683,8 +3857,9 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event, mpt_findImVolumes(ioc); phy_info = mptsas_find_phyinfo_by_phys_disk_num( - ioc, hot_plug_info->channel, - hot_plug_info->phys_disk_num); + ioc, hot_plug_info->phys_disk_num, + hot_plug_info->channel, + hot_plug_info->id); mptsas_del_end_device(ioc, phy_info); break; @@ -4032,6 +4207,7 @@ mptsas_send_ir2_event(struct fw_event_work *fw_event) struct mptsas_hotplug_event hot_plug_info; MPI_EVENT_DATA_IR2 *ir2_data; u8 reasonCode; + RaidPhysDiskPage0_t phys_disk; ioc = fw_event->ioc; ir2_data = (MPI_EVENT_DATA_IR2 *)fw_event->event_data; @@ -4047,6 +4223,17 @@ mptsas_send_ir2_event(struct fw_event_work *fw_event) case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED: hot_plug_info.event_type = MPTSAS_ADD_INACTIVE_VOLUME; break; + case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED: + hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum; + hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK; + break; + case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED: + hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum; + 
mpt_raid_phys_disk_pg0(ioc, + ir2_data->PhysDiskNum, &phys_disk); + hot_plug_info.id = phys_disk.PhysDiskID; + hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK; + break; default: mptsas_free_fw_event(ioc, fw_event); return; @@ -4132,6 +4319,31 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) return 0; } +/* Delete a volume when no longer listed in ioc pg2 + */ +static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id) +{ + struct scsi_device *sdev; + int i; + + sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, id, 0); + if (!sdev) + return; + if (!ioc->raid_data.pIocPg2) + goto out; + if (!ioc->raid_data.pIocPg2->NumActiveVolumes) + goto out; + for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) + if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id) + goto release_sdev; + out: + printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, " + "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, id); + scsi_remove_device(sdev); + release_sdev: + scsi_device_put(sdev); +} + static int mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) { diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h index 9e0885a86d23..57258b60369e 100644 --- a/drivers/message/fusion/mptsas.h +++ b/drivers/message/fusion/mptsas.h @@ -82,6 +82,7 @@ struct mptsas_device_info { u32 device_info; /* specific bits for devices */ u16 slot; /* enclosure slot id */ u64 enclosure_logical_id; /*enclosure address */ + u8 is_logical_volume; /* is this logical volume */ }; struct mptsas_hotplug_event { diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 6424dcbd5908..cf1aba18a09f 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -2087,8 +2087,10 @@ int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id) { struct inactive_raid_component_info *component_info; - int i; + int i, j; + RaidPhysDiskPage1_t *phys_disk; int rc = 0; + int num_paths; if (!ioc->raid_data.pIocPg3) goto out; @@ -2100,6 +2102,45 @@ mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id) } } + if (ioc->bus_type != SAS) + goto out; + + /* + * Check if dual path + */ + for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) { + num_paths = mpt_raid_phys_disk_get_num_paths(ioc, + ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum); + if (num_paths < 2) + continue; + phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) + + (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL); + if (!phys_disk) + continue; + if ((mpt_raid_phys_disk_pg1(ioc, + ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum, + phys_disk))) { + kfree(phys_disk); + continue; + } + for (j = 0; j < num_paths; j++) { + if ((phys_disk->Path[j].Flags & + MPI_RAID_PHYSDISK1_FLAG_INVALID)) + continue; + if ((phys_disk->Path[j].Flags & + MPI_RAID_PHYSDISK1_FLAG_BROKEN)) + continue; + if ((id == phys_disk->Path[j].PhysDiskID) && + (channel == phys_disk->Path[j].PhysDiskBus)) { + rc = 1; + kfree(phys_disk); + goto out; + } + } + kfree(phys_disk); + } + + /* * Check inactive list for matching phys disks */ @@ -2124,8 +2165,10 @@ u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id) { struct inactive_raid_component_info *component_info; - int i; + int i, j; + RaidPhysDiskPage1_t *phys_disk; int rc = -ENXIO; + int num_paths; if (!ioc->raid_data.pIocPg3) goto out; @@ -2137,6 +2180,44 @@ mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id) } } + if (ioc->bus_type != SAS) + goto out; + + /* + * Check if dual path + */ + for (i = 0; i < 
ioc->raid_data.pIocPg3->NumPhysDisks; i++) { + num_paths = mpt_raid_phys_disk_get_num_paths(ioc, + ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum); + if (num_paths < 2) + continue; + phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) + + (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL); + if (!phys_disk) + continue; + if ((mpt_raid_phys_disk_pg1(ioc, + ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum, + phys_disk))) { + kfree(phys_disk); + continue; + } + for (j = 0; j < num_paths; j++) { + if ((phys_disk->Path[j].Flags & + MPI_RAID_PHYSDISK1_FLAG_INVALID)) + continue; + if ((phys_disk->Path[j].Flags & + MPI_RAID_PHYSDISK1_FLAG_BROKEN)) + continue; + if ((id == phys_disk->Path[j].PhysDiskID) && + (channel == phys_disk->Path[j].PhysDiskBus)) { + rc = phys_disk->PhysDiskNum; + kfree(phys_disk); + goto out; + } + } + kfree(phys_disk); + } + /* * Check inactive list for matching phys disks */ -- cgit v1.2.3 From 57e985136bfafdfcd72c4c7d91115955d225677e Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Fri, 29 May 2009 16:55:09 +0530 Subject: [SCSI] mpt fusion: Queue full event handling FW will report Queue full event to Driver and driver will handle this queue full event to SCSI Mid layer. Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptsas.c | 133 ++++++++++++++++++++++++++++++++++++++++ drivers/message/fusion/mptsas.h | 6 ++ 2 files changed, 139 insertions(+) (limited to 'drivers') diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 72158237f5e8..10a12d846e85 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -121,6 +121,7 @@ static void mptsas_expander_delete(MPT_ADAPTER *ioc, static void mptsas_send_expander_event(struct fw_event_work *fw_event); static void mptsas_not_responding_devices(MPT_ADAPTER *ioc); static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc); +static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event); static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id); static void mptsas_print_phy_data(MPT_ADAPTER *ioc, @@ -680,6 +681,18 @@ mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc, mptsas_add_device_component_by_fw(ioc, phys_disk.PhysDiskBus, phys_disk.PhysDiskID); + mutex_lock(&ioc->sas_device_info_mutex); + list_for_each_entry(sas_info, &ioc->sas_device_info_list, + list) { + if (!sas_info->is_logical_volume && + (sas_info->fw.channel == phys_disk.PhysDiskBus && + sas_info->fw.id == phys_disk.PhysDiskID)) { + sas_info->is_hidden_raid_component = 1; + sas_info->volume_id = starget->id; + } + } + mutex_unlock(&ioc->sas_device_info_mutex); + } /* @@ -746,6 +759,29 @@ mptsas_add_device_component_starget(MPT_ADAPTER *ioc, phy_info->attached.slot, enclosure_info.enclosure_logical_id); } +/** + * mptsas_del_device_component_by_os - Once a device has been removed, we + * mark the entry in the list as being cached + * @ioc: Pointer to MPT_ADAPTER structure + * @channel: os mapped id's + * @id: + * + **/ +static void +mptsas_del_device_component_by_os(MPT_ADAPTER *ioc, u8 channel, u8 id) +{ + struct mptsas_device_info *sas_info, *next; + + /* + * Set is_cached flag + */ + list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list, + list) { + if (sas_info->os.channel == channel && sas_info->os.id == id) + sas_info->is_cached = 1; + } +} + /** * mptsas_del_device_components - Cleaning the list * @ioc: Pointer to MPT_ADAPTER structure @@ -1576,6 +1612,9 @@ mptsas_firmware_event_work(struct work_struct *work) case 
MPI_EVENT_SAS_PHY_LINK_STATUS: mptsas_send_link_status_event(fw_event); break; + case MPI_EVENT_QUEUE_FULL: + mptsas_handle_queue_full_event(fw_event); + break; } } @@ -1705,6 +1744,9 @@ mptsas_target_destroy(struct scsi_target *starget) vtarget = starget->hostdata; + mptsas_del_device_component_by_os(ioc, starget->channel, + starget->id); + if (starget->channel == MPTSAS_RAID_CHANNEL) goto out; @@ -3398,6 +3440,8 @@ mptsas_not_responding_devices(MPT_ADAPTER *ioc) mutex_lock(&ioc->sas_device_info_mutex); redo_device_scan: list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) { + if (sas_info->is_cached) + continue; if (!sas_info->is_logical_volume) { sas_device.handle = 0; retry_count = 0; @@ -3612,6 +3656,95 @@ mptsas_scan_sas_topology(MPT_ADAPTER *ioc) } } + +static void +mptsas_handle_queue_full_event(struct fw_event_work *fw_event) +{ + MPT_ADAPTER *ioc; + EventDataQueueFull_t *qfull_data; + struct mptsas_device_info *sas_info; + struct scsi_device *sdev; + int depth; + int id = -1; + int channel = -1; + int fw_id, fw_channel; + u16 current_depth; + + + ioc = fw_event->ioc; + qfull_data = (EventDataQueueFull_t *)fw_event->event_data; + fw_id = qfull_data->TargetID; + fw_channel = qfull_data->Bus; + current_depth = le16_to_cpu(qfull_data->CurrentDepth); + + /* if hidden raid component, look for the volume id */ + mutex_lock(&ioc->sas_device_info_mutex); + if (mptscsih_is_phys_disk(ioc, fw_channel, fw_id)) { + list_for_each_entry(sas_info, &ioc->sas_device_info_list, + list) { + if (sas_info->is_cached || + sas_info->is_logical_volume) + continue; + if (sas_info->is_hidden_raid_component && + (sas_info->fw.channel == fw_channel && + sas_info->fw.id == fw_id)) { + id = sas_info->volume_id; + channel = MPTSAS_RAID_CHANNEL; + goto out; + } + } + } else { + list_for_each_entry(sas_info, &ioc->sas_device_info_list, + list) { + if (sas_info->is_cached || + sas_info->is_hidden_raid_component || + sas_info->is_logical_volume) + continue; + if (sas_info->fw.channel == fw_channel && + sas_info->fw.id == fw_id) { + id = sas_info->os.id; + channel = sas_info->os.channel; + goto out; + } + } + + } + + out: + mutex_unlock(&ioc->sas_device_info_mutex); + + if (id != -1) { + shost_for_each_device(sdev, ioc->sh) { + if (sdev->id == id && sdev->channel == channel) { + if (current_depth > sdev->queue_depth) { + sdev_printk(KERN_INFO, sdev, + "strange observation, the queue " + "depth is (%d) meanwhile fw queue " + "depth (%d)\n", sdev->queue_depth, + current_depth); + continue; + } + depth = scsi_track_queue_full(sdev, + current_depth - 1); + if (depth > 0) + sdev_printk(KERN_INFO, sdev, + "Queue depth reduced to (%d)\n", + depth); + else if (depth < 0) + sdev_printk(KERN_INFO, sdev, + "Tagged Command Queueing is being " + "disabled\n"); + else if (depth == 0) + sdev_printk(KERN_INFO, sdev, + "Queue depth not changed yet\n"); + } + } + } + + mptsas_free_fw_event(ioc, fw_event); +} + + static struct mptsas_phyinfo * mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address) { diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h index 57258b60369e..953c2bfcf6aa 100644 --- a/drivers/message/fusion/mptsas.h +++ b/drivers/message/fusion/mptsas.h @@ -83,6 +83,12 @@ struct mptsas_device_info { u16 slot; /* enclosure slot id */ u64 enclosure_logical_id; /*enclosure address */ u8 is_logical_volume; /* is this logical volume */ + /* this belongs to volume */ + u8 is_hidden_raid_component; + /* this valid when is_hidden_raid_component set */ + u8 volume_id; + /* cached 
data for a removed device */ + u8 is_cached; }; struct mptsas_hotplug_event { -- cgit v1.2.3 From db7051b2984d2c7d44b6178ad4c523500dff7f7c Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Fri, 29 May 2009 16:56:59 +0530 Subject: [SCSI] mpt fusion: Added support for Broadcast primitives Event handling Firmware is able to handle Broadcast primitives, but upstream driver does not have support for broadcast primitive handling. Now this patch is mainly to support broadcast primitives. Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.h | 1 + drivers/message/fusion/mptsas.c | 207 ++++++++++++++++++++++++++++++++++++++ drivers/message/fusion/mptscsih.c | 21 ++++ drivers/message/fusion/mptscsih.h | 1 + 4 files changed, 230 insertions(+) (limited to 'drivers') diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 4f3d4c34bcd8..1c8514dc31ca 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -758,6 +758,7 @@ typedef struct _MPT_ADAPTER struct scsi_cmnd **ScsiLookup; spinlock_t scsi_lookup_lock; u64 dma_mask; + u32 broadcast_aen_busy; char reset_work_q_name[MPT_KOBJ_NAME_LEN]; struct workqueue_struct *reset_work_q; struct delayed_work fault_reset_work; diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 10a12d846e85..88a1a6d3bc04 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -121,6 +121,7 @@ static void mptsas_expander_delete(MPT_ADAPTER *ioc, static void mptsas_send_expander_event(struct fw_event_work *fw_event); static void mptsas_not_responding_devices(MPT_ADAPTER *ioc); static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc); +static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event); static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event); static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id); @@ -287,6 +288,21 @@ mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event, spin_unlock_irqrestore(&ioc->fw_event_lock, flags); } +/* requeue a sas firmware event */ +static void +mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event, + unsigned long delay) +{ + unsigned long flags; + spin_lock_irqsave(&ioc->fw_event_lock, flags); + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task " + "(fw_event=0x%p)\n", ioc->name, __func__, fw_event)); + fw_event->retries++; + queue_delayed_work(ioc->fw_event_q, &fw_event->work, + msecs_to_jiffies(delay)); + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + /* free memory assoicated to a sas firmware event */ static void mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event) @@ -1606,6 +1622,9 @@ mptsas_firmware_event_work(struct work_struct *work) MPI_SAS_OP_CLEAR_NOT_PRESENT); mptsas_free_fw_event(ioc, fw_event); break; + case MPI_EVENT_SAS_BROADCAST_PRIMITIVE: + mptsas_broadcast_primative_work(fw_event); + break; case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE: mptsas_send_expander_event(fw_event); break; @@ -4325,6 +4344,182 @@ mptsas_send_raid_event(struct fw_event_work *fw_event) mptsas_free_fw_event(ioc, fw_event); } +/** + * mptsas_issue_tm - send mptsas internal tm request + * @ioc: Pointer to MPT_ADAPTER structure + * @type: Task Management type + * @channel: channel number for task management + * @id: Logical Target ID for reset (if appropriate) + * @lun: Logical unit for reset (if appropriate) + * @task_context: Context for the task to be aborted + * @timeout: timeout 
for task management control + * + * return 0 on success and -1 on failure: + * + */ +static int +mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun, + int task_context, ulong timeout, u8 *issue_reset) +{ + MPT_FRAME_HDR *mf; + SCSITaskMgmt_t *pScsiTm; + int retval; + unsigned long timeleft; + + *issue_reset = 0; + mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc); + if (mf == NULL) { + retval = -1; /* return failure */ + dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt request: no " + "msg frames!!\n", ioc->name)); + goto out; + } + + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request: mr = %p, " + "task_type = 0x%02X,\n\t timeout = %ld, fw_channel = %d, " + "fw_id = %d, lun = %lld,\n\t task_context = 0x%x\n", ioc->name, mf, + type, timeout, channel, id, (unsigned long long)lun, + task_context)); + + pScsiTm = (SCSITaskMgmt_t *) mf; + memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t)); + pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; + pScsiTm->TaskType = type; + pScsiTm->MsgFlags = 0; + pScsiTm->TargetID = id; + pScsiTm->Bus = channel; + pScsiTm->ChainOffset = 0; + pScsiTm->Reserved = 0; + pScsiTm->Reserved1 = 0; + pScsiTm->TaskMsgContext = task_context; + int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN); + + INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status) + CLEAR_MGMT_STATUS(ioc->internal_cmds.status) + retval = 0; + mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf); + + /* Now wait for the command to complete */ + timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, + timeout*HZ); + if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { + retval = -1; /* return failure */ + dtmprintk(ioc, printk(MYIOC_s_ERR_FMT + "TaskMgmt request: TIMED OUT!(mr=%p)\n", ioc->name, mf)); + mpt_free_msg_frame(ioc, mf); + if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) + goto out; + *issue_reset = 1; + goto out; + } + + if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { + retval = -1; /* return failure */ + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt request: failed with no reply\n", ioc->name)); + goto out; + } + + out: + CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status) + return retval; +} + +/** + * mptsas_broadcast_primative_work - Handle broadcast primitives + * @work: work queue payload containing info describing the event + * + * this will be handled in workqueue context. 
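+ * (A sketch of the flow, taken from the body below: each pending + * command is checked with a QUERY_TASK TM; commands the IOC no longer + * claims are cleaned up with an ABRT_TASK_SET TM.)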
+ */ +static void +mptsas_broadcast_primative_work(struct fw_event_work *fw_event) +{ + MPT_ADAPTER *ioc = fw_event->ioc; + MPT_FRAME_HDR *mf; + VirtDevice *vdevice; + int ii; + struct scsi_cmnd *sc; + SCSITaskMgmtReply_t *pScsiTmReply; + u8 issue_reset; + int task_context; + u8 channel, id; + int lun; + u32 termination_count; + u32 query_count; + + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s - enter\n", ioc->name, __func__)); + + mutex_lock(&ioc->taskmgmt_cmds.mutex); + if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { + mutex_unlock(&ioc->taskmgmt_cmds.mutex); + mptsas_requeue_fw_event(ioc, fw_event, 1000); + return; + } + + issue_reset = 0; + termination_count = 0; + query_count = 0; + mpt_findImVolumes(ioc); + pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply; + + for (ii = 0; ii < ioc->req_depth; ii++) { + if (ioc->fw_events_off) + goto out; + sc = mptscsih_get_scsi_lookup(ioc, ii); + if (!sc) + continue; + mf = MPT_INDEX_2_MFPTR(ioc, ii); + if (!mf) + continue; + task_context = mf->u.frame.hwhdr.msgctxu.MsgContext; + vdevice = sc->device->hostdata; + if (!vdevice || !vdevice->vtarget) + continue; + if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) + continue; /* skip hidden raid components */ + if (vdevice->vtarget->raidVolume) + continue; /* skip hidden raid components */ + channel = vdevice->vtarget->channel; + id = vdevice->vtarget->id; + lun = vdevice->lun; + if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK, + channel, id, (u64)lun, task_context, 30, &issue_reset)) + goto out; + query_count++; + termination_count += + le32_to_cpu(pScsiTmReply->TerminationCount); + if ((pScsiTmReply->IOCStatus == MPI_IOCSTATUS_SUCCESS) && + (pScsiTmReply->ResponseCode == + MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED || + pScsiTmReply->ResponseCode == + MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) + continue; + if (mptsas_issue_tm(ioc, + MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, + channel, id, (u64)lun, 0, 30, &issue_reset)) + goto out; + termination_count += + le32_to_cpu(pScsiTmReply->TerminationCount); + } + + out: + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s - exit, query_count = %d termination_count = %d\n", + ioc->name, __func__, query_count, termination_count)); + + ioc->broadcast_aen_busy = 0; + mpt_clear_taskmgmt_in_progress_flag(ioc); + mutex_unlock(&ioc->taskmgmt_cmds.mutex); + + if (issue_reset) { + printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", + ioc->name, __func__); + mpt_HardResetHandler(ioc, CAN_SLEEP); + } + mptsas_free_fw_event(ioc, fw_event); +} + /* * mptsas_send_ir2_event - handle exposing hidden disk when * an inactive raid volume is added @@ -4388,6 +4583,18 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) delay = msecs_to_jiffies(1); switch (event) { + case MPI_EVENT_SAS_BROADCAST_PRIMITIVE: + { + EVENT_DATA_SAS_BROADCAST_PRIMITIVE *broadcast_event_data = + (EVENT_DATA_SAS_BROADCAST_PRIMITIVE *)reply->Data; + if (broadcast_event_data->Primitive != + MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) + return 0; + if (ioc->broadcast_aen_busy) + return 0; + ioc->broadcast_aen_busy = 1; + break; + } case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: { EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data = diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index cf1aba18a09f..96681203d4a9 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -80,6 +80,7 @@ MODULE_VERSION(my_VERSION); /* * Other private/forward protos... 
*/ +struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i); static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i); static void mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd); static int SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd); @@ -2419,6 +2420,26 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR } } +/** + * mptscsih_get_scsi_lookup - retrieves scmd entry + * @ioc: Pointer to MPT_ADAPTER structure + * @i: index into the array + * + * Returns the scsi_cmd pointer + */ +struct scsi_cmnd * +mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) +{ + unsigned long flags; + struct scsi_cmnd *scmd; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + scmd = ioc->ScsiLookup[i]; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + return scmd; +} +EXPORT_SYMBOL(mptscsih_get_scsi_lookup); /** * mptscsih_getclear_scsi_lookup diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h index 572739565f6a..eb3f677528ac 100644 --- a/drivers/message/fusion/mptscsih.h +++ b/drivers/message/fusion/mptscsih.h @@ -133,4 +133,5 @@ extern void mptscsih_timer_expired(unsigned long data); extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id); extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id); extern struct device_attribute *mptscsih_host_attrs[]; +extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i); extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code); -- cgit v1.2.3 From fc847ab4318cd6ab6c231739ad51d2502d19a87a Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Tue, 9 Jun 2009 23:01:01 +0000 Subject: [SCSI] mpt fusion: fix up doc book comments Several of the doc book in the previous patches had incorrect multi-line short function descriptors. Fixed it all to be the correct single line descriptor. Signed-off-by: James Bottomley --- drivers/message/fusion/mptsas.c | 16 +++++++--------- drivers/message/fusion/mptscsih.c | 5 +---- 2 files changed, 8 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 88a1a6d3bc04..14c490a767a4 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -636,8 +636,7 @@ mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id) } /** - * mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding - * each individual device to list + * mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to list * @ioc: Pointer to MPT_ADAPTER structure * @channel: fw mapped id's * @id: @@ -776,8 +775,7 @@ mptsas_add_device_component_starget(MPT_ADAPTER *ioc, } /** - * mptsas_del_device_component_by_os - Once a device has been removed, we - * mark the entry in the list as being cached + * mptsas_del_device_component_by_os - Once a device has been removed, we mark the entry in the list as being cached * @ioc: Pointer to MPT_ADAPTER structure * @channel: os mapped id's * @id: @@ -1125,11 +1123,12 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc, } /** - * mptsas_taskmgmt_complete - Completion for TARGET_RESET after - * NOT_RESPONDING_EVENT, enable work queue to finish off removing device - * from upper layers. then send next TARGET_RESET in the queue. 
+ * mptsas_taskmgmt_complete - complete SAS task management function * @ioc: Pointer to MPT_ADAPTER structure * + * Completion for TARGET_RESET after NOT_RESPONDING_EVENT, enable work + * queue to finish off removing device from upper layers. then send next + * TARGET_RESET in the queue. **/ static int mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) @@ -1441,8 +1440,7 @@ mptsas_add_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info) } /** - * mptsas_del_end_device - report a deleted end device to sas transport - * layer + * mptsas_del_end_device - report a deleted end device to sas transport layer * @ioc: Pointer to MPT_ADAPTER structure * @phy_info: decribes attached device * diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 96681203d4a9..024e8305bcf2 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -2442,10 +2442,7 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) EXPORT_SYMBOL(mptscsih_get_scsi_lookup); /** - * mptscsih_getclear_scsi_lookup - * - * retrieves and clears scmd entry from ScsiLookup[] array list - * + * mptscsih_getclear_scsi_lookup - retrieves and clears scmd entry from ScsiLookup[] array list * @ioc: Pointer to MPT_ADAPTER structure * @i: index into the array * -- cgit v1.2.3 From fa047e4f6fa63a6e9d0ae4d7749538830d14a343 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Wed, 10 Jun 2009 10:25:56 +0200 Subject: HID: fix inverted wheel for bluetooth version of apple mighty mouse Bluetooth version of Apple Mighty mouse (0x05ac/0x030c) doesn't, according to multiple reports on linux-input@, need the same quirk as the USB version of this mouse (0x05ac/0x0304) does. Signed-off-by: Jiri Kosina --- drivers/hid/hid-apple.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers') diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index acbce5745b0c..303ccce05bb3 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -436,10 +436,6 @@ static const struct hid_device_id apple_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY), .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, - /* Apple wireless Mighty Mouse */ - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 0x030c), - .driver_data = APPLE_MIGHTYMOUSE | APPLE_INVERT_HWHEEL }, - { } }; MODULE_DEVICE_TABLE(hid, apple_devices); -- cgit v1.2.3 From 5f4417a156a6e44359effa9492de3ed5638a9b13 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 10 Jun 2009 14:37:20 +0200 Subject: ide: fix PowerMac bootup oops PowerMac bootup with CONFIG_IDE=y oopses in ide_pio_cycle_time(): because "ide: try to use PIO Mode 0 during probe if possible" causes pmac_ide_set_pio_mode() to be called before drive->id has been set. Bart points out other places which now need drive->id set earlier, so follow his advice to allocate it in ide_port_alloc_devices() (using kzalloc_node, without error message, as when allocating drive) and memset it for reuse in ide_port_init_devices_data(). Fixed in passing: ide_host_alloc() was missing ide_port_free_devices() from an error path. 
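The heart of the fix is the pointer-preserving reset now done in ide_port_init_devices_data() (the saved_id hunk in the diff below). As a minimal standalone sketch of that idiom (the struct and names here are illustrative stand-ins, not the driver's real types), the sub-allocation must be saved and restored around the memset, or the pointer is lost and the block leaks:

#include <string.h>

#define SECTOR_SIZE 512

struct drive_stub {			/* stand-in for ide_drive_t */
	unsigned short *id;		/* separately allocated id block */
	int media;
};

static void reset_drive_state(struct drive_stub *drive)
{
	unsigned short *saved_id = drive->id;	/* keep the sub-allocation */

	memset(drive, 0, sizeof(*drive));	/* wipe per-probe state */
	memset(saved_id, 0, SECTOR_SIZE);	/* wipe id data for reuse */
	drive->id = saved_id;			/* restore the pointer */
}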
Signed-off-by: Hugh Dickins Cc: Joao Ramos Cc: Sergei Shtylyov Cc: Benjamin Herrenschmidt Cc: Andrew Morton Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-probe.c | 47 +++++++++++++++++++++-------------------------- 1 file changed, 21 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index f9c2fb7d0005..f371b0de314f 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -465,23 +465,8 @@ static u8 probe_for_drive(ide_drive_t *drive) int rc; u8 cmd; - /* - * In order to keep things simple we have an id - * block for all drives at all times. If the device - * is pre ATA or refuses ATA/ATAPI identify we - * will add faked data to this. - * - * Also note that 0 everywhere means "can't do X" - */ - drive->dev_flags &= ~IDE_DFLAG_ID_READ; - drive->id = kzalloc(SECTOR_SIZE, GFP_KERNEL); - if (drive->id == NULL) { - printk(KERN_ERR "ide: out of memory for id data.\n"); - return 0; - } - m = (char *)&drive->id[ATA_ID_PROD]; strcpy(m, "UNKNOWN"); @@ -497,7 +482,7 @@ static u8 probe_for_drive(ide_drive_t *drive) } if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) - goto out_free; + return 0; /* identification failed? */ if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) { @@ -521,7 +506,7 @@ static u8 probe_for_drive(ide_drive_t *drive) } if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) - goto out_free; + return 0; /* The drive wasn't being helpful. Add generic info only */ if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) { @@ -535,9 +520,6 @@ static u8 probe_for_drive(ide_drive_t *drive) } return 1; -out_free: - kfree(drive->id); - return 0; } static void hwif_release_dev(struct device *dev) @@ -825,8 +807,6 @@ static int ide_port_setup_devices(ide_hwif_t *hwif) if (ide_init_queue(drive)) { printk(KERN_ERR "ide: failed to init %s\n", drive->name); - kfree(drive->id); - drive->id = NULL; drive->dev_flags &= ~IDE_DFLAG_PRESENT; continue; } @@ -955,9 +935,6 @@ static void drive_release_dev (struct device *dev) blk_cleanup_queue(drive->queue); drive->queue = NULL; - kfree(drive->id); - drive->id = NULL; - drive->dev_flags &= ~IDE_DFLAG_PRESENT; complete(&drive->gendev_rel_comp); @@ -1140,8 +1117,11 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif) ide_port_for_each_dev(i, drive, hwif) { u8 j = (hwif->index * MAX_DRIVES) + i; + u16 *saved_id = drive->id; memset(drive, 0, sizeof(*drive)); + memset(saved_id, 0, SECTOR_SIZE); + drive->id = saved_id; drive->media = ide_disk; drive->select = (i << 4) | ATA_DEVICE_OBS; @@ -1248,8 +1228,10 @@ static void ide_port_free_devices(ide_hwif_t *hwif) ide_drive_t *drive; int i; - ide_port_for_each_dev(i, drive, hwif) + ide_port_for_each_dev(i, drive, hwif) { + kfree(drive->id); kfree(drive); + } } static int ide_port_alloc_devices(ide_hwif_t *hwif, int node) @@ -1263,6 +1245,18 @@ static int ide_port_alloc_devices(ide_hwif_t *hwif, int node) if (drive == NULL) goto out_nomem; + /* + * In order to keep things simple we have an id + * block for all drives at all times. If the device + * is pre ATA or refuses ATA/ATAPI identify we + * will add faked data to this. + * + * Also note that 0 everywhere means "can't do X" + */ + drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node); + if (drive->id == NULL) + goto out_nomem; + hwif->devices[i] = drive; } return 0; @@ -1304,6 +1298,7 @@ struct ide_host *ide_host_alloc(const struct ide_port_info *d, if (idx < 0) { printk(KERN_ERR "%s: no free slot for interface\n", d ? 
d->name : "ide"); + ide_port_free_devices(hwif); kfree(hwif); continue; } -- cgit v1.2.3 From 5df3bc2d35bd5cd08053f71679b27577b42676d6 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Wed, 10 Jun 2009 14:37:21 +0200 Subject: ide: unexport ide_find_dma_mode() Acked-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/ide-dma.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index d9123ecae4a9..0bbf71f8e499 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c @@ -347,7 +347,6 @@ u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode) return mode; } -EXPORT_SYMBOL_GPL(ide_find_dma_mode); static int ide_tune_dma(ide_drive_t *drive) { -- cgit v1.2.3 From ad7c52d0988a8965989dc06d630c52a5bde849d5 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Wed, 10 Jun 2009 14:37:21 +0200 Subject: ide: re-implement ide_pci_init_one() on top of ide_pci_init_two() There should be no functional changes caused by this patch. Acked-by: Sergei Shtylyov Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/ide/setup-pci.c | 70 +++++++++++-------------------------------------- 1 file changed, 16 insertions(+), 54 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c index 5314edffc303..ab3db61d2ba0 100644 --- a/drivers/ide/setup-pci.c +++ b/drivers/ide/setup-pci.c @@ -1,7 +1,7 @@ /* * Copyright (C) 1998-2000 Andre Hedrick * Copyright (C) 1995-1998 Mark Lord - * Copyright (C) 2007 Bartlomiej Zolnierkiewicz + * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz * * May be copied or modified under the terms of the GNU General Public License */ @@ -534,61 +534,15 @@ out: return ret; } -int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d, - void *priv) -{ - struct ide_host *host; - struct ide_hw hw[2], *hws[] = { NULL, NULL }; - int ret; - - ret = ide_setup_pci_controller(dev, d, 1); - if (ret < 0) - goto out; - - ide_pci_setup_ports(dev, d, &hw[0], &hws[0]); - - host = ide_host_alloc(d, hws, 2); - if (host == NULL) { - ret = -ENOMEM; - goto out; - } - - host->dev[0] = &dev->dev; - - host->host_priv = priv; - - host->irq_flags = IRQF_SHARED; - - pci_set_drvdata(dev, host); - - ret = do_ide_setup_pci_device(dev, d, 1); - if (ret < 0) - goto out; - - /* fixup IRQ */ - if (ide_pci_is_in_compatibility_mode(dev)) { - hw[0].irq = pci_get_legacy_ide_irq(dev, 0); - hw[1].irq = pci_get_legacy_ide_irq(dev, 1); - } else - hw[1].irq = hw[0].irq = ret; - - ret = ide_host_register(host, d, hws); - if (ret) - ide_host_free(host); -out: - return ret; -} -EXPORT_SYMBOL_GPL(ide_pci_init_one); - int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, const struct ide_port_info *d, void *priv) { struct pci_dev *pdev[] = { dev1, dev2 }; struct ide_host *host; - int ret, i; + int ret, i, n_ports = dev2 ? 
4 : 2; struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL }; - for (i = 0; i < 2; i++) { + for (i = 0; i < n_ports / 2; i++) { ret = ide_setup_pci_controller(pdev[i], d, !i); if (ret < 0) goto out; @@ -596,23 +550,24 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]); } - host = ide_host_alloc(d, hws, 4); + host = ide_host_alloc(d, hws, n_ports); if (host == NULL) { ret = -ENOMEM; goto out; } host->dev[0] = &dev1->dev; - host->dev[1] = &dev2->dev; + if (dev2) + host->dev[1] = &dev2->dev; host->host_priv = priv; - host->irq_flags = IRQF_SHARED; pci_set_drvdata(pdev[0], host); - pci_set_drvdata(pdev[1], host); + if (dev2) + pci_set_drvdata(pdev[1], host); - for (i = 0; i < 2; i++) { + for (i = 0; i < n_ports / 2; i++) { ret = do_ide_setup_pci_device(pdev[i], d, !i); /* @@ -638,6 +593,13 @@ out: } EXPORT_SYMBOL_GPL(ide_pci_init_two); +int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d, + void *priv) +{ + return ide_pci_init_two(dev, NULL, d, priv); +} +EXPORT_SYMBOL_GPL(ide_pci_init_one); + void ide_pci_remove(struct pci_dev *dev) { struct ide_host *host = pci_get_drvdata(dev); -- cgit v1.2.3 From de6b20385b1c14f97ccdf7da173b4c9a7405083b Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 24 May 2009 20:01:26 +0300 Subject: [SCSI] libosd: Better printout of OSD target system information Shorten out the Attributes names. Align all results on column 24. Print system ID in a new line. Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_initiator.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 1ce6b24abab2..15f0bbc19c9c 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -118,39 +118,39 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps) _osd_ver_desc(or)); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_VENDOR_IDENTIFICATION [%s]\n", + OSD_INFO("VENDOR_IDENTIFICATION [%s]\n", (char *)pFirst); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_PRODUCT_IDENTIFICATION [%s]\n", + OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n", (char *)pFirst); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_PRODUCT_MODEL [%s]\n", + OSD_INFO("PRODUCT_MODEL [%s]\n", (char *)pFirst); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_PRODUCT_REVISION_LEVEL [%u]\n", + OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n", pFirst ? get_unaligned_be32(pFirst) : ~0U); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER [%s]\n", + OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n", (char *)pFirst); pFirst = get_attrs[a].val_ptr; - OSD_INFO("OSD_ATTR_RI_OSD_NAME [%s]\n", (char *)pFirst); + OSD_INFO("OSD_NAME [%s]\n", (char *)pFirst); a++; pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_TOTAL_CAPACITY [0x%llx]\n", + OSD_INFO("TOTAL_CAPACITY [0x%llx]\n", pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_USED_CAPACITY [0x%llx]\n", + OSD_INFO("USED_CAPACITY [0x%llx]\n", pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_NUMBER_OF_PARTITIONS [%llu]\n", + OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n", pFirst ? 
_LLU(get_unaligned_be64(pFirst)) : ~0ULL); if (a >= nelem) @@ -158,7 +158,7 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps) /* FIXME: Where are the time utilities */ pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_CLOCK [0x%02x%02x%02x%02x%02x%02x]\n", + OSD_INFO("CLOCK [0x%02x%02x%02x%02x%02x%02x]\n", ((char *)pFirst)[0], ((char *)pFirst)[1], ((char *)pFirst)[2], ((char *)pFirst)[3], ((char *)pFirst)[4], ((char *)pFirst)[5]); @@ -169,7 +169,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps) hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1, sid_dump, sizeof(sid_dump), true); - OSD_INFO("OSD_ATTR_RI_OSD_SYSTEM_ID(%d) [%s]\n", len, sid_dump); + OSD_INFO("OSD_SYSTEM_ID(%d)\n" + " [%s]\n", len, sid_dump); a++; } out: -- cgit v1.2.3 From 0e35afbc8b054e04a35faa796c72abb3b82bd33b Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 24 May 2009 20:02:22 +0300 Subject: [SCSI] libosd: osd_req_{read,write}_kern new API By popular demand, define useful wrappers for osd_req_read/write that receive kernel pointers. All users had their own. Also remove these from exofs. Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_initiator.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 15f0bbc19c9c..c98153bfb36d 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -789,6 +789,20 @@ void osd_req_write(struct osd_request *or, } EXPORT_SYMBOL(osd_req_write); +int osd_req_write_kern(struct osd_request *or, + const struct osd_obj_id *obj, u64 offset, void* buff, u64 len) +{ + struct request_queue *req_q = or->osd_dev->scsi_device->request_queue; + struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); + + if (IS_ERR(bio)) + return PTR_ERR(bio); + + osd_req_write(or, obj, bio, offset); + return 0; +} +EXPORT_SYMBOL(osd_req_write_kern); + /*TODO: void osd_req_append(struct osd_request *, const struct osd_obj_id *, struct bio *data_out); */ /*TODO: void osd_req_create_write(struct osd_request *, @@ -824,6 +838,20 @@ void osd_req_read(struct osd_request *or, } EXPORT_SYMBOL(osd_req_read); +int osd_req_read_kern(struct osd_request *or, + const struct osd_obj_id *obj, u64 offset, void* buff, u64 len) +{ + struct request_queue *req_q = or->osd_dev->scsi_device->request_queue; + struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); + + if (IS_ERR(bio)) + return PTR_ERR(bio); + + osd_req_read(or, obj, bio, offset); + return 0; +} +EXPORT_SYMBOL(osd_req_read_kern); + void osd_req_get_attributes(struct osd_request *or, const struct osd_obj_id *obj) { -- cgit v1.2.3 From 546881aea9787ed5c626ac99ab80158ea9ae0515 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 24 May 2009 20:04:00 +0300 Subject: [SCSI] libosd: Let _osd_req_finalize_data_integrity receive number of out_bytes _osd_req_finalize_data_integrity was trying to deduce the number of out_bytes from the passed osd_request->out.bio. This is wrong when the bio is chained. The caller of _osd_req_finalize_data_integrity has more readily available information and should just pass it. Also, in the light of future support for the CDB-continuation segment, this is a better solution.
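To see why inspecting only the first bio under-counts, here is a standalone sketch (the two field names are borrowed from struct bio; the rest is illustrative): walking the whole chain is the only way to obtain the byte count that the caller now passes down.

typedef unsigned long long u64;

struct bio_stub {			/* simplified from struct bio */
	unsigned int bi_size;		/* bytes carried by this bio */
	struct bio_stub *bi_next;	/* next bio in the chain, or NULL */
};

static u64 chain_bytes(const struct bio_stub *bio)
{
	u64 total = 0;

	for (; bio; bio = bio->bi_next)
		total += bio->bi_size;

	/* for a single-element chain this equals the first bio's bi_size */
	return total;
}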
Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_initiator.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index c98153bfb36d..ba2ebae305cd 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -1262,7 +1262,7 @@ static inline void osd_sec_parms_set_in_offset(bool is_v1, } static int _osd_req_finalize_data_integrity(struct osd_request *or, - bool has_in, bool has_out, const u8 *cap_key) + bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key) { struct osd_security_parameters *sec_parms = _osd_req_sec_params(or); int ret; @@ -1277,8 +1277,7 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or, }; unsigned pad; - or->out_data_integ.data_bytes = cpu_to_be64( - or->out.bio ? or->out.bio->bi_size : 0); + or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes); or->out_data_integ.set_attributes_bytes = cpu_to_be64( or->set_attr.total_bytes); or->out_data_integ.get_attributes_bytes = cpu_to_be64( @@ -1370,6 +1369,7 @@ int osd_finalize_request(struct osd_request *or, { struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); bool has_in, has_out; + u64 out_data_bytes = or->out.total_bytes; int ret; if (options & OSD_REQ_FUA) @@ -1439,7 +1439,8 @@ int osd_finalize_request(struct osd_request *or, } } - ret = _osd_req_finalize_data_integrity(or, has_in, has_out, cap_key); + ret = _osd_req_finalize_data_integrity(or, has_in, has_out, + out_data_bytes, cap_key); if (ret) return ret; -- cgit v1.2.3 From 62f469b596dd0aadf046a69027087c18db43734e Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 24 May 2009 20:04:26 +0300 Subject: [SCSI] libosd: osd_req_{read,write} takes a length parameter To support chained bios we cannot inspect only the first bio, as before. The caller shall pass the total length of the request, i.e. sum_bytes(bio-chain). Also, since the bio might be a chain, we don't set its direction on behalf of its callers. The bio direction should be properly set prior to this call. So fix a couple of write users that now need to set the bio direction properly. [In this patch I change both library code and user sites at exofs, to make it easy on integration. It should be submitted via James's scsi-misc tree.]
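A hedged sketch of the resulting calling convention (kernel context assumed; or, obj, and the length/bio variables are illustrative, not taken from the patch):

	/* the caller owns the direction bit now; set it before the call */
	bio->bi_rw |= (1 << BIO_RW);	/* mark the whole chain as a write */
	osd_req_write(or, obj, offset, bio, total_len);

	/* for reads BIO_RW stays clear, and the length is passed the same way */
	osd_req_read(or, obj, offset, read_bio, read_len);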
Signed-off-by: Boaz Harrosh CC: Jeff Garzik Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_initiator.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index ba2ebae305cd..3f5ec578e6c6 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -779,13 +779,14 @@ EXPORT_SYMBOL(osd_req_remove_object); */ void osd_req_write(struct osd_request *or, - const struct osd_obj_id *obj, struct bio *bio, u64 offset) + const struct osd_obj_id *obj, u64 offset, + struct bio *bio, u64 len) { - _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, bio->bi_size); + _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len); WARN_ON(or->out.bio || or->out.total_bytes); - bio->bi_rw |= (1 << BIO_RW); + WARN_ON(0 == bio_rw_flagged(bio, BIO_RW)); or->out.bio = bio; - or->out.total_bytes = bio->bi_size; + or->out.total_bytes = len; } EXPORT_SYMBOL(osd_req_write); @@ -798,7 +799,8 @@ int osd_req_write_kern(struct osd_request *or, if (IS_ERR(bio)) return PTR_ERR(bio); - osd_req_write(or, obj, bio, offset); + bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */ + osd_req_write(or, obj, offset, bio, len); return 0; } EXPORT_SYMBOL(osd_req_write_kern); @@ -828,13 +830,14 @@ void osd_req_flush_object(struct osd_request *or, EXPORT_SYMBOL(osd_req_flush_object); void osd_req_read(struct osd_request *or, - const struct osd_obj_id *obj, struct bio *bio, u64 offset) + const struct osd_obj_id *obj, u64 offset, + struct bio *bio, u64 len) { - _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, bio->bi_size); + _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); WARN_ON(or->in.bio || or->in.total_bytes); - bio->bi_rw &= ~(1 << BIO_RW); + WARN_ON(1 == bio_rw_flagged(bio, BIO_RW)); or->in.bio = bio; - or->in.total_bytes = bio->bi_size; + or->in.total_bytes = len; } EXPORT_SYMBOL(osd_req_read); @@ -847,7 +850,7 @@ int osd_req_read_kern(struct osd_request *or, const struct osd_obj_id *obj, u64 offset, void* buff, u64 len) { - osd_req_read(or, obj, bio, offset); + osd_req_read(or, obj, offset, bio, len); return 0; } EXPORT_SYMBOL(osd_req_read_kern); -- cgit v1.2.3 From fc2fac5b5f11e2bee3bf37215c8746236f5ea188 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 24 May 2009 20:04:43 +0300 Subject: [SCSI] libosd: Define an osd_dev wrapper to retrieve the request_queue libosd users that need to work with bios must sometimes use the request_queue associated with the osd_dev. Make a wrapper for that, and convert all in-tree users.
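The wrapper's definition lands in a header that is not part of this hunk; judging from the expression it replaces at every call site (and the scsi_device member visible elsewhere in this series), it is presumably equivalent to:

static inline struct request_queue *osd_request_queue(struct osd_dev *od)
{
	return od->scsi_device->request_queue;
}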
Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_initiator.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 3f5ec578e6c6..3959797149fd 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -670,7 +670,7 @@ static int _osd_req_list_objects(struct osd_request *or, __be16 action, const struct osd_obj_id *obj, osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem) { - struct request_queue *q = or->osd_dev->scsi_device->request_queue; + struct request_queue *q = osd_request_queue(or->osd_dev); u64 len = nelem * sizeof(osd_id) + sizeof(*list); struct bio *bio; @@ -793,7 +793,7 @@ EXPORT_SYMBOL(osd_req_write); int osd_req_write_kern(struct osd_request *or, const struct osd_obj_id *obj, u64 offset, void* buff, u64 len) { - struct request_queue *req_q = or->osd_dev->scsi_device->request_queue; + struct request_queue *req_q = osd_request_queue(or->osd_dev); struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); if (IS_ERR(bio)) @@ -844,7 +844,7 @@ EXPORT_SYMBOL(osd_req_read); int osd_req_read_kern(struct osd_request *or, const struct osd_obj_id *obj, u64 offset, void* buff, u64 len) { - struct request_queue *req_q = or->osd_dev->scsi_device->request_queue; + struct request_queue *req_q = osd_request_queue(or->osd_dev); struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); if (IS_ERR(bio)) -- cgit v1.2.3 From 021e2230d6c04d80289fceb2d21c9ce93a615b32 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 24 May 2009 20:05:05 +0300 Subject: [SCSI] osduld: use filp_open() when looking up an osd-device This patch was inspired by Al Viro, for simplifying and fixing the retrieval of osd-devices by in-kernel users, e.g. file systems. In-kernel users now go through the same path user mode does, by opening a file on the osd char-device, and thus hold a reference to both the device and the module. A file pointer was added to the osd_dev structure, which is now allocated for each user. The internal osd_dev is no longer exposed outside of the uld. I wanted to do that for a long time, so each libosd user can have its own defaults on the device. The API is left the same, so user code need not change. It is no longer necessary to open/close a file handle on the osd char-device from user mode before mounting an exofs on it.
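A hedged sketch of the in-kernel usage this enables (the device path and error handling are illustrative; kernel context assumed):

	struct osd_dev *od;

	od = osduld_path_lookup("/dev/osd0");	/* takes file + module references */
	if (IS_ERR(od))
		return PTR_ERR(od);

	/* ... build and execute OSD requests against od ... */

	osduld_put_device(od);			/* drops the references and frees od */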
Signed-off-by: Boaz Harrosh CC: Al Viro Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_uld.c | 66 +++++++++++++++++++++------------------------- 1 file changed, 30 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index 22b59e13ba83..0bdef3390902 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include @@ -175,10 +176,9 @@ static const struct file_operations osd_fops = { struct osd_dev *osduld_path_lookup(const char *name) { - struct path path; - struct inode *inode; - struct cdev *cdev; - struct osd_uld_device *uninitialized_var(oud); + struct osd_uld_device *oud; + struct osd_dev *od; + struct file *file; int error; if (!name || !*name) { @@ -186,52 +186,46 @@ struct osd_dev *osduld_path_lookup(const char *name) return ERR_PTR(-EINVAL); } - error = kern_path(name, LOOKUP_FOLLOW, &path); - if (error) { - OSD_ERR("path_lookup of %s failed=>%d\n", name, error); - return ERR_PTR(error); - } + od = kzalloc(sizeof(*od), GFP_KERNEL); + if (!od) + return ERR_PTR(-ENOMEM); - inode = path.dentry->d_inode; - error = -EINVAL; /* Not the right device e.g osd_uld_device */ - if (!S_ISCHR(inode->i_mode)) { - OSD_DEBUG("!S_ISCHR()\n"); - goto out; + file = filp_open(name, O_RDWR, 0); + if (IS_ERR(file)) { + error = PTR_ERR(file); + goto free_od; } - cdev = inode->i_cdev; - if (!cdev) { - OSD_ERR("Before mounting an OSD Based filesystem\n"); - OSD_ERR(" user-mode must open+close the %s device\n", name); - OSD_ERR(" Example: bash: echo < %s\n", name); - goto out; + if (file->f_op != &osd_fops){ + error = -EINVAL; + goto close_file; } - /* The Magic wand. Is it our char-dev */ - /* TODO: Support sg devices */ - if (cdev->owner != THIS_MODULE) { - OSD_ERR("Error mounting %s - is not an OSD device\n", name); - goto out; - } + oud = file->private_data; - oud = container_of(cdev, struct osd_uld_device, cdev); + *od = oud->od; + od->file = file; - __uld_get(oud); - error = 0; + return od; -out: - path_put(&path); - return error ? ERR_PTR(error) : &oud->od; +close_file: + fput(file); +free_od: + kfree(od); + return ERR_PTR(error); } EXPORT_SYMBOL(osduld_path_lookup); void osduld_put_device(struct osd_dev *od) { - if (od) { - struct osd_uld_device *oud = container_of(od, - struct osd_uld_device, od); - __uld_put(oud); + if (od && !IS_ERR(od)) { + struct osd_uld_device *oud = od->file->private_data; + + BUG_ON(od->scsi_device != oud->od.scsi_device); + + fput(od->file); + kfree(od); } } EXPORT_SYMBOL(osduld_put_device); -- cgit v1.2.3 From 03306793e686fd895ab8fa095bb9ec33658ea53a Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 24 May 2009 20:05:43 +0300 Subject: [SCSI] libosd: Use REQ_QUIET requests. libosd has it's own sense decoding and printout. Don't let scsi_lib duplicate that printout. 
(Which is done wrong in regard to osd commands) Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_initiator.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 3959797149fd..71341ad32344 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -1340,6 +1340,8 @@ static int _init_blk_request(struct osd_request *or, or->request = req; req->cmd_type = REQ_TYPE_BLOCK_PC; + req->cmd_flags |= REQ_QUIET; + req->timeout = or->timeout; req->retries = or->retries; req->sense = or->sense; -- cgit v1.2.3 From 3860c97bd60a4525bb62eb90e3e7d2f02662ac59 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 24 May 2009 20:05:58 +0300 Subject: [SCSI] osd: Remove out-of-tree left overs * Delete Makefile. It is only used for out-of-tree compilation and was never needed. It slipped in by mistake. * Remove from Kbuild all the out of tree stuff as promised. Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- drivers/scsi/osd/Kbuild | 25 ------------------------- drivers/scsi/osd/Makefile | 37 ------------------------------------- 2 files changed, 62 deletions(-) delete mode 100755 drivers/scsi/osd/Makefile (limited to 'drivers') diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild index 0e207aa67d16..5fd73d77c3af 100644 --- a/drivers/scsi/osd/Kbuild +++ b/drivers/scsi/osd/Kbuild @@ -11,31 +11,6 @@ # it under the terms of the GNU General Public License version 2 # -ifneq ($(OSD_INC),) -# we are built out-of-tree Kconfigure everything as on - -CONFIG_SCSI_OSD_INITIATOR=m -ccflags-y += -DCONFIG_SCSI_OSD_INITIATOR -DCONFIG_SCSI_OSD_INITIATOR_MODULE - -CONFIG_SCSI_OSD_ULD=m -ccflags-y += -DCONFIG_SCSI_OSD_ULD -DCONFIG_SCSI_OSD_ULD_MODULE - -# CONFIG_SCSI_OSD_DPRINT_SENSE = -# 0 - no print of errors -# 1 - print errors -# 2 - errors + warrnings -ccflags-y += -DCONFIG_SCSI_OSD_DPRINT_SENSE=1 - -# Uncomment to turn debug on -# ccflags-y += -DCONFIG_SCSI_OSD_DEBUG - -# if we are built out-of-tree and the hosting kernel has OSD headers -# then "ccflags-y +=" will not pick the out-off-tree headers. Only by doing -# this it will work. This might break in future kernels -LINUXINCLUDE := -I$(OSD_INC) $(LINUXINCLUDE) - -endif - # libosd.ko - osd-initiator library libosd-y := osd_initiator.o obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o diff --git a/drivers/scsi/osd/Makefile b/drivers/scsi/osd/Makefile deleted file mode 100755 index d905344f83ba..000000000000 --- a/drivers/scsi/osd/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# -# Makefile for the OSD modules (out of tree) -# -# Copyright (C) 2008 Panasas Inc. All rights reserved. -# -# Authors: -# Boaz Harrosh -# Benny Halevy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 -# -# This Makefile is used to call the kernel Makefile in case of an out-of-tree -# build. -# $KSRC should point to a Kernel source tree otherwise host's default is -# used. (eg. /lib/modules/`uname -r`/build) - -# include path for out-of-tree Headers -OSD_INC ?= `pwd`/../../../include - -# allow users to override these -# e.g. 
to compile for a kernel that you aren't currently running -KSRC ?= /lib/modules/$(shell uname -r)/build -KBUILD_OUTPUT ?= -ARCH ?= -V ?= 0 - -# this is the basic Kbuild out-of-tree invocation, with the M= option -KBUILD_BASE = +$(MAKE) -C $(KSRC) M=`pwd` KBUILD_OUTPUT=$(KBUILD_OUTPUT) ARCH=$(ARCH) V=$(V) - -all: libosd - -libosd: ; - $(KBUILD_BASE) OSD_INC=$(OSD_INC) modules - -clean: - $(KBUILD_BASE) clean -- cgit v1.2.3 From b43d65f7e818485664037a46367cfb15af05bd8c Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 9 Jun 2009 08:11:42 +0100 Subject: [ARM] 5546/1: ARM PL022 SSP/SPI driver v3 This adds a driver for the ARM PL022 PrimeCell SSP/SPI driver found in the U300 platforms as well as in some ARM reference hardware, and in a modified version on the Nomadik board. Reviewed-by: Alessandro Rubini Reviewed-by: Russell King Reviewed-by: Baruch Siach Signed-off-by: Linus Walleij Signed-off-by: Russell King --- drivers/spi/Kconfig | 9 + drivers/spi/Makefile | 1 + drivers/spi/amba-pl022.c | 1866 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 1876 insertions(+) create mode 100644 drivers/spi/amba-pl022.c (limited to 'drivers') diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 7c61251bea61..8e7c17e4461f 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -171,6 +171,15 @@ config SPI_ORION help This enables using the SPI master controller on the Orion chips. +config SPI_PL022 + tristate "ARM AMBA PL022 SSP controller (EXPERIMENTAL)" + depends on ARM_AMBA && EXPERIMENTAL + default y if MACH_U300 + help + This selects the ARM(R) AMBA(R) PrimeCell PL022 SSP + controller. If you have an embedded system with an AMBA(R) + bus and a PL022 controller, say Y or M here. + config SPI_PXA2XX tristate "PXA2xx SSP SPI master" depends on ARCH_PXA && EXPERIMENTAL diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 5d0451936d86..ecfadb180482 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o obj-$(CONFIG_SPI_ORION) += orion_spi.o +obj-$(CONFIG_SPI_PL022) += amba-pl022.o obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c new file mode 100644 index 000000000000..da76797ce8b9 --- /dev/null +++ b/drivers/spi/amba-pl022.c @@ -0,0 +1,1866 @@ +/* + * drivers/spi/amba-pl022.c + * + * A driver for the ARM PL022 PrimeCell SSP/SPI bus master. + * + * Copyright (C) 2008-2009 ST-Ericsson AB + * Copyright (C) 2006 STMicroelectronics Pvt. Ltd. + * + * Author: Linus Walleij + * + * Initial version inspired by: + * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c + * Initial adoption to PL022 by: + * Sachin Verma + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +/* + * TODO: + * - add timeout on polled transfers + * - add generic DMA framework support + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This macro is used to define some register default values. + * reg is masked with mask, the OR:ed with an (again masked) + * val shifted sb steps to the left. + */ +#define SSP_WRITE_BITS(reg, val, mask, sb) \ + ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask)))) + +/* + * This macro is also used to define some default values. + * It will just shift val by sb steps to the left and mask + * the result with mask. + */ +#define GEN_MASK_BITS(val, mask, sb) \ + (((val)<<(sb)) & (mask)) + +#define DRIVE_TX 0 +#define DO_NOT_DRIVE_TX 1 + +#define DO_NOT_QUEUE_DMA 0 +#define QUEUE_DMA 1 + +#define RX_TRANSFER 1 +#define TX_TRANSFER 2 + +/* + * Macros to access SSP Registers with their offsets + */ +#define SSP_CR0(r) (r + 0x000) +#define SSP_CR1(r) (r + 0x004) +#define SSP_DR(r) (r + 0x008) +#define SSP_SR(r) (r + 0x00C) +#define SSP_CPSR(r) (r + 0x010) +#define SSP_IMSC(r) (r + 0x014) +#define SSP_RIS(r) (r + 0x018) +#define SSP_MIS(r) (r + 0x01C) +#define SSP_ICR(r) (r + 0x020) +#define SSP_DMACR(r) (r + 0x024) +#define SSP_ITCR(r) (r + 0x080) +#define SSP_ITIP(r) (r + 0x084) +#define SSP_ITOP(r) (r + 0x088) +#define SSP_TDR(r) (r + 0x08C) + +#define SSP_PID0(r) (r + 0xFE0) +#define SSP_PID1(r) (r + 0xFE4) +#define SSP_PID2(r) (r + 0xFE8) +#define SSP_PID3(r) (r + 0xFEC) + +#define SSP_CID0(r) (r + 0xFF0) +#define SSP_CID1(r) (r + 0xFF4) +#define SSP_CID2(r) (r + 0xFF8) +#define SSP_CID3(r) (r + 0xFFC) + +/* + * SSP Control Register 0 - SSP_CR0 + */ +#define SSP_CR0_MASK_DSS (0x1FUL << 0) +#define SSP_CR0_MASK_HALFDUP (0x1UL << 5) +#define SSP_CR0_MASK_SPO (0x1UL << 6) +#define SSP_CR0_MASK_SPH (0x1UL << 7) +#define SSP_CR0_MASK_SCR (0xFFUL << 8) +#define SSP_CR0_MASK_CSS (0x1FUL << 16) +#define SSP_CR0_MASK_FRF (0x3UL << 21) + +/* + * SSP Control Register 0 - SSP_CR1 + */ +#define SSP_CR1_MASK_LBM (0x1UL << 0) +#define SSP_CR1_MASK_SSE (0x1UL << 1) +#define SSP_CR1_MASK_MS (0x1UL << 2) +#define SSP_CR1_MASK_SOD (0x1UL << 3) +#define SSP_CR1_MASK_RENDN (0x1UL << 4) +#define SSP_CR1_MASK_TENDN (0x1UL << 5) +#define SSP_CR1_MASK_MWAIT (0x1UL << 6) +#define SSP_CR1_MASK_RXIFLSEL (0x7UL << 7) +#define SSP_CR1_MASK_TXIFLSEL (0x7UL << 10) + +/* + * SSP Data Register - SSP_DR + */ +#define SSP_DR_MASK_DATA 0xFFFFFFFF + +/* + * SSP Status Register - SSP_SR + */ +#define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */ +#define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */ +#define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */ +#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */ +#define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */ + +/* + * SSP Clock Prescale Register - SSP_CPSR + */ +#define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0) + +/* + * SSP Interrupt Mask Set/Clear Register - SSP_IMSC + */ +#define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */ +#define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */ +#define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */ +#define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */ + +/* + * SSP Raw Interrupt Status Register - SSP_RIS + */ +/* Receive Overrun Raw Interrupt status */ +#define SSP_RIS_MASK_RORRIS (0x1UL << 0) +/* Receive Timeout Raw Interrupt status */ +#define 
SSP_RIS_MASK_RTRIS (0x1UL << 1) +/* Receive FIFO Raw Interrupt status */ +#define SSP_RIS_MASK_RXRIS (0x1UL << 2) +/* Transmit FIFO Raw Interrupt status */ +#define SSP_RIS_MASK_TXRIS (0x1UL << 3) + +/* + * SSP Masked Interrupt Status Register - SSP_MIS + */ +/* Receive Overrun Masked Interrupt status */ +#define SSP_MIS_MASK_RORMIS (0x1UL << 0) +/* Receive Timeout Masked Interrupt status */ +#define SSP_MIS_MASK_RTMIS (0x1UL << 1) +/* Receive FIFO Masked Interrupt status */ +#define SSP_MIS_MASK_RXMIS (0x1UL << 2) +/* Transmit FIFO Masked Interrupt status */ +#define SSP_MIS_MASK_TXMIS (0x1UL << 3) + +/* + * SSP Interrupt Clear Register - SSP_ICR + */ +/* Receive Overrun Raw Clear Interrupt bit */ +#define SSP_ICR_MASK_RORIC (0x1UL << 0) +/* Receive Timeout Clear Interrupt bit */ +#define SSP_ICR_MASK_RTIC (0x1UL << 1) + +/* + * SSP DMA Control Register - SSP_DMACR + */ +/* Receive DMA Enable bit */ +#define SSP_DMACR_MASK_RXDMAE (0x1UL << 0) +/* Transmit DMA Enable bit */ +#define SSP_DMACR_MASK_TXDMAE (0x1UL << 1) + +/* + * SSP Integration Test control Register - SSP_ITCR + */ +#define SSP_ITCR_MASK_ITEN (0x1UL << 0) +#define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1) + +/* + * SSP Integration Test Input Register - SSP_ITIP + */ +#define ITIP_MASK_SSPRXD (0x1UL << 0) +#define ITIP_MASK_SSPFSSIN (0x1UL << 1) +#define ITIP_MASK_SSPCLKIN (0x1UL << 2) +#define ITIP_MASK_RXDMAC (0x1UL << 3) +#define ITIP_MASK_TXDMAC (0x1UL << 4) +#define ITIP_MASK_SSPTXDIN (0x1UL << 5) + +/* + * SSP Integration Test output Register - SSP_ITOP + */ +#define ITOP_MASK_SSPTXD (0x1UL << 0) +#define ITOP_MASK_SSPFSSOUT (0x1UL << 1) +#define ITOP_MASK_SSPCLKOUT (0x1UL << 2) +#define ITOP_MASK_SSPOEn (0x1UL << 3) +#define ITOP_MASK_SSPCTLOEn (0x1UL << 4) +#define ITOP_MASK_RORINTR (0x1UL << 5) +#define ITOP_MASK_RTINTR (0x1UL << 6) +#define ITOP_MASK_RXINTR (0x1UL << 7) +#define ITOP_MASK_TXINTR (0x1UL << 8) +#define ITOP_MASK_INTR (0x1UL << 9) +#define ITOP_MASK_RXDMABREQ (0x1UL << 10) +#define ITOP_MASK_RXDMASREQ (0x1UL << 11) +#define ITOP_MASK_TXDMABREQ (0x1UL << 12) +#define ITOP_MASK_TXDMASREQ (0x1UL << 13) + +/* + * SSP Test Data Register - SSP_TDR + */ +#define TDR_MASK_TESTDATA (0xFFFFFFFF) + +/* + * Message State + * we use the spi_message.state (void *) pointer to + * hold a single state value, that's why all this + * (void *) casting is done here. 
+ */ +#define STATE_START ((void *) 0) +#define STATE_RUNNING ((void *) 1) +#define STATE_DONE ((void *) 2) +#define STATE_ERROR ((void *) -1) + +/* + * Queue State + */ +#define QUEUE_RUNNING (0) +#define QUEUE_STOPPED (1) +/* + * SSP State - Whether Enabled or Disabled + */ +#define SSP_DISABLED (0) +#define SSP_ENABLED (1) + +/* + * SSP DMA State - Whether DMA Enabled or Disabled + */ +#define SSP_DMA_DISABLED (0) +#define SSP_DMA_ENABLED (1) + +/* + * SSP Clock Defaults + */ +#define NMDK_SSP_DEFAULT_CLKRATE 0x2 +#define NMDK_SSP_DEFAULT_PRESCALE 0x40 + +/* + * SSP Clock Parameter ranges + */ +#define CPSDVR_MIN 0x02 +#define CPSDVR_MAX 0xFE +#define SCR_MIN 0x00 +#define SCR_MAX 0xFF + +/* + * SSP Interrupt related Macros + */ +#define DEFAULT_SSP_REG_IMSC 0x0UL +#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC +#define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC) + +#define CLEAR_ALL_INTERRUPTS 0x3 + + +/* + * The type of reading going on on this chip + */ +enum ssp_reading { + READING_NULL, + READING_U8, + READING_U16, + READING_U32 +}; + +/** + * The type of writing going on on this chip + */ +enum ssp_writing { + WRITING_NULL, + WRITING_U8, + WRITING_U16, + WRITING_U32 +}; + +/** + * struct vendor_data - vendor-specific config parameters + * for PL022 derivates + * @fifodepth: depth of FIFOs (both) + * @max_bpw: maximum number of bits per word + * @unidir: supports unidirection transfers + */ +struct vendor_data { + int fifodepth; + int max_bpw; + bool unidir; +}; + +/** + * struct pl022 - This is the private SSP driver data structure + * @adev: AMBA device model hookup + * @phybase: The physical memory where the SSP device resides + * @virtbase: The virtual memory where the SSP is mapped + * @master: SPI framework hookup + * @master_info: controller-specific data from machine setup + * @regs: SSP controller register's virtual address + * @pump_messages: Work struct for scheduling work to the workqueue + * @lock: spinlock to syncronise access to driver data + * @workqueue: a workqueue on which any spi_message request is queued + * @busy: workqueue is busy + * @run: workqueue is running + * @pump_transfers: Tasklet used in Interrupt Transfer mode + * @cur_msg: Pointer to current spi_message being processed + * @cur_transfer: Pointer to current spi_transfer + * @cur_chip: pointer to current clients chip(assigned from controller_state) + * @tx: current position in TX buffer to be read + * @tx_end: end position in TX buffer to be read + * @rx: current position in RX buffer to be written + * @rx_end: end position in RX buffer to be written + * @readingtype: the type of read currently going on + * @writingtype: the type or write currently going on + */ +struct pl022 { + struct amba_device *adev; + struct vendor_data *vendor; + resource_size_t phybase; + void __iomem *virtbase; + struct clk *clk; + struct spi_master *master; + struct pl022_ssp_controller *master_info; + /* Driver message queue */ + struct workqueue_struct *workqueue; + struct work_struct pump_messages; + spinlock_t queue_lock; + struct list_head queue; + int busy; + int run; + /* Message transfer pump */ + struct tasklet_struct pump_transfers; + struct spi_message *cur_msg; + struct spi_transfer *cur_transfer; + struct chip_data *cur_chip; + void *tx; + void *tx_end; + void *rx; + void *rx_end; + enum ssp_reading read; + enum ssp_writing write; +}; + +/** + * struct chip_data - To maintain runtime state of SSP for each client chip + * @cr0: Value of control register CR0 of SSP + * @cr1: Value of control register CR1 
of SSP + * @dmacr: Value of DMA control Register of SSP + * @cpsr: Value of Clock prescale register + * @n_bytes: how many bytes(power of 2) reqd for a given data width of client + * @enable_dma: Whether to enable DMA or not + * @write: function ptr to be used to write when doing xfer for this chip + * @read: function ptr to be used to read when doing xfer for this chip + * @cs_control: chip select callback provided by chip + * @xfer_type: polling/interrupt/DMA + * + * Runtime state of the SSP controller, maintained per chip, + * This would be set according to the current message that would be served + */ +struct chip_data { + u16 cr0; + u16 cr1; + u16 dmacr; + u16 cpsr; + u8 n_bytes; + u8 enable_dma:1; + enum ssp_reading read; + enum ssp_writing write; + void (*cs_control) (u32 command); + int xfer_type; +}; + +/** + * null_cs_control - Dummy chip select function + * @command: select/delect the chip + * + * If no chip select function is provided by client this is used as dummy + * chip select + */ +static void null_cs_control(u32 command) +{ + pr_debug("pl022: dummy chip select control, CS=0x%x\n", command); +} + +/** + * giveback - current spi_message is over, schedule next message and call + * callback of this message. Assumes that caller already + * set message->status; dma and pio irqs are blocked + * @pl022: SSP driver private data structure + */ +static void giveback(struct pl022 *pl022) +{ + struct spi_transfer *last_transfer; + unsigned long flags; + struct spi_message *msg; + void (*curr_cs_control) (u32 command); + + /* + * This local reference to the chip select function + * is needed because we set curr_chip to NULL + * as a step toward termininating the message. + */ + curr_cs_control = pl022->cur_chip->cs_control; + spin_lock_irqsave(&pl022->queue_lock, flags); + msg = pl022->cur_msg; + pl022->cur_msg = NULL; + pl022->cur_transfer = NULL; + pl022->cur_chip = NULL; + queue_work(pl022->workqueue, &pl022->pump_messages); + spin_unlock_irqrestore(&pl022->queue_lock, flags); + + last_transfer = list_entry(msg->transfers.prev, + struct spi_transfer, + transfer_list); + + /* Delay if requested before any change in chip select */ + if (last_transfer->delay_usecs) + /* + * FIXME: This runs in interrupt context. + * Is this really smart? + */ + udelay(last_transfer->delay_usecs); + + /* + * Drop chip select UNLESS cs_change is true or we are returning + * a message with an error, or next message is for another chip + */ + if (!last_transfer->cs_change) + curr_cs_control(SSP_CHIP_DESELECT); + else { + struct spi_message *next_msg; + + /* Holding of cs was hinted, but we need to make sure + * the next message is for the same chip. Don't waste + * time with the following tests unless this was hinted. + * + * We cannot postpone this until pump_messages, because + * after calling msg->complete (below) the driver that + * sent the current message could be unloaded, which + * could invalidate the cs_control() callback... 
+ */ + + /* get a pointer to the next message, if any */ + spin_lock_irqsave(&pl022->queue_lock, flags); + if (list_empty(&pl022->queue)) + next_msg = NULL; + else + next_msg = list_entry(pl022->queue.next, + struct spi_message, queue); + spin_unlock_irqrestore(&pl022->queue_lock, flags); + + /* see if the next and current messages point + * to the same chip + */ + if (next_msg && next_msg->spi != msg->spi) + next_msg = NULL; + if (!next_msg || msg->state == STATE_ERROR) + curr_cs_control(SSP_CHIP_DESELECT); + } + msg->state = NULL; + if (msg->complete) + msg->complete(msg->context); + /* This message is completed, so let's turn off the clock! */ + clk_disable(pl022->clk); +} + +/** + * flush - flush the FIFO to reach a clean state + * @pl022: SSP driver private data structure + */ +static int flush(struct pl022 *pl022) +{ + unsigned long limit = loops_per_jiffy << 1; + + dev_dbg(&pl022->adev->dev, "flush\n"); + do { + while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) + readw(SSP_DR(pl022->virtbase)); + } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--); + return limit; +} + +/** + * restore_state - Load configuration of current chip + * @pl022: SSP driver private data structure + */ +static void restore_state(struct pl022 *pl022) +{ + struct chip_data *chip = pl022->cur_chip; + + writew(chip->cr0, SSP_CR0(pl022->virtbase)); + writew(chip->cr1, SSP_CR1(pl022->virtbase)); + writew(chip->dmacr, SSP_DMACR(pl022->virtbase)); + writew(chip->cpsr, SSP_CPSR(pl022->virtbase)); + writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); + writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); +} + +/** + * load_ssp_default_config - Load default configuration for SSP + * @pl022: SSP driver private data structure + */ + +/* + * Default SSP Register Values + */ +#define DEFAULT_SSP_REG_CR0 ( \ + GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ + GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \ + GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ + GEN_MASK_BITS(SSP_CLK_FALLING_EDGE, SSP_CR0_MASK_SPH, 7) | \ + GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ + GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \ + GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \ +) + +#define DEFAULT_SSP_REG_CR1 ( \ + GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \ + GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ + GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ + GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \ + GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN, 4) | \ + GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN, 5) | \ + GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT, 6) |\ + GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL, 7) | \ + GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL, 10) \ +) + +#define DEFAULT_SSP_REG_CPSR ( \ + GEN_MASK_BITS(NMDK_SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \ +) + +#define DEFAULT_SSP_REG_DMACR (\ + GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \ + GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \ +) + + +static void load_ssp_default_config(struct pl022 *pl022) +{ + writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase)); + writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase)); + writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase)); + writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase)); + writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); + writew(CLEAR_ALL_INTERRUPTS, 
SSP_ICR(pl022->virtbase)); +} + +/** + * This will write to TX and read from RX according to the parameters + * set in pl022. + */ +static void readwriter(struct pl022 *pl022) +{ + + /* + * The FIFO depth is different inbetween primecell variants. + * I believe filling in too much in the FIFO might cause + * errons in 8bit wide transfers on ARM variants (just 8 words + * FIFO, means only 8x8 = 64 bits in FIFO) at least. + * + * FIXME: currently we have no logic to account for this. + * perhaps there is even something broken in HW regarding + * 8bit transfers (it doesn't fail on 16bit) so this needs + * more investigation... + */ + dev_dbg(&pl022->adev->dev, + "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n", + __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end); + + /* Read as much as you can */ + while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) + && (pl022->rx < pl022->rx_end)) { + switch (pl022->read) { + case READING_NULL: + readw(SSP_DR(pl022->virtbase)); + break; + case READING_U8: + *(u8 *) (pl022->rx) = + readw(SSP_DR(pl022->virtbase)) & 0xFFU; + break; + case READING_U16: + *(u16 *) (pl022->rx) = + (u16) readw(SSP_DR(pl022->virtbase)); + break; + case READING_U32: + *(u32 *) (pl022->rx) = + readl(SSP_DR(pl022->virtbase)); + break; + } + pl022->rx += (pl022->cur_chip->n_bytes); + } + /* + * Write as much as you can, while keeping an eye on the RX FIFO! + */ + while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF) + && (pl022->tx < pl022->tx_end)) { + switch (pl022->write) { + case WRITING_NULL: + writew(0x0, SSP_DR(pl022->virtbase)); + break; + case WRITING_U8: + writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase)); + break; + case WRITING_U16: + writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase)); + break; + case WRITING_U32: + writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase)); + break; + } + pl022->tx += (pl022->cur_chip->n_bytes); + /* + * This inner reader takes care of things appearing in the RX + * FIFO as we're transmitting. This will happen a lot since the + * clock starts running when you put things into the TX FIFO, + * and then things are continously clocked into the RX FIFO. 
+ */ + while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) + && (pl022->rx < pl022->rx_end)) { + switch (pl022->read) { + case READING_NULL: + readw(SSP_DR(pl022->virtbase)); + break; + case READING_U8: + *(u8 *) (pl022->rx) = + readw(SSP_DR(pl022->virtbase)) & 0xFFU; + break; + case READING_U16: + *(u16 *) (pl022->rx) = + (u16) readw(SSP_DR(pl022->virtbase)); + break; + case READING_U32: + *(u32 *) (pl022->rx) = + readl(SSP_DR(pl022->virtbase)); + break; + } + pl022->rx += (pl022->cur_chip->n_bytes); + } + } + /* + * When we exit here the TX FIFO should be full and the RX FIFO + * should be empty + */ +} + + +/** + * next_transfer - Move to the Next transfer in the current spi message + * @pl022: SSP driver private data structure + * + * This function moves though the linked list of spi transfers in the + * current spi message and returns with the state of current spi + * message i.e whether its last transfer is done(STATE_DONE) or + * Next transfer is ready(STATE_RUNNING) + */ +static void *next_transfer(struct pl022 *pl022) +{ + struct spi_message *msg = pl022->cur_msg; + struct spi_transfer *trans = pl022->cur_transfer; + + /* Move to next transfer */ + if (trans->transfer_list.next != &msg->transfers) { + pl022->cur_transfer = + list_entry(trans->transfer_list.next, + struct spi_transfer, transfer_list); + return STATE_RUNNING; + } + return STATE_DONE; +} +/** + * pl022_interrupt_handler - Interrupt handler for SSP controller + * + * This function handles interrupts generated for an interrupt based transfer. + * If a receive overrun (ROR) interrupt is there then we disable SSP, flag the + * current message's state as STATE_ERROR and schedule the tasklet + * pump_transfers which will do the postprocessing of the current message by + * calling giveback(). Otherwise it reads data from RX FIFO till there is no + * more data, and writes data in TX FIFO till it is not full. If we complete + * the transfer we move to the next transfer and schedule the tasklet. + */ +static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) +{ + struct pl022 *pl022 = dev_id; + struct spi_message *msg = pl022->cur_msg; + u16 irq_status = 0; + u16 flag = 0; + + if (unlikely(!msg)) { + dev_err(&pl022->adev->dev, + "bad message state in interrupt handler"); + /* Never fail */ + return IRQ_HANDLED; + } + + /* Read the Interrupt Status Register */ + irq_status = readw(SSP_MIS(pl022->virtbase)); + + if (unlikely(!irq_status)) + return IRQ_NONE; + + /* This handles the error code interrupts */ + if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { + /* + * Overrun interrupt - bail out since our Data has been + * corrupted + */ + dev_err(&pl022->adev->dev, + "FIFO overrun\n"); + if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) + dev_err(&pl022->adev->dev, + "RXFIFO is full\n"); + if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF) + dev_err(&pl022->adev->dev, + "TXFIFO is full\n"); + + /* + * Disable and clear interrupts, disable SSP, + * mark message with bad status so it can be + * retried. 
+ */ + writew(DISABLE_ALL_INTERRUPTS, + SSP_IMSC(pl022->virtbase)); + writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); + writew((readw(SSP_CR1(pl022->virtbase)) & + (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); + msg->state = STATE_ERROR; + + /* Schedule message queue handler */ + tasklet_schedule(&pl022->pump_transfers); + return IRQ_HANDLED; + } + + readwriter(pl022); + + if ((pl022->tx == pl022->tx_end) && (flag == 0)) { + flag = 1; + /* Disable Transmit interrupt */ + writew(readw(SSP_IMSC(pl022->virtbase)) & + (~SSP_IMSC_MASK_TXIM), + SSP_IMSC(pl022->virtbase)); + } + + /* + * Since all transactions must write as much as shall be read, + * we can conclude the entire transaction once RX is complete. + * At this point, all TX will always be finished. + */ + if (pl022->rx >= pl022->rx_end) { + writew(DISABLE_ALL_INTERRUPTS, + SSP_IMSC(pl022->virtbase)); + writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); + if (unlikely(pl022->rx > pl022->rx_end)) { + dev_warn(&pl022->adev->dev, "read %u surplus " + "bytes (did you request an odd " + "number of bytes on a 16bit bus?)\n", + (u32) (pl022->rx - pl022->rx_end)); + } + /* Update total bytes transfered */ + msg->actual_length += pl022->cur_transfer->len; + if (pl022->cur_transfer->cs_change) + pl022->cur_chip-> + cs_control(SSP_CHIP_DESELECT); + /* Move to next transfer */ + msg->state = next_transfer(pl022); + tasklet_schedule(&pl022->pump_transfers); + return IRQ_HANDLED; + } + + return IRQ_HANDLED; +} + +/** + * This sets up the pointers to memory for the next message to + * send out on the SPI bus. + */ +static int set_up_next_transfer(struct pl022 *pl022, + struct spi_transfer *transfer) +{ + int residue; + + /* Sanity check the message for this bus width */ + residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes; + if (unlikely(residue != 0)) { + dev_err(&pl022->adev->dev, + "message of %u bytes to transmit but the current " + "chip bus has a data width of %u bytes!\n", + pl022->cur_transfer->len, + pl022->cur_chip->n_bytes); + dev_err(&pl022->adev->dev, "skipping this message\n"); + return -EIO; + } + pl022->tx = (void *)transfer->tx_buf; + pl022->tx_end = pl022->tx + pl022->cur_transfer->len; + pl022->rx = (void *)transfer->rx_buf; + pl022->rx_end = pl022->rx + pl022->cur_transfer->len; + pl022->write = + pl022->tx ? pl022->cur_chip->write : WRITING_NULL; + pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL; + return 0; +} + +/** + * pump_transfers - Tasklet function which schedules next interrupt transfer + * when running in interrupt transfer mode. + * @data: SSP driver private data structure + * + */ +static void pump_transfers(unsigned long data) +{ + struct pl022 *pl022 = (struct pl022 *) data; + struct spi_message *message = NULL; + struct spi_transfer *transfer = NULL; + struct spi_transfer *previous = NULL; + + /* Get current state information */ + message = pl022->cur_msg; + transfer = pl022->cur_transfer; + + /* Handle for abort */ + if (message->state == STATE_ERROR) { + message->status = -EIO; + giveback(pl022); + return; + } + + /* Handle end of message */ + if (message->state == STATE_DONE) { + message->status = 0; + giveback(pl022); + return; + } + + /* Delay if requested at end of transfer before CS change */ + if (message->state == STATE_RUNNING) { + previous = list_entry(transfer->transfer_list.prev, + struct spi_transfer, + transfer_list); + if (previous->delay_usecs) + /* + * FIXME: This runs in interrupt context. + * Is this really smart? 
+ */ + udelay(previous->delay_usecs); + + /* Drop chip select only if cs_change is requested */ + if (previous->cs_change) + pl022->cur_chip->cs_control(SSP_CHIP_SELECT); + } else { + /* STATE_START */ + message->state = STATE_RUNNING; + } + + if (set_up_next_transfer(pl022, transfer)) { + message->state = STATE_ERROR; + message->status = -EIO; + giveback(pl022); + return; + } + /* Flush the FIFOs and let's go! */ + flush(pl022); + writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); +} + +/** + * NOT IMPLEMENTED + * configure_dma - It configures the DMA pipes for DMA transfers + * @data: SSP driver's private data structure + * + */ +static int configure_dma(void *data) +{ + struct pl022 *pl022 = data; + dev_dbg(&pl022->adev->dev, "configure DMA\n"); + return -ENOTSUPP; +} + +/** + * do_dma_transfer - It handles transfers of the current message + * if it is DMA xfer. + * NOT FULLY IMPLEMENTED + * @data: SSP driver's private data structure + */ +static void do_dma_transfer(void *data) +{ + struct pl022 *pl022 = data; + + if (configure_dma(data)) { + dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n"); + goto err_config_dma; + } + + /* TODO: Implememt DMA setup of pipes here */ + + /* Enable target chip, set up transfer */ + pl022->cur_chip->cs_control(SSP_CHIP_SELECT); + if (set_up_next_transfer(pl022, pl022->cur_transfer)) { + /* Error path */ + pl022->cur_msg->state = STATE_ERROR; + pl022->cur_msg->status = -EIO; + giveback(pl022); + return; + } + /* Enable SSP */ + writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), + SSP_CR1(pl022->virtbase)); + + /* TODO: Enable the DMA transfer here */ + return; + + err_config_dma: + pl022->cur_msg->state = STATE_ERROR; + pl022->cur_msg->status = -EIO; + giveback(pl022); + return; +} + +static void do_interrupt_transfer(void *data) +{ + struct pl022 *pl022 = data; + + /* Enable target chip */ + pl022->cur_chip->cs_control(SSP_CHIP_SELECT); + if (set_up_next_transfer(pl022, pl022->cur_transfer)) { + /* Error path */ + pl022->cur_msg->state = STATE_ERROR; + pl022->cur_msg->status = -EIO; + giveback(pl022); + return; + } + /* Enable SSP, turn on interrupts */ + writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), + SSP_CR1(pl022->virtbase)); + writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); +} + +static void do_polling_transfer(void *data) +{ + struct pl022 *pl022 = data; + struct spi_message *message = NULL; + struct spi_transfer *transfer = NULL; + struct spi_transfer *previous = NULL; + struct chip_data *chip; + + chip = pl022->cur_chip; + message = pl022->cur_msg; + + while (message->state != STATE_DONE) { + /* Handle for abort */ + if (message->state == STATE_ERROR) + break; + transfer = pl022->cur_transfer; + + /* Delay if requested at end of transfer */ + if (message->state == STATE_RUNNING) { + previous = + list_entry(transfer->transfer_list.prev, + struct spi_transfer, transfer_list); + if (previous->delay_usecs) + udelay(previous->delay_usecs); + if (previous->cs_change) + pl022->cur_chip->cs_control(SSP_CHIP_SELECT); + } else { + /* STATE_START */ + message->state = STATE_RUNNING; + pl022->cur_chip->cs_control(SSP_CHIP_SELECT); + } + + /* Configuration Changing Per Transfer */ + if (set_up_next_transfer(pl022, transfer)) { + /* Error path */ + message->state = STATE_ERROR; + break; + } + /* Flush FIFOs and enable SSP */ + flush(pl022); + writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), + SSP_CR1(pl022->virtbase)); + + dev_dbg(&pl022->adev->dev, "POLLING TRANSFER ONGOING ... 
\n"); + /* FIXME: insert a timeout so we don't hang here indefinately */ + while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) + readwriter(pl022); + + /* Update total byte transfered */ + message->actual_length += pl022->cur_transfer->len; + if (pl022->cur_transfer->cs_change) + pl022->cur_chip->cs_control(SSP_CHIP_DESELECT); + /* Move to next transfer */ + message->state = next_transfer(pl022); + } + + /* Handle end of message */ + if (message->state == STATE_DONE) + message->status = 0; + else + message->status = -EIO; + + giveback(pl022); + return; +} + +/** + * pump_messages - Workqueue function which processes spi message queue + * @data: pointer to private data of SSP driver + * + * This function checks if there is any spi message in the queue that + * needs processing and delegate control to appropriate function + * do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer() + * based on the kind of the transfer + * + */ +static void pump_messages(struct work_struct *work) +{ + struct pl022 *pl022 = + container_of(work, struct pl022, pump_messages); + unsigned long flags; + + /* Lock queue and check for queue work */ + spin_lock_irqsave(&pl022->queue_lock, flags); + if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) { + pl022->busy = 0; + spin_unlock_irqrestore(&pl022->queue_lock, flags); + return; + } + /* Make sure we are not already running a message */ + if (pl022->cur_msg) { + spin_unlock_irqrestore(&pl022->queue_lock, flags); + return; + } + /* Extract head of queue */ + pl022->cur_msg = + list_entry(pl022->queue.next, struct spi_message, queue); + + list_del_init(&pl022->cur_msg->queue); + pl022->busy = 1; + spin_unlock_irqrestore(&pl022->queue_lock, flags); + + /* Initial message state */ + pl022->cur_msg->state = STATE_START; + pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next, + struct spi_transfer, + transfer_list); + + /* Setup the SPI using the per chip configuration */ + pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); + /* + * We enable the clock here, then the clock will be disabled when + * giveback() is called in each method (poll/interrupt/DMA) + */ + clk_enable(pl022->clk); + restore_state(pl022); + flush(pl022); + + if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) + do_polling_transfer(pl022); + else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER) + do_interrupt_transfer(pl022); + else + do_dma_transfer(pl022); +} + + +static int __init init_queue(struct pl022 *pl022) +{ + INIT_LIST_HEAD(&pl022->queue); + spin_lock_init(&pl022->queue_lock); + + pl022->run = QUEUE_STOPPED; + pl022->busy = 0; + + tasklet_init(&pl022->pump_transfers, + pump_transfers, (unsigned long)pl022); + + INIT_WORK(&pl022->pump_messages, pump_messages); + pl022->workqueue = create_singlethread_workqueue( + dev_name(pl022->master->dev.parent)); + if (pl022->workqueue == NULL) + return -EBUSY; + + return 0; +} + + +static int start_queue(struct pl022 *pl022) +{ + unsigned long flags; + + spin_lock_irqsave(&pl022->queue_lock, flags); + + if (pl022->run == QUEUE_RUNNING || pl022->busy) { + spin_unlock_irqrestore(&pl022->queue_lock, flags); + return -EBUSY; + } + + pl022->run = QUEUE_RUNNING; + pl022->cur_msg = NULL; + pl022->cur_transfer = NULL; + pl022->cur_chip = NULL; + spin_unlock_irqrestore(&pl022->queue_lock, flags); + + queue_work(pl022->workqueue, &pl022->pump_messages); + + return 0; +} + + +static int stop_queue(struct pl022 *pl022) +{ + unsigned long flags; + unsigned limit = 500; + int status = 0; + + 
spin_lock_irqsave(&pl022->queue_lock, flags); + + /* This is a bit lame, but is optimized for the common execution path. + * A wait_queue on the pl022->busy could be used, but then the common + * execution path (pump_messages) would be required to call wake_up or + * friends on every SPI message. Do this instead */ + pl022->run = QUEUE_STOPPED; + while (!list_empty(&pl022->queue) && pl022->busy && limit--) { + spin_unlock_irqrestore(&pl022->queue_lock, flags); + msleep(10); + spin_lock_irqsave(&pl022->queue_lock, flags); + } + + if (!list_empty(&pl022->queue) || pl022->busy) + status = -EBUSY; + + spin_unlock_irqrestore(&pl022->queue_lock, flags); + + return status; +} + +static int destroy_queue(struct pl022 *pl022) +{ + int status; + + status = stop_queue(pl022); + /* we are unloading the module or failing to load (only two calls + * to this routine), and neither call can handle a return value. + * However, destroy_workqueue calls flush_workqueue, and that will + * block until all work is done. If the reason that stop_queue + * timed out is that the work will never finish, then it does no + * good to call destroy_workqueue, so return anyway. */ + if (status != 0) + return status; + + destroy_workqueue(pl022->workqueue); + + return 0; +} + +static int verify_controller_parameters(struct pl022 *pl022, + struct pl022_config_chip *chip_info) +{ + if ((chip_info->lbm != LOOPBACK_ENABLED) + && (chip_info->lbm != LOOPBACK_DISABLED)) { + dev_err(chip_info->dev, + "loopback Mode is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) + || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { + dev_err(chip_info->dev, + "interface is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && + (!pl022->vendor->unidir)) { + dev_err(chip_info->dev, + "unidirectional mode not supported in this " + "hardware version\n"); + return -EINVAL; + } + if ((chip_info->hierarchy != SSP_MASTER) + && (chip_info->hierarchy != SSP_SLAVE)) { + dev_err(chip_info->dev, + "hierarchy is configured incorrectly\n"); + return -EINVAL; + } + if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN) + || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) { + dev_err(chip_info->dev, + "cpsdvsr is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->endian_rx != SSP_RX_MSB) + && (chip_info->endian_rx != SSP_RX_LSB)) { + dev_err(chip_info->dev, + "RX FIFO endianess is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->endian_tx != SSP_TX_MSB) + && (chip_info->endian_tx != SSP_TX_LSB)) { + dev_err(chip_info->dev, + "TX FIFO endianess is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->data_size < SSP_DATA_BITS_4) + || (chip_info->data_size > SSP_DATA_BITS_32)) { + dev_err(chip_info->dev, + "DATA Size is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->com_mode != INTERRUPT_TRANSFER) + && (chip_info->com_mode != DMA_TRANSFER) + && (chip_info->com_mode != POLLING_TRANSFER)) { + dev_err(chip_info->dev, + "Communication mode is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM) + || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) { + dev_err(chip_info->dev, + "RX FIFO Trigger Level is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC) + || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) { + dev_err(chip_info->dev, + "TX FIFO Trigger Level is configured 
incorrectly\n"); + return -EINVAL; + } + if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) { + if ((chip_info->clk_phase != SSP_CLK_RISING_EDGE) + && (chip_info->clk_phase != SSP_CLK_FALLING_EDGE)) { + dev_err(chip_info->dev, + "Clock Phase is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW) + && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) { + dev_err(chip_info->dev, + "Clock Polarity is configured incorrectly\n"); + return -EINVAL; + } + } + if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { + if ((chip_info->ctrl_len < SSP_BITS_4) + || (chip_info->ctrl_len > SSP_BITS_32)) { + dev_err(chip_info->dev, + "CTRL LEN is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO) + && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) { + dev_err(chip_info->dev, + "Wait State is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) + && (chip_info->duplex != + SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { + dev_err(chip_info->dev, + "DUPLEX is configured incorrectly\n"); + return -EINVAL; + } + } + if (chip_info->cs_control == NULL) { + dev_warn(chip_info->dev, + "Chip Select Function is NULL for this chip\n"); + chip_info->cs_control = null_cs_control; + } + return 0; +} + +/** + * pl022_transfer - transfer function registered to SPI master framework + * @spi: spi device which is requesting transfer + * @msg: spi message which is to handled is queued to driver queue + * + * This function is registered to the SPI framework for this SPI master + * controller. It will queue the spi_message in the queue of driver if + * the queue is not stopped and return. + */ +static int pl022_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct pl022 *pl022 = spi_master_get_devdata(spi->master); + unsigned long flags; + + spin_lock_irqsave(&pl022->queue_lock, flags); + + if (pl022->run == QUEUE_STOPPED) { + spin_unlock_irqrestore(&pl022->queue_lock, flags); + return -ESHUTDOWN; + } + msg->actual_length = 0; + msg->status = -EINPROGRESS; + msg->state = STATE_START; + + list_add_tail(&msg->queue, &pl022->queue); + if (pl022->run == QUEUE_RUNNING && !pl022->busy) + queue_work(pl022->workqueue, &pl022->pump_messages); + + spin_unlock_irqrestore(&pl022->queue_lock, flags); + return 0; +} + +static int calculate_effective_freq(struct pl022 *pl022, + int freq, + struct ssp_clock_params *clk_freq) +{ + /* Lets calculate the frequency parameters */ + u16 cpsdvsr = 2; + u16 scr = 0; + bool freq_found = false; + u32 rate; + u32 max_tclk; + u32 min_tclk; + + rate = clk_get_rate(pl022->clk); + /* cpsdvscr = 2 & scr 0 */ + max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN))); + /* cpsdvsr = 254 & scr = 255 */ + min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX))); + + if ((freq <= max_tclk) && (freq >= min_tclk)) { + while (cpsdvsr <= CPSDVR_MAX && !freq_found) { + while (scr <= SCR_MAX && !freq_found) { + if ((rate / + (cpsdvsr * (1 + scr))) > freq) + scr += 1; + else { + /* + * This bool is made true when + * effective frequency >= + * target frequency is found + */ + freq_found = true; + if ((rate / + (cpsdvsr * (1 + scr))) != freq) { + if (scr == SCR_MIN) { + cpsdvsr -= 2; + scr = SCR_MAX; + } else + scr -= 1; + } + } + } + if (!freq_found) { + cpsdvsr += 2; + scr = SCR_MIN; + } + } + if (cpsdvsr != 0) { + dev_dbg(&pl022->adev->dev, + "SSP Effective Frequency is %u\n", + (rate / (cpsdvsr * (1 + scr)))); + clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF); + 
clk_freq->scr = (u8) (scr & 0xFF); + dev_dbg(&pl022->adev->dev, + "SSP cpsdvsr = %d, scr = %d\n", + clk_freq->cpsdvsr, clk_freq->scr); + } + } else { + dev_err(&pl022->adev->dev, + "controller data is incorrect: out of range frequency"); + return -EINVAL; + } + return 0; +} + +/** + * NOT IMPLEMENTED + * process_dma_info - Processes the DMA info provided by client drivers + * @chip_info: chip info provided by client device + * @chip: Runtime state maintained by the SSP controller for each spi device + * + * This function processes and stores DMA config provided by client driver + * into the runtime state maintained by the SSP controller driver + */ +static int process_dma_info(struct pl022_config_chip *chip_info, + struct chip_data *chip) +{ + dev_err(chip_info->dev, + "cannot process DMA info, DMA not implemented!\n"); + return -ENOTSUPP; +} + +/** + * pl022_setup - setup function registered to SPI master framework + * @spi: spi device which is requesting setup + * + * This function is registered to the SPI framework for this SPI master + * controller. If it is the first time when setup is called by this device, + * this function will initialize the runtime state for this chip and save + * the same in the device structure. Else it will update the runtime info + * with the updated chip info. Nothing is really being written to the + * controller hardware here, that is not done until the actual transfer + * commence. + */ + +/* FIXME: JUST GUESSING the spi->mode bits understood by this driver */ +#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ + | SPI_LSB_FIRST | SPI_LOOP) + +static int pl022_setup(struct spi_device *spi) +{ + struct pl022_config_chip *chip_info; + struct chip_data *chip; + int status = 0; + struct pl022 *pl022 = spi_master_get_devdata(spi->master); + + if (spi->mode & ~MODEBITS) { + dev_dbg(&spi->dev, "unsupported mode bits %x\n", + spi->mode & ~MODEBITS); + return -EINVAL; + } + + if (!spi->max_speed_hz) + return -EINVAL; + + /* Get controller_state if one is supplied */ + chip = spi_get_ctldata(spi); + + if (chip == NULL) { + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) { + dev_err(&spi->dev, + "cannot allocate controller state\n"); + return -ENOMEM; + } + dev_dbg(&spi->dev, + "allocated memory for controller's runtime state\n"); + } + + /* Get controller data if one is supplied */ + chip_info = spi->controller_data; + + if (chip_info == NULL) { + /* spi_board_info.controller_data not is supplied */ + dev_dbg(&spi->dev, + "using default controller_data settings\n"); + + chip_info = + kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL); + + if (!chip_info) { + dev_err(&spi->dev, + "cannot allocate controller data\n"); + status = -ENOMEM; + goto err_first_setup; + } + + dev_dbg(&spi->dev, "allocated memory for controller data\n"); + + /* Pointer back to the SPI device */ + chip_info->dev = &spi->dev; + /* + * Set controller data default values: + * Polling is supported by default + */ + chip_info->lbm = LOOPBACK_DISABLED; + chip_info->com_mode = POLLING_TRANSFER; + chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI; + chip_info->hierarchy = SSP_SLAVE; + chip_info->slave_tx_disable = DO_NOT_DRIVE_TX; + chip_info->endian_tx = SSP_TX_LSB; + chip_info->endian_rx = SSP_RX_LSB; + chip_info->data_size = SSP_DATA_BITS_12; + chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM; + chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC; + chip_info->clk_phase = SSP_CLK_FALLING_EDGE; + chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW; + chip_info->ctrl_len = SSP_BITS_8; + 
chip_info->wait_state = SSP_MWIRE_WAIT_ZERO; + chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX; + chip_info->cs_control = null_cs_control; + } else { + dev_dbg(&spi->dev, + "using user supplied controller_data settings\n"); + } + + /* + * We can override with custom divisors, else we use the board + * frequency setting + */ + if ((0 == chip_info->clk_freq.cpsdvsr) + && (0 == chip_info->clk_freq.scr)) { + status = calculate_effective_freq(pl022, + spi->max_speed_hz, + &chip_info->clk_freq); + if (status < 0) + goto err_config_params; + } else { + if ((chip_info->clk_freq.cpsdvsr % 2) != 0) + chip_info->clk_freq.cpsdvsr = + chip_info->clk_freq.cpsdvsr - 1; + } + status = verify_controller_parameters(pl022, chip_info); + if (status) { + dev_err(&spi->dev, "controller data is incorrect"); + goto err_config_params; + } + /* Now set controller state based on controller data */ + chip->xfer_type = chip_info->com_mode; + chip->cs_control = chip_info->cs_control; + + if (chip_info->data_size <= 8) { + dev_dbg(&spi->dev, "1 <= n <=8 bits per word\n"); + chip->n_bytes = 1; + chip->read = READING_U8; + chip->write = WRITING_U8; + } else if (chip_info->data_size <= 16) { + dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n"); + chip->n_bytes = 2; + chip->read = READING_U16; + chip->write = WRITING_U16; + } else { + if (pl022->vendor->max_bpw >= 32) { + dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n"); + chip->n_bytes = 4; + chip->read = READING_U32; + chip->write = WRITING_U32; + } else { + dev_err(&spi->dev, + "illegal data size for this controller!\n"); + dev_err(&spi->dev, + "a standard pl022 can only handle " + "1 <= n <= 16 bit words\n"); + goto err_config_params; + } + } + + /* Now Initialize all register settings required for this chip */ + chip->cr0 = 0; + chip->cr1 = 0; + chip->dmacr = 0; + chip->cpsr = 0; + if ((chip_info->com_mode == DMA_TRANSFER) + && ((pl022->master_info)->enable_dma)) { + chip->enable_dma = 1; + dev_dbg(&spi->dev, "DMA mode set in controller state\n"); + status = process_dma_info(chip_info, chip); + if (status < 0) + goto err_config_params; + SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, + SSP_DMACR_MASK_RXDMAE, 0); + SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, + SSP_DMACR_MASK_TXDMAE, 1); + } else { + chip->enable_dma = 0; + dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); + SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, + SSP_DMACR_MASK_RXDMAE, 0); + SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, + SSP_DMACR_MASK_TXDMAE, 1); + } + + chip->cpsr = chip_info->clk_freq.cpsdvsr; + + SSP_WRITE_BITS(chip->cr0, chip_info->data_size, SSP_CR0_MASK_DSS, 0); + SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP, 5); + SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6); + SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7); + SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8); + SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS, 16); + SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 21); + SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0); + SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); + SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); + SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3); + SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, SSP_CR1_MASK_RENDN, 4); + SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, SSP_CR1_MASK_TENDN, 5); + SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, 
SSP_CR1_MASK_MWAIT, 6); + SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL, 7); + SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL, 10); + + /* Save controller_state */ + spi_set_ctldata(spi, chip); + return status; + err_config_params: + err_first_setup: + kfree(chip); + return status; +} + +/** + * pl022_cleanup - cleanup function registered to SPI master framework + * @spi: spi device which is requesting cleanup + * + * This function is registered to the SPI framework for this SPI master + * controller. It will free the runtime state of chip. + */ +static void pl022_cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata(spi); + + spi_set_ctldata(spi, NULL); + kfree(chip); +} + + +static int __init +pl022_probe(struct amba_device *adev, struct amba_id *id) +{ + struct device *dev = &adev->dev; + struct pl022_ssp_controller *platform_info = adev->dev.platform_data; + struct spi_master *master; + struct pl022 *pl022 = NULL; /*Data for this driver */ + int status = 0; + + dev_info(&adev->dev, + "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid); + if (platform_info == NULL) { + dev_err(&adev->dev, "probe - no platform data supplied\n"); + status = -ENODEV; + goto err_no_pdata; + } + + /* Allocate master with space for data */ + master = spi_alloc_master(dev, sizeof(struct pl022)); + if (master == NULL) { + dev_err(&adev->dev, "probe - cannot alloc SPI master\n"); + status = -ENOMEM; + goto err_no_master; + } + + pl022 = spi_master_get_devdata(master); + pl022->master = master; + pl022->master_info = platform_info; + pl022->adev = adev; + pl022->vendor = id->data; + + /* + * Bus Number Which has been Assigned to this SSP controller + * on this board + */ + master->bus_num = platform_info->bus_id; + master->num_chipselect = platform_info->num_chipselect; + master->cleanup = pl022_cleanup; + master->setup = pl022_setup; + master->transfer = pl022_transfer; + + dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num); + + status = amba_request_regions(adev, NULL); + if (status) + goto err_no_ioregion; + + pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); + if (pl022->virtbase == NULL) { + status = -ENOMEM; + goto err_no_ioremap; + } + printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", + adev->res.start, pl022->virtbase); + + pl022->clk = clk_get(&adev->dev, NULL); + if (IS_ERR(pl022->clk)) { + status = PTR_ERR(pl022->clk); + dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n"); + goto err_no_clk; + } + + /* Disable SSP */ + clk_enable(pl022->clk); + writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), + SSP_CR1(pl022->virtbase)); + load_ssp_default_config(pl022); + clk_disable(pl022->clk); + + status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", + pl022); + if (status < 0) { + dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); + goto err_no_irq; + } + /* Initialize and start queue */ + status = init_queue(pl022); + if (status != 0) { + dev_err(&adev->dev, "probe - problem initializing queue\n"); + goto err_init_queue; + } + status = start_queue(pl022); + if (status != 0) { + dev_err(&adev->dev, "probe - problem starting queue\n"); + goto err_start_queue; + } + /* Register with the SPI framework */ + amba_set_drvdata(adev, pl022); + status = spi_register_master(master); + if (status != 0) { + dev_err(&adev->dev, + "probe - problem registering spi master\n"); + goto err_spi_register; + } + dev_dbg(dev, "probe succeded\n"); + return 0; + + 
err_spi_register: + err_start_queue: + err_init_queue: + destroy_queue(pl022); + free_irq(adev->irq[0], pl022); + err_no_irq: + clk_put(pl022->clk); + err_no_clk: + iounmap(pl022->virtbase); + err_no_ioremap: + amba_release_regions(adev); + err_no_ioregion: + spi_master_put(master); + err_no_master: + err_no_pdata: + return status; +} + +static int __exit +pl022_remove(struct amba_device *adev) +{ + struct pl022 *pl022 = amba_get_drvdata(adev); + int status = 0; + if (!pl022) + return 0; + + /* Remove the queue */ + status = destroy_queue(pl022); + if (status != 0) { + dev_err(&adev->dev, + "queue remove failed (%d)\n", status); + return status; + } + load_ssp_default_config(pl022); + free_irq(adev->irq[0], pl022); + clk_disable(pl022->clk); + clk_put(pl022->clk); + iounmap(pl022->virtbase); + amba_release_regions(adev); + tasklet_disable(&pl022->pump_transfers); + spi_unregister_master(pl022->master); + spi_master_put(pl022->master); + amba_set_drvdata(adev, NULL); + dev_dbg(&adev->dev, "remove succeded\n"); + return 0; +} + +#ifdef CONFIG_PM +static int pl022_suspend(struct amba_device *adev, pm_message_t state) +{ + struct pl022 *pl022 = amba_get_drvdata(adev); + int status = 0; + + status = stop_queue(pl022); + if (status) { + dev_warn(&adev->dev, "suspend cannot stop queue\n"); + return status; + } + + clk_enable(pl022->clk); + load_ssp_default_config(pl022); + clk_disable(pl022->clk); + dev_dbg(&adev->dev, "suspended\n"); + return 0; +} + +static int pl022_resume(struct amba_device *adev) +{ + struct pl022 *pl022 = amba_get_drvdata(adev); + int status = 0; + + /* Start the queue running */ + status = start_queue(pl022); + if (status) + dev_err(&adev->dev, "problem starting queue (%d)\n", status); + else + dev_dbg(&adev->dev, "resumed\n"); + + return status; +} +#else +#define pl022_suspend NULL +#define pl022_resume NULL +#endif /* CONFIG_PM */ + +static struct vendor_data vendor_arm = { + .fifodepth = 8, + .max_bpw = 16, + .unidir = false, +}; + + +static struct vendor_data vendor_st = { + .fifodepth = 32, + .max_bpw = 32, + .unidir = false, +}; + +static struct amba_id pl022_ids[] = { + { + /* + * ARM PL022 variant, this has a 16bit wide + * and 8 locations deep TX/RX FIFO + */ + .id = 0x00041022, + .mask = 0x000fffff, + .data = &vendor_arm, + }, + { + /* + * ST Micro derivative, this has 32bit wide + * and 32 locations deep TX/RX FIFO + */ + .id = 0x00108022, + .mask = 0xffffffff, + .data = &vendor_st, + }, + { 0, 0 }, +}; + +static struct amba_driver pl022_driver = { + .drv = { + .name = "ssp-pl022", + }, + .id_table = pl022_ids, + .probe = pl022_probe, + .remove = __exit_p(pl022_remove), + .suspend = pl022_suspend, + .resume = pl022_resume, +}; + + +static int __init pl022_init(void) +{ + return amba_driver_register(&pl022_driver); +} + +module_init(pl022_init); + +static void __exit pl022_exit(void) +{ + amba_driver_unregister(&pl022_driver); +} + +module_exit(pl022_exit); + +MODULE_AUTHOR("Linus Walleij "); +MODULE_DESCRIPTION("PL022 SSP Controller Driver"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 1bcbf3948876e31a8ece28597dec447611ad9c8b Mon Sep 17 00:00:00 2001 From: Pavel Roskin Date: Wed, 10 Jun 2009 12:43:48 -0700 Subject: intelfb: fix spelling of "CLOCK" Signed-off-by: Pavel Roskin Cc: Eric Anholt Cc: Dave Airlie Signed-off-by: Andrew Morton Signed-off-by: Dave Airlie --- drivers/gpu/drm/i915/intel_fb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 
7a66b91ccf42..cbd2ba828c72 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -207,7 +207,7 @@ static int intelfb_set_par(struct fb_info *info) if (var->pixclock != -1) { - DRM_ERROR("PIXEL CLCOK SET\n"); + DRM_ERROR("PIXEL CLOCK SET\n"); return -EINVAL; } else { struct drm_crtc *crtc; -- cgit v1.2.3 From b798b1fe3b6436275ad1b517a6823d55e3b75c22 Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Wed, 10 Jun 2009 12:43:49 -0700 Subject: drm: simplify kcalloc() call to kzalloc(). Calls to kcalloc() for a single element can be simplified to calls to kzalloc(). Signed-off-by: Robert P. J. Day Cc: Dave Airlie Cc: Eric Anholt Signed-off-by: Andrew Morton Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 4984aa89cf3d..ec43005100d9 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -133,7 +133,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) BUG_ON((size & (PAGE_SIZE - 1)) != 0); - obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); + obj = kzalloc(sizeof(*obj), GFP_KERNEL); obj->dev = dev; obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); -- cgit v1.2.3 From 2ff2e8a3e1898e692b604424c384f134009dea80 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 26 May 2009 10:35:52 +1000 Subject: drm: don't associate _DRM_DRIVER maps with a master A driver will use the _DRM_DRIVER map flag to indicate that it wants to be responsible for removing the map itself, bypassing the DRM's automagic cleanup code. Since the multi-master changes this has been broken, resulting in some drivers having their registers unmapped before it's finished with them. Signed-off-by: Ben Skeggs Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_bufs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 0411d912d82a..80a257554b30 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -371,7 +371,8 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, list->user_token = list->hash.key << PAGE_SHIFT; mutex_unlock(&dev->struct_mutex); - list->master = dev->primary->master; + if (!(map->flags & _DRM_DRIVER)) + list->master = dev->primary->master; *maplist = list; return 0; } -- cgit v1.2.3 From df4f7fe7bd516b3833e25c692c3970e22038a6ca Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 11 Jun 2009 16:16:10 +1000 Subject: radeon: remove _DRM_DRIVER from the preadded sarea map This shouldn't be there and is what broke r600 late in the 2.6.30 release cycle with Ben's patch. 
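For reference, the contract behind the _DRM_DRIVER flag: a map created with it is torn down by the driver itself, never by the core's per-master cleanup. A minimal sketch of a driver-managed map (hedged; the dev_priv field and the regs_base/regs_size values are hypothetical, only drm_addmap/drm_rmmap and the flags come from the DRM map API):

	/* Registers the driver will unmap itself; with _DRM_DRIVER set,
	 * the core no longer ties the map to dev->primary->master. */
	ret = drm_addmap(dev, regs_base, regs_size, _DRM_REGISTERS,
			 _DRM_DRIVER, &dev_priv->mmio_map);
	if (ret)
		return ret;
	/* ... later, in the driver's own unload path: */
	drm_rmmap(dev, dev_priv->mmio_map);

The SAREA, by contrast, is per-master state, which is why dropping _DRM_DRIVER from it below restores the automatic cleanup.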
Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_cp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 77a7a4d84650..521ef6a65356 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -2109,7 +2109,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master) /* prebuild the SAREA */ sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE); - ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER, + ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &master_priv->sarea); if (ret) { DRM_ERROR("SAREA setup failed\n"); -- cgit v1.2.3 From 4fefcb27050b98c97b1c32bc710fc2f874449dee Mon Sep 17 00:00:00 2001 From: yakui_zhao Date: Tue, 2 Jun 2009 14:09:47 +0800 Subject: drm: add separate drm debugging levels Now all the DRM debug info will be reported if the boot option of "drm.debug=1" is added. Sometimes it is inconvenient to get the debug info in KMS mode. We will get too much unrelated info. This will separate several DRM debug levels and the debug level can be used to print the different debug info. And the debug level is controlled by the module parameter of drm.debug In this patch it is divided into four debug levels; drm_core, drm_driver, drm_kms, drm_mode. At the same time we can get the different debug info by changing the debug level. This can be done by adding the module parameter. Of course it can be changed through the /sys/module/drm/parameters/debug after the system is booted. Four debug macro definitions are provided. DRM_DEBUG(fmt, args...) DRM_DEBUG_DRIVER(prefix, fmt, args...) DRM_DEBUG_KMS(prefix, fmt, args...) DRM_DEBUG_MODE(prefix, fmt, args...) When the boot option of "drm.debug=4" is added, it will print the debug info using DRM_DEBUG_KMS macro definition. When the boot option of "drm.debug=6" is added, it will print the debug info using DRM_DEBUG_KMS/DRM_DEBUG_DRIVER. Sometimes we expect to print the value of an array. For example: SDVO command, In such case the following four DRM debug macro definitions are added: DRM_LOG(fmt, args...) DRM_LOG_DRIVER(fmt, args...) DRM_LOG_KMS(fmt, args...) DRM_LOG_MODE(fmt, args...) Signed-off-by: Zhao Yakui Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_stub.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index b9631e3a1ea6..89050684fe0d 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -51,7 +51,22 @@ struct idr drm_minors_idr; struct class *drm_class; struct proc_dir_entry *drm_proc_root; struct dentry *drm_debugfs_root; - +void drm_ut_debug_printk(unsigned int request_level, + const char *prefix, + const char *function_name, + const char *format, ...) +{ + va_list args; + + if (drm_debug & request_level) { + if (function_name) + printk(KERN_DEBUG "[%s:%s], ", prefix, function_name); + va_start(args, format); + vprintk(format, args); + va_end(args); + } +} +EXPORT_SYMBOL(drm_ut_debug_printk); static int drm_minor_get_id(struct drm_device *dev, int type) { int new_id; -- cgit v1.2.3 From 7fb85bfb54a64e9dd82ee4a79022c38ab58f21a0 Mon Sep 17 00:00:00 2001 From: yakui_zhao Date: Tue, 2 Jun 2009 14:10:49 +0800 Subject: drm/i915: replace DRM_DEBUG with DRM_DEBUG_KMS in intel_lvds Use the DRM_DEBUG_KMS macro definition to print the debug info for the LVDS. 
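A sketch of how these macros are expected to expand on top of drm_ut_debug_printk() from the patch above (the DRM_UT_* names and bit values are assumptions for illustration, chosen to match the drm.debug=4 and drm.debug=6 behavior described there; only drm_ut_debug_printk's signature comes from the patch):

	#define DRM_UT_CORE   0x01	/* assumed level bits */
	#define DRM_UT_DRIVER 0x02
	#define DRM_UT_KMS    0x04
	#define DRM_UT_MODE   0x08

	#define DRM_DEBUG_KMS(prefix, fmt, args...) \
		drm_ut_debug_printk(DRM_UT_KMS, prefix, __func__, fmt, ##args)

With that, "echo 4 > /sys/module/drm/parameters/debug" at runtime enables only the KMS messages, e.g. the LVDS conversions that follow.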
Signed-off-by: Zhao Yakui Signed-off-by: Dave Airlie --- drivers/gpu/drm/i915/intel_lvds.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index a7ae9f46aa9a..f22e6efbe6a3 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -37,6 +37,8 @@ #include "i915_drm.h" #include "i915_drv.h" +#define I915_LVDS "i915_lvds" + /** * Sets the backlight level. * @@ -447,7 +449,8 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = { static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) { - DRM_DEBUG("Skipping LVDS initialization for %s\n", id->ident); + DRM_DEBUG_KMS(I915_LVDS, + "Skipping LVDS initialization for %s\n", id->ident); return 1; } @@ -646,7 +649,7 @@ out: return; failed: - DRM_DEBUG("No LVDS modes found, disabling.\n"); + DRM_DEBUG_KMS(I915_LVDS, "No LVDS modes found, disabling.\n"); if (intel_output->ddc_bus) intel_i2c_destroy(intel_output->ddc_bus); drm_connector_cleanup(connector); -- cgit v1.2.3 From 342dc382c451f75ea202a65e6f529bdff6d184cd Mon Sep 17 00:00:00 2001 From: yakui_zhao Date: Tue, 2 Jun 2009 14:12:00 +0800 Subject: drm/i915: Replace DRM_DEBUG with DRM_DEBUG_KMS in intel_sdvo Use the DRM_DEBUG_KMS/DEBUG_LOG_KMS to print the debug info for SDVO device. Signed-off-by: Zhao Yakui Signed-off-by: Dave Airlie --- drivers/gpu/drm/i915/intel_sdvo.c | 67 ++++++++++++++++++++------------------- 1 file changed, 35 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index d8fb88d335cd..7cb9ddf1161e 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -36,7 +36,7 @@ #include "intel_sdvo_regs.h" #undef SDVO_DEBUG - +#define I915_SDVO "i915_sdvo" struct intel_sdvo_priv { struct intel_i2c_chan *i2c_bus; int slaveaddr; @@ -277,20 +277,21 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; int i; - printk(KERN_DEBUG "%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd); + DRM_DEBUG_KMS(I915_SDVO, "%s: W: %02X ", + SDVO_NAME(sdvo_priv), cmd); for (i = 0; i < args_len; i++) - printk(KERN_DEBUG "%02X ", ((u8 *)args)[i]); + DRM_LOG_KMS("%02X ", ((u8 *)args)[i]); for (; i < 8; i++) - printk(KERN_DEBUG " "); + DRM_LOG_KMS(" "); for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) { if (cmd == sdvo_cmd_names[i].cmd) { - printk(KERN_DEBUG "(%s)", sdvo_cmd_names[i].name); + DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name); break; } } if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0])) - printk(KERN_DEBUG "(%02X)", cmd); - printk(KERN_DEBUG "\n"); + DRM_LOG_KMS("(%02X)", cmd); + DRM_LOG_KMS("\n"); } #else #define intel_sdvo_debug_write(o, c, a, l) @@ -329,16 +330,16 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output, struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; int i; - printk(KERN_DEBUG "%s: R: ", SDVO_NAME(sdvo_priv)); + DRM_DEBUG_KMS(I915_SDVO, "%s: R: ", SDVO_NAME(sdvo_priv)); for (i = 0; i < response_len; i++) - printk(KERN_DEBUG "%02X ", ((u8 *)response)[i]); + DRM_LOG_KMS("%02X ", ((u8 *)response)[i]); for (; i < 8; i++) - printk(KERN_DEBUG " "); + DRM_LOG_KMS(" "); if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) - printk(KERN_DEBUG "(%s)", cmd_status_names[status]); + DRM_LOG_KMS("(%s)", cmd_status_names[status]); else - printk(KERN_DEBUG "(??? 
%d)", status); - printk(KERN_DEBUG "\n"); + DRM_LOG_KMS("(??? %d)", status); + DRM_LOG_KMS("\n"); } #else #define intel_sdvo_debug_response(o, r, l, s) @@ -1824,8 +1825,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { - DRM_DEBUG("No SDVO device found on SDVO%c\n", - output_device == SDVOB ? 'B' : 'C'); + DRM_DEBUG_KMS(I915_SDVO, + "No SDVO device found on SDVO%c\n", + output_device == SDVOB ? 'B' : 'C'); goto err_i2c; } } @@ -1909,9 +1911,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) sdvo_priv->controlled_output = 0; memcpy (bytes, &sdvo_priv->caps.output_flags, 2); - DRM_DEBUG("%s: Unknown SDVO output type (0x%02x%02x)\n", - SDVO_NAME(sdvo_priv), - bytes[0], bytes[1]); + DRM_DEBUG_KMS(I915_SDVO, + "%s: Unknown SDVO output type (0x%02x%02x)\n", + SDVO_NAME(sdvo_priv), + bytes[0], bytes[1]); encoder_type = DRM_MODE_ENCODER_NONE; connector_type = DRM_MODE_CONNECTOR_Unknown; goto err_i2c; @@ -1941,21 +1944,21 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) &sdvo_priv->pixel_clock_max); - DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, " - "clock range %dMHz - %dMHz, " - "input 1: %c, input 2: %c, " - "output 1: %c, output 2: %c\n", - SDVO_NAME(sdvo_priv), - sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id, - sdvo_priv->caps.device_rev_id, - sdvo_priv->pixel_clock_min / 1000, - sdvo_priv->pixel_clock_max / 1000, - (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', - (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', - /* check currently supported outputs */ - sdvo_priv->caps.output_flags & + DRM_DEBUG_KMS(I915_SDVO, "%s device VID/DID: %02X:%02X.%02X, " + "clock range %dMHz - %dMHz, " + "input 1: %c, input 2: %c, " + "output 1: %c, output 2: %c\n", + SDVO_NAME(sdvo_priv), + sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id, + sdvo_priv->caps.device_rev_id, + sdvo_priv->pixel_clock_min / 1000, + sdvo_priv->pixel_clock_max / 1000, + (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', + (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', + /* check currently supported outputs */ + sdvo_priv->caps.output_flags & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', - sdvo_priv->caps.output_flags & + sdvo_priv->caps.output_flags & (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); return true; -- cgit v1.2.3 From dc890c2dcd63a90de68ee5f0253eefbb89d725f0 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 7 Jun 2009 23:27:31 +0100 Subject: [ARM] 5544/1: Trust PrimeCell resource sizes I found the PrimeCell/AMBA Bus drivers distrusting the resource passed in as part of the struct amba_device abstraction. This patch removes all hard coded resource sizes found in the PrimeCell drivers and move the responsibility of this definition back to the platform/board device definition, which already exist and appear to be correct for all in-tree users of these drivers. We do this using the resource_size() inline function which was also replicated in the only driver using the resource size, so that has been changed too. The KMI_SIZE was left in kmi.h in case someone likes it. Test-compiled against Versatile and Integrator defconfigs, seems to work but I don't posess these boards and cannot test them. 
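For reference, resource_size() is, in the linux/ioport.h of this era, the one-line inline below; the pl031 hunk further down open-codes exactly this arithmetic before the conversion:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}

So the change is purely a matter of trusting res->end as set up by the platform code rather than a driver-local size constant.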
Signed-off-by: Linus Walleij Signed-off-by: Russell King --- drivers/input/serio/ambakmi.c | 2 +- drivers/mmc/host/mmci.c | 2 +- drivers/rtc/rtc-pl030.c | 2 +- drivers/rtc/rtc-pl031.c | 3 +-- drivers/serial/amba-pl010.c | 2 +- drivers/serial/amba-pl011.c | 2 +- drivers/video/amba-clcd.c | 2 +- 7 files changed, 7 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c index a28c06d686e1..89b394183a75 100644 --- a/drivers/input/serio/ambakmi.c +++ b/drivers/input/serio/ambakmi.c @@ -135,7 +135,7 @@ static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id) io->dev.parent = &dev->dev; kmi->io = io; - kmi->base = ioremap(dev->res.start, KMI_SIZE); + kmi->base = ioremap(dev->res.start, resource_size(&dev->res)); if (!kmi->base) { ret = -ENOMEM; goto out; diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 7d4febdab286..e1aa8471ab1c 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -546,7 +546,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) host->mclk = clk_get_rate(host->clk); DBG(host, "eventual mclk rate: %u Hz\n", host->mclk); } - host->base = ioremap(dev->res.start, SZ_4K); + host->base = ioremap(dev->res.start, resource_size(&dev->res)); if (!host->base) { ret = -ENOMEM; goto clk_disable; diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c index aaf1f75fa293..457231bb1029 100644 --- a/drivers/rtc/rtc-pl030.c +++ b/drivers/rtc/rtc-pl030.c @@ -117,7 +117,7 @@ static int pl030_probe(struct amba_device *dev, struct amba_id *id) goto err_rtc; } - rtc->base = ioremap(dev->res.start, SZ_4K); + rtc->base = ioremap(dev->res.start, resource_size(&dev->res)); if (!rtc->base) { ret = -ENOMEM; goto err_map; diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 451fc13784d1..f41873f98f66 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -142,8 +142,7 @@ static int pl031_probe(struct amba_device *adev, struct amba_id *id) goto out; } - ldata->base = ioremap(adev->res.start, - adev->res.end - adev->res.start + 1); + ldata->base = ioremap(adev->res.start, resource_size(&adev->res)); if (!ldata->base) { ret = -ENOMEM; goto out_no_remap; diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c index cdc049d4350f..58a4879c7e48 100644 --- a/drivers/serial/amba-pl010.c +++ b/drivers/serial/amba-pl010.c @@ -686,7 +686,7 @@ static int pl010_probe(struct amba_device *dev, struct amba_id *id) goto out; } - base = ioremap(dev->res.start, PAGE_SIZE); + base = ioremap(dev->res.start, resource_size(&dev->res)); if (!base) { ret = -ENOMEM; goto free; diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c index 8c5bda27736c..bf82e28770a9 100644 --- a/drivers/serial/amba-pl011.c +++ b/drivers/serial/amba-pl011.c @@ -767,7 +767,7 @@ static int pl011_probe(struct amba_device *dev, struct amba_id *id) goto out; } - base = ioremap(dev->res.start, PAGE_SIZE); + base = ioremap(dev->res.start, resource_size(&dev->res)); if (!base) { ret = -ENOMEM; goto free; diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c index d1f80bac54f0..fb8163d181ab 100644 --- a/drivers/video/amba-clcd.c +++ b/drivers/video/amba-clcd.c @@ -351,7 +351,7 @@ static int clcdfb_register(struct clcd_fb *fb) } fb->fb.fix.mmio_start = fb->dev->res.start; - fb->fb.fix.mmio_len = 4096; + fb->fb.fix.mmio_len = resource_size(&fb->dev->res); fb->regs = ioremap(fb->fb.fix.mmio_start, fb->fb.fix.mmio_len); if (!fb->regs) { -- 
cgit v1.2.3 From f053185948a1bd16329433f5371809765086c1ec Mon Sep 17 00:00:00 2001 From: yakui_zhao Date: Tue, 2 Jun 2009 14:12:47 +0800 Subject: drm: Replace DRM_DEBUG with DRM_DEBUG_MODE in drm_mode Replace the DRM_DEBUG with DRM_DEBUG_MODE macro to print the info in drm_mode. airlied:- fixed up to remove a conflicting #define Signed-off-by: Zhao Yakui Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_modes.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index c9b80fdd4630..54f492a488a9 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -38,6 +38,7 @@ #include "drm.h" #include "drm_crtc.h" +#define DRM_MODESET_DEBUG "drm_mode" /** * drm_mode_debug_printmodeline - debug print a mode * @dev: DRM device @@ -50,12 +51,13 @@ */ void drm_mode_debug_printmodeline(struct drm_display_mode *mode) { - DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n", - mode->base.id, mode->name, mode->vrefresh, mode->clock, - mode->hdisplay, mode->hsync_start, - mode->hsync_end, mode->htotal, - mode->vdisplay, mode->vsync_start, - mode->vsync_end, mode->vtotal, mode->type, mode->flags); + DRM_DEBUG_MODE(DRM_MODESET_DEBUG, + "Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n", + mode->base.id, mode->name, mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, + mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, + mode->vsync_end, mode->vtotal, mode->type, mode->flags); } EXPORT_SYMBOL(drm_mode_debug_printmodeline); @@ -401,7 +403,9 @@ void drm_mode_prune_invalid(struct drm_device *dev, list_del(&mode->head); if (verbose) { drm_mode_debug_printmodeline(mode); - DRM_DEBUG("Not using %s mode %d\n", mode->name, mode->status); + DRM_DEBUG_MODE(DRM_MODESET_DEBUG, + "Not using %s mode %d\n", + mode->name, mode->status); } drm_mode_destroy(dev, mode); } -- cgit v1.2.3 From be25ed9c5cc06e1d17aa97e41daf88f0b46143e6 Mon Sep 17 00:00:00 2001 From: yakui_zhao Date: Tue, 2 Jun 2009 14:13:55 +0800 Subject: drm: Replace DRM_DEBUG with DRM_DEBUG_DRIVER in i915 driver Replace the DRM_DEBUG with the DRM_DEBUG_DRIVER to print the debug info in i915 driver. Signed-off-by: Zhao Yakui Signed-off-by: Dave Airlie --- drivers/gpu/drm/i915/i915_dma.c | 49 ++++++++++++++++++++++++----------------- 1 file changed, 29 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 6bc716d13a52..054576d5da99 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -33,6 +33,8 @@ #include "i915_drm.h" #include "i915_drv.h" +#define I915_DRV "i915_drv" + /* Really want an OS-independent resettable timer. 
Would like to have * this loop run for (eg) 3 sec, but have the timer reset every time * the head pointer changes, so that EBUSY only happens if the ring @@ -99,7 +101,7 @@ static int i915_init_phys_hws(struct drm_device *dev) memset(dev_priv->hw_status_page, 0, PAGE_SIZE); I915_WRITE(HWS_PGA, dev_priv->dma_status_page); - DRM_DEBUG("Enabled hardware status page\n"); + DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n"); return 0; } @@ -185,7 +187,8 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) master_priv->sarea_priv = (drm_i915_sarea_t *) ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); } else { - DRM_DEBUG("sarea not found assuming DRI2 userspace\n"); + DRM_DEBUG_DRIVER(I915_DRV, + "sarea not found assuming DRI2 userspace\n"); } if (init->ring_size != 0) { @@ -235,7 +238,7 @@ static int i915_dma_resume(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - DRM_DEBUG("%s\n", __func__); + DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__); if (dev_priv->ring.map.handle == NULL) { DRM_ERROR("can not ioremap virtual address for" @@ -248,13 +251,14 @@ static int i915_dma_resume(struct drm_device * dev) DRM_ERROR("Can not find hardware status page\n"); return -EINVAL; } - DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); + DRM_DEBUG_DRIVER(I915_DRV, "hw status page @ %p\n", + dev_priv->hw_status_page); if (dev_priv->status_gfx_addr != 0) I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); else I915_WRITE(HWS_PGA, dev_priv->dma_status_page); - DRM_DEBUG("Enabled hardware status page\n"); + DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n"); return 0; } @@ -548,10 +552,10 @@ static int i915_dispatch_flip(struct drm_device * dev) if (!master_priv->sarea_priv) return -EINVAL; - DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", - __func__, - dev_priv->current_page, - master_priv->sarea_priv->pf_current_page); + DRM_DEBUG_DRIVER(I915_DRV, "%s: page=%d pfCurrentPage=%d\n", + __func__, + dev_priv->current_page, + master_priv->sarea_priv->pf_current_page); i915_kernel_lost_context(dev); @@ -629,8 +633,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, return -EINVAL; } - DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n", - batch->start, batch->used, batch->num_cliprects); + DRM_DEBUG_DRIVER(I915_DRV, + "i915 batchbuffer, start %x used %d cliprects %d\n", + batch->start, batch->used, batch->num_cliprects); RING_LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -678,8 +683,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, void *batch_data; int ret; - DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", - cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); + DRM_DEBUG_DRIVER(I915_DRV, + "i915 cmdbuffer, buf %p sz %d cliprects %d\n", + cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); RING_LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -734,7 +740,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data, { int ret; - DRM_DEBUG("%s\n", __func__); + DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__); RING_LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -777,7 +783,8 @@ static int i915_getparam(struct drm_device *dev, void *data, value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; break; default: - DRM_DEBUG("Unknown parameter %d\n", param->param); + DRM_DEBUG_DRIVER(I915_DRV, "Unknown parameter %d\n", + param->param); return -EINVAL; } @@ -817,7 +824,8 @@ static int i915_setparam(struct drm_device *dev, void *data, dev_priv->fence_reg_start = param->value; 
break; default: - DRM_DEBUG("unknown parameter %d\n", param->param); + DRM_DEBUG_DRIVER(I915_DRV, "unknown parameter %d\n", + param->param); return -EINVAL; } @@ -865,9 +873,10 @@ static int i915_set_status_page(struct drm_device *dev, void *data, memset(dev_priv->hw_status_page, 0, PAGE_SIZE); I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); - DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n", - dev_priv->status_gfx_addr); - DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page); + DRM_DEBUG_DRIVER(I915_DRV, "load hws HWS_PGA with gfx mem 0x%x\n", + dev_priv->status_gfx_addr); + DRM_DEBUG_DRIVER(I915_DRV, "load hws at %p\n", + dev_priv->hw_status_page); return 0; } @@ -1270,7 +1279,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) { struct drm_i915_file_private *i915_file_priv; - DRM_DEBUG("\n"); + DRM_DEBUG_DRIVER(I915_DRV, "\n"); i915_file_priv = (struct drm_i915_file_private *) drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES); -- cgit v1.2.3 From c5c07550d41abe86d109430e718f2007113031f8 Mon Sep 17 00:00:00 2001 From: "Figo.zhang" Date: Sat, 6 Jun 2009 18:26:26 +0800 Subject: drm/via: vfree() no need checking before calling it vfree() does it's own NULL checking, no need for explicit check before calling it. Signed-off-by: Figo.zhang Signed-off-by: Dave Airlie --- drivers/gpu/drm/via/via_dmablit.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c index 409e00afdd07..327380888b4a 100644 --- a/drivers/gpu/drm/via/via_dmablit.c +++ b/drivers/gpu/drm/via/via_dmablit.c @@ -195,10 +195,8 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) default: vsg->state = dr_via_sg_init; } - if (vsg->bounce_buffer) { - vfree(vsg->bounce_buffer); - vsg->bounce_buffer = NULL; - } + vfree(vsg->bounce_buffer); + vsg->bounce_buffer = NULL; vsg->free_on_sequence = 0; } -- cgit v1.2.3 From 7ff145593d808a371924652c8d6a15fb75ce2250 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 22 Apr 2009 18:52:14 +1000 Subject: drm/i915: duplicate desired mode for use by fbcon. duplicate the mode into fbcon storage, so when we free modes later we don't just lose this. 
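The bug, in sketch form: modeset->mode used to alias crtc->desired_mode, so freeing the mode lists later left fbcon without a mode of its own. After this patch fbcon owns a private copy and destroys it itself (names as in the hunks below; error handling elided):

	if (modeset->crtc->desired_mode) {
		if (modeset->mode)
			drm_mode_destroy(dev, modeset->mode);	/* drop the old copy */
		modeset->mode = drm_mode_duplicate(dev,
					modeset->crtc->desired_mode);
	}

	/* and the matching teardown, from intel_crtc_destroy(): */
	if (intel_crtc->mode_set.mode)
		drm_mode_destroy(crtc->dev, intel_crtc->mode_set.mode);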
Signed-off-by: Dave Airlie --- drivers/gpu/drm/i915/intel_display.c | 2 ++ drivers/gpu/drm/i915/intel_fb.c | 16 ++++++++++++---- 2 files changed, 14 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b32a51f2a91d..028f5b66e3d8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2310,6 +2310,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + if (intel_crtc->mode_set.mode) + drm_mode_destroy(crtc->dev, intel_crtc->mode_set.mode); drm_crtc_cleanup(crtc); kfree(intel_crtc); } diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index cbd2ba828c72..0ecf6b76a401 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -674,8 +674,12 @@ static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc * par->crtc_ids[0] = crtc->base.id; modeset->num_connectors = conn_count; - if (modeset->mode != modeset->crtc->desired_mode) - modeset->mode = modeset->crtc->desired_mode; + if (modeset->crtc->desired_mode) { + if (modeset->mode) + drm_mode_destroy(dev, modeset->mode); + modeset->mode = drm_mode_duplicate(dev, + modeset->crtc->desired_mode); + } par->crtc_count = 1; @@ -824,8 +828,12 @@ static int intelfb_single_fb_probe(struct drm_device *dev) par->crtc_ids[crtc_count++] = crtc->base.id; modeset->num_connectors = conn_count; - if (modeset->mode != modeset->crtc->desired_mode) - modeset->mode = modeset->crtc->desired_mode; + if (modeset->crtc->desired_mode) { + if (modeset->mode) + drm_mode_destroy(dev, modeset->mode); + modeset->mode = drm_mode_duplicate(dev, + modeset->crtc->desired_mode); + } } par->crtc_count = crtc_count; -- cgit v1.2.3 From 61f11699e7a92d932b31ded3715ad4f70eb26ef2 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Sat, 30 May 2009 20:42:27 -0700 Subject: drm: Eliminate magic I2C frobbing when reading EDID This code depends on the underlying I2C adapter using the bit-banging algo, which may not be the case. If specific encoders require this mechanism, they should build a custom I2C algo that implements this workaround, rather than having it in the general path. Signed-off-by: Keith Packard Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 74 +--------------------------------------------- 1 file changed, 1 insertion(+), 73 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ca9c61656714..c4d9b3308d42 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -584,85 +584,13 @@ int drm_do_probe_ddc_edid(struct i2c_adapter *adapter, } EXPORT_SYMBOL(drm_do_probe_ddc_edid); -/** - * Get EDID information. - * - * \param adapter : i2c device adaptor. - * \param buf : EDID data buffer to be filled - * \param len : EDID data buffer length - * \return 0 on success or -1 on failure. - * - * Initialize DDC, then fetch EDID information - * by calling drm_do_probe_ddc_edid function. 
- */ -static int drm_ddc_read(struct i2c_adapter *adapter, - unsigned char *buf, int len) -{ - struct i2c_algo_bit_data *algo_data = adapter->algo_data; - int i, j; - int ret = -1; - - algo_data->setscl(algo_data->data, 1); - - for (i = 0; i < 1; i++) { - /* For some old monitors we need the - * following process to initialize/stop DDC - */ - algo_data->setsda(algo_data->data, 1); - msleep(13); - - algo_data->setscl(algo_data->data, 1); - for (j = 0; j < 5; j++) { - msleep(10); - if (algo_data->getscl(algo_data->data)) - break; - } - if (j == 5) - continue; - - algo_data->setsda(algo_data->data, 0); - msleep(15); - algo_data->setscl(algo_data->data, 0); - msleep(15); - algo_data->setsda(algo_data->data, 1); - msleep(15); - - /* Do the real work */ - ret = drm_do_probe_ddc_edid(adapter, buf, len); - algo_data->setsda(algo_data->data, 0); - algo_data->setscl(algo_data->data, 0); - msleep(15); - - algo_data->setscl(algo_data->data, 1); - for (j = 0; j < 10; j++) { - msleep(10); - if (algo_data->getscl(algo_data->data)) - break; - } - - algo_data->setsda(algo_data->data, 1); - msleep(15); - algo_data->setscl(algo_data->data, 0); - algo_data->setsda(algo_data->data, 0); - if (ret == 0) - break; - } - /* Release the DDC lines when done or the Apple Cinema HD display - * will switch off - */ - algo_data->setsda(algo_data->data, 1); - algo_data->setscl(algo_data->data, 1); - - return ret; -} - static int drm_ddc_read_edid(struct drm_connector *connector, struct i2c_adapter *adapter, char *buf, int len) { int ret; - ret = drm_ddc_read(adapter, buf, len); + ret = drm_do_probe_ddc_edid(adapter, buf, len); if (ret != 0) { dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n", drm_get_connector_name(connector)); -- cgit v1.2.3 From 2a71ebcd85bcc4d6607f577f23a491f796c30e82 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 12 Jun 2009 15:53:10 +1000 Subject: drm/radeon: add rv740 drm support. This adds drm support for the RV740 family of chips to the r600 support code. 
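All r600-family configuration in this code is keyed off the family bits in dev_priv->flags, so adding a chip amounts to extending switches of the form below and reusing the closest relative's microcode (here RV730's), as the hunks that follow show:

	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
	case CHIP_RV740:
		/* RV740-specific pipe/FIFO/GPR limits go here */
		break;
	default:
		break;
	}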
Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r600_cp.c | 38 ++++++++++++++++++++++++++++++++++--- drivers/gpu/drm/radeon/radeon_drv.h | 1 + 2 files changed, 36 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index bc9d09dfa8e7..aa4eee4b7f3a 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c @@ -489,15 +489,16 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv) RADEON_WRITE(R600_CP_ME_RAM_DATA, RV770_cp_microcode[i]); RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); - } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV730)) { + } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV730) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)) { RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); - DRM_INFO("Loading RV730 PFP Microcode\n"); + DRM_INFO("Loading RV730/RV740 PFP Microcode\n"); for (i = 0; i < R700_PFP_UCODE_SIZE; i++) RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV730_pfp_microcode[i]); RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); - DRM_INFO("Loading RV730 CP Microcode\n"); + DRM_INFO("Loading RV730/RV740 CP Microcode\n"); for (i = 0; i < R700_PM4_UCODE_SIZE; i++) RADEON_WRITE(R600_CP_ME_RAM_DATA, RV730_cp_microcode[i]); RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); @@ -1324,6 +1325,10 @@ static void r700_gfx_init(struct drm_device *dev, dev_priv->r700_sc_prim_fifo_size = 0xf9; dev_priv->r700_sc_hiz_tile_fifo_size = 0x30; dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130; + if (dev_priv->r600_sx_max_export_pos_size > 16) { + dev_priv->r600_sx_max_export_pos_size -= 16; + dev_priv->r600_sx_max_export_smx_size += 16; + } break; case CHIP_RV710: dev_priv->r600_max_pipes = 2; @@ -1345,6 +1350,31 @@ static void r700_gfx_init(struct drm_device *dev, dev_priv->r700_sc_hiz_tile_fifo_size = 0x30; dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130; break; + case CHIP_RV740: + dev_priv->r600_max_pipes = 4; + dev_priv->r600_max_tile_pipes = 4; + dev_priv->r600_max_simds = 8; + dev_priv->r600_max_backends = 4; + dev_priv->r600_max_gprs = 256; + dev_priv->r600_max_threads = 248; + dev_priv->r600_max_stack_entries = 512; + dev_priv->r600_max_hw_contexts = 8; + dev_priv->r600_max_gs_threads = 16 * 2; + dev_priv->r600_sx_max_export_size = 256; + dev_priv->r600_sx_max_export_pos_size = 32; + dev_priv->r600_sx_max_export_smx_size = 224; + dev_priv->r600_sq_num_cf_insts = 2; + + dev_priv->r700_sx_num_of_sets = 7; + dev_priv->r700_sc_prim_fifo_size = 0x100; + dev_priv->r700_sc_hiz_tile_fifo_size = 0x30; + dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130; + + if (dev_priv->r600_sx_max_export_pos_size > 16) { + dev_priv->r600_sx_max_export_pos_size -= 16; + dev_priv->r600_sx_max_export_smx_size += 16; + } + break; default: break; } @@ -1493,6 +1523,7 @@ static void r700_gfx_init(struct drm_device *dev, break; case CHIP_RV730: case CHIP_RV710: + case CHIP_RV740: default: sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4); break; @@ -1569,6 +1600,7 @@ static void r700_gfx_init(struct drm_device *dev, switch (dev_priv->flags & RADEON_FAMILY_MASK) { case CHIP_RV770: case CHIP_RV730: + case CHIP_RV740: gs_prim_buffer_depth = 384; break; case CHIP_RV710: diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 8071d965f142..e266e5f8dc26 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -146,6 +146,7 @@ enum radeon_family { CHIP_RV770, CHIP_RV730, CHIP_RV710, + CHIP_RV740, 
CHIP_LAST, }; -- cgit v1.2.3 From 715cbb05c935e8a4306a730d14a72d5af881523e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 12 Jun 2009 15:55:44 +1000 Subject: drm/radeon: add support for RV790. This adds the PCI IDs for the rv790 which are equiv to the rv770. Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r600_cp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index aa4eee4b7f3a..146f3570af8e 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c @@ -478,13 +478,13 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv) if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)) { RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); - DRM_INFO("Loading RV770 PFP Microcode\n"); + DRM_INFO("Loading RV770/RV790 PFP Microcode\n"); for (i = 0; i < R700_PFP_UCODE_SIZE; i++) RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV770_pfp_microcode[i]); RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); - DRM_INFO("Loading RV770 CP Microcode\n"); + DRM_INFO("Loading RV770/RV790 CP Microcode\n"); for (i = 0; i < R700_PM4_UCODE_SIZE; i++) RADEON_WRITE(R600_CP_ME_RAM_DATA, RV770_cp_microcode[i]); RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); -- cgit v1.2.3 From 249d6048ca98b5452105b0824abac1275661b8e3 Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Wed, 8 Apr 2009 17:11:16 +0200 Subject: drm: Split out the mm declarations in a separate header. Add atomic operations. this is a TTM preparation patch, it rearranges the mm and add operations needed to do mm operations in atomic context. Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_mm.c | 165 +++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 137 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 367c590ffbba..7819fd930a51 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -42,8 +42,11 @@ */ #include "drmP.h" +#include "drm_mm.h" #include +#define MM_UNUSED_TARGET 4 + unsigned long drm_mm_tail_space(struct drm_mm *mm) { struct list_head *tail_node; @@ -74,16 +77,62 @@ int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size) return 0; } +static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) +{ + struct drm_mm_node *child; + + if (atomic) + child = kmalloc(sizeof(*child), GFP_ATOMIC); + else + child = kmalloc(sizeof(*child), GFP_KERNEL); + + if (unlikely(child == NULL)) { + spin_lock(&mm->unused_lock); + if (list_empty(&mm->unused_nodes)) + child = NULL; + else { + child = + list_entry(mm->unused_nodes.next, + struct drm_mm_node, fl_entry); + list_del(&child->fl_entry); + --mm->num_unused; + } + spin_unlock(&mm->unused_lock); + } + return child; +} + +int drm_mm_pre_get(struct drm_mm *mm) +{ + struct drm_mm_node *node; + + spin_lock(&mm->unused_lock); + while (mm->num_unused < MM_UNUSED_TARGET) { + spin_unlock(&mm->unused_lock); + node = kmalloc(sizeof(*node), GFP_KERNEL); + spin_lock(&mm->unused_lock); + + if (unlikely(node == NULL)) { + int ret = (mm->num_unused < 2) ? 
-ENOMEM : 0; + spin_unlock(&mm->unused_lock); + return ret; + } + ++mm->num_unused; + list_add_tail(&node->fl_entry, &mm->unused_nodes); + } + spin_unlock(&mm->unused_lock); + return 0; +} +EXPORT_SYMBOL(drm_mm_pre_get); static int drm_mm_create_tail_node(struct drm_mm *mm, - unsigned long start, - unsigned long size) + unsigned long start, + unsigned long size, int atomic) { struct drm_mm_node *child; - child = (struct drm_mm_node *) - drm_alloc(sizeof(*child), DRM_MEM_MM); - if (!child) + child = drm_mm_kmalloc(mm, atomic); + if (unlikely(child == NULL)) return -ENOMEM; child->free = 1; @@ -97,8 +146,7 @@ static int drm_mm_create_tail_node(struct drm_mm *mm, return 0; } - -int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size) +int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic) { struct list_head *tail_node; struct drm_mm_node *entry; @@ -106,20 +154,21 @@ int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size) tail_node = mm->ml_entry.prev; entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) { - return drm_mm_create_tail_node(mm, entry->start + entry->size, size); + return drm_mm_create_tail_node(mm, entry->start + entry->size, + size, atomic); } entry->size += size; return 0; } static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, - unsigned long size) + unsigned long size, + int atomic) { struct drm_mm_node *child; - child = (struct drm_mm_node *) - drm_alloc(sizeof(*child), DRM_MEM_MM); - if (!child) + child = drm_mm_kmalloc(parent->mm, atomic); + if (unlikely(child == NULL)) return NULL; INIT_LIST_HEAD(&child->fl_entry); @@ -151,8 +200,9 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent, tmp = parent->start % alignment; if (tmp) { - align_splitoff = drm_mm_split_at_start(parent, alignment - tmp); - if (!align_splitoff) + align_splitoff = + drm_mm_split_at_start(parent, alignment - tmp, 0); + if (unlikely(align_splitoff == NULL)) return NULL; } @@ -161,7 +211,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent, parent->free = 0; return parent; } else { - child = drm_mm_split_at_start(parent, size); + child = drm_mm_split_at_start(parent, size, 0); } if (align_splitoff) @@ -169,14 +219,49 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent, return child; } + EXPORT_SYMBOL(drm_mm_get_block); +struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent, + unsigned long size, + unsigned alignment) +{ + + struct drm_mm_node *align_splitoff = NULL; + struct drm_mm_node *child; + unsigned tmp = 0; + + if (alignment) + tmp = parent->start % alignment; + + if (tmp) { + align_splitoff = + drm_mm_split_at_start(parent, alignment - tmp, 1); + if (unlikely(align_splitoff == NULL)) + return NULL; + } + + if (parent->size == size) { + list_del_init(&parent->fl_entry); + parent->free = 0; + return parent; + } else { + child = drm_mm_split_at_start(parent, size, 1); + } + + if (align_splitoff) + drm_mm_put_block(align_splitoff); + + return child; +} +EXPORT_SYMBOL(drm_mm_get_block_atomic); + /* * Put a block. Merge with the previous and / or next block if they are free. * Otherwise add to the free stack. 
*/ -void drm_mm_put_block(struct drm_mm_node * cur) +void drm_mm_put_block(struct drm_mm_node *cur) { struct drm_mm *mm = cur->mm; @@ -188,21 +273,27 @@ void drm_mm_put_block(struct drm_mm_node * cur) int merged = 0; if (cur_head->prev != root_head) { - prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry); + prev_node = + list_entry(cur_head->prev, struct drm_mm_node, ml_entry); if (prev_node->free) { prev_node->size += cur->size; merged = 1; } } if (cur_head->next != root_head) { - next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry); + next_node = + list_entry(cur_head->next, struct drm_mm_node, ml_entry); if (next_node->free) { if (merged) { prev_node->size += next_node->size; list_del(&next_node->ml_entry); list_del(&next_node->fl_entry); - drm_free(next_node, sizeof(*next_node), - DRM_MEM_MM); + if (mm->num_unused < MM_UNUSED_TARGET) { + list_add(&next_node->fl_entry, + &mm->unused_nodes); + ++mm->num_unused; + } else + kfree(next_node); } else { next_node->size += cur->size; next_node->start = cur->start; @@ -215,14 +306,19 @@ void drm_mm_put_block(struct drm_mm_node * cur) list_add(&cur->fl_entry, &mm->fl_entry); } else { list_del(&cur->ml_entry); - drm_free(cur, sizeof(*cur), DRM_MEM_MM); + if (mm->num_unused < MM_UNUSED_TARGET) { + list_add(&cur->fl_entry, &mm->unused_nodes); + ++mm->num_unused; + } else + kfree(cur); } } + EXPORT_SYMBOL(drm_mm_put_block); -struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, - unsigned long size, - unsigned alignment, int best_match) +struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, + unsigned long size, + unsigned alignment, int best_match) { struct list_head *list; const struct list_head *free_stack = &mm->fl_entry; @@ -247,7 +343,6 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, wasted += alignment - tmp; } - if (entry->size >= size + wasted) { if (!best_match) return entry; @@ -260,6 +355,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, return best; } +EXPORT_SYMBOL(drm_mm_search_free); int drm_mm_clean(struct drm_mm * mm) { @@ -267,14 +363,17 @@ int drm_mm_clean(struct drm_mm * mm) return (head->next->next == head); } -EXPORT_SYMBOL(drm_mm_search_free); +EXPORT_SYMBOL(drm_mm_clean); int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) { INIT_LIST_HEAD(&mm->ml_entry); INIT_LIST_HEAD(&mm->fl_entry); + INIT_LIST_HEAD(&mm->unused_nodes); + mm->num_unused = 0; + spin_lock_init(&mm->unused_lock); - return drm_mm_create_tail_node(mm, start, size); + return drm_mm_create_tail_node(mm, start, size, 0); } EXPORT_SYMBOL(drm_mm_init); @@ -282,6 +381,7 @@ void drm_mm_takedown(struct drm_mm * mm) { struct list_head *bnode = mm->fl_entry.next; struct drm_mm_node *entry; + struct drm_mm_node *next; entry = list_entry(bnode, struct drm_mm_node, fl_entry); @@ -293,7 +393,16 @@ void drm_mm_takedown(struct drm_mm * mm) list_del(&entry->fl_entry); list_del(&entry->ml_entry); + kfree(entry); + + spin_lock(&mm->unused_lock); + list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) { + list_del(&entry->fl_entry); + kfree(entry); + --mm->num_unused; + } + spin_unlock(&mm->unused_lock); - drm_free(entry, sizeof(*entry), DRM_MEM_MM); + BUG_ON(mm->num_unused != 0); } EXPORT_SYMBOL(drm_mm_takedown); -- cgit v1.2.3 From f2cb5d86e1af175a9b210241800f03a447f92621 Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Wed, 8 Apr 2009 17:16:24 +0200 Subject: drm: Export hash table functionality. add exports so TTM module can use these functions. 
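Together with the drm_mm changes two patches up, a module such as TTM can then carve out space while holding a spinlock, along these lines (a hedged sketch, not TTM code; the wrapper function and manager_lock are hypothetical, the drm_mm calls are from the patch above):

	static DEFINE_SPINLOCK(manager_lock);

	static int alloc_node_atomic(struct drm_mm *mm, unsigned long size,
				     struct drm_mm_node **out)
	{
		struct drm_mm_node *hole, *node = NULL;
		int ret;

		ret = drm_mm_pre_get(mm);	/* may sleep: refill the node cache */
		if (ret)
			return ret;

		spin_lock(&manager_lock);	/* atomic context from here on */
		hole = drm_mm_search_free(mm, size, 0, 0);
		if (hole)
			node = drm_mm_get_block_atomic(hole, size, 0);
		spin_unlock(&manager_lock);

		if (!node)
			return -ENOMEM;
		*out = node;
		return 0;
	}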
Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_hashtab.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c index af539f7d87dd..ac35145c3e20 100644 --- a/drivers/gpu/drm/drm_hashtab.c +++ b/drivers/gpu/drm/drm_hashtab.c @@ -62,6 +62,7 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order) } return 0; } +EXPORT_SYMBOL(drm_ht_create); void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key) { @@ -156,6 +157,7 @@ int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *it } return 0; } +EXPORT_SYMBOL(drm_ht_just_insert_please); int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item) @@ -169,6 +171,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, *item = hlist_entry(list, struct drm_hash_item, head); return 0; } +EXPORT_SYMBOL(drm_ht_find_item); int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key) { @@ -202,3 +205,4 @@ void drm_ht_remove(struct drm_open_hash *ht) ht->table = NULL; } } +EXPORT_SYMBOL(drm_ht_remove); -- cgit v1.2.3 From 76d4e00a05d06c1d1552adea24fcf6182c9d8999 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 12 Jun 2009 10:26:21 +0200 Subject: [S390] merge cpu.h into cputime.h All definition in cpu.h have to do with cputime accounting. Move them to cputime.h and remove the header file. Signed-off-by: Martin Schwidefsky --- drivers/s390/cio/cio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 2aebb9823044..9889f188c7c5 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include -- cgit v1.2.3 From e45efa99b0b0035a2afc192c242e37eec5477497 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Fri, 12 Jun 2009 10:26:27 +0200 Subject: [S390] cio: fix sanity checks in device_ops. Some sanity checks in device_ops.c test the output of container_of macros to be !NULL. Test the input parameters instead. 
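The old checks were dead code because container_of(), which to_subchannel() wraps, merely subtracts a compile-time offset from the pointer it is given, so its result is non-NULL for any plausible input, valid or not. Hence the corrected pattern used throughout the hunks below:

	if (!cdev || !cdev->dev.parent)		/* validate the inputs... */
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);	/* ...then convert; there is no
						   point NULL-checking the result */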
Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky --- drivers/s390/cio/device_ops.c | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 151754d54745..bf0a24af39a0 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -114,7 +114,7 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) struct subchannel *sch; int ret; - if (!cdev) + if (!cdev || !cdev->dev.parent) return -ENODEV; if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; @@ -122,8 +122,6 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) cdev->private->state != DEV_STATE_W4SENSE) return -EINVAL; sch = to_subchannel(cdev->dev.parent); - if (!sch) - return -ENODEV; ret = cio_clear(sch); if (ret == 0) cdev->private->intparm = intparm; @@ -161,11 +159,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, struct subchannel *sch; int ret; - if (!cdev) + if (!cdev || !cdev->dev.parent) return -ENODEV; sch = to_subchannel(cdev->dev.parent); - if (!sch) - return -ENODEV; if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; if (cdev->private->state == DEV_STATE_VERIFY || @@ -339,7 +335,7 @@ int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm) struct subchannel *sch; int ret; - if (!cdev) + if (!cdev || !cdev->dev.parent) return -ENODEV; if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; @@ -347,8 +343,6 @@ int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm) cdev->private->state != DEV_STATE_W4SENSE) return -EINVAL; sch = to_subchannel(cdev->dev.parent); - if (!sch) - return -ENODEV; ret = cio_halt(sch); if (ret == 0) cdev->private->intparm = intparm; @@ -372,11 +366,9 @@ int ccw_device_resume(struct ccw_device *cdev) { struct subchannel *sch; - if (!cdev) + if (!cdev || !cdev->dev.parent) return -ENODEV; sch = to_subchannel(cdev->dev.parent); - if (!sch) - return -ENODEV; if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; if (cdev->private->state != DEV_STATE_ONLINE || @@ -471,11 +463,11 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev) { struct subchannel *sch; - sch = to_subchannel(cdev->dev.parent); - if (!sch) + if (!cdev->dev.parent) return 0; - else - return sch->lpm; + + sch = to_subchannel(cdev->dev.parent); + return sch->lpm; } /* -- cgit v1.2.3 From 4c57542320e73b9ff46b04092273dbcc184a4fb6 Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Fri, 12 Jun 2009 10:26:28 +0200 Subject: [S390] qdio: simplify error handling in irq handler The check for the device status in qdio_establish_handle_irq() had dead code. Remove the unused code and simplify the error handling. 
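The dead code: the old function first rejected any dstat bit outside DEV_STAT_CHN_END|DEV_STAT_DEV_END together with cstat, so its later "EQ: bad io" test of the same mask could never fire. The surviving logic reduces to three independent tests, as the new qdio_establish_handle_irq() below implements:

	if (cstat)				/* any channel status is fatal */
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;			/* unexpected device status */
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;			/* device end is required */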
Signed-off-by: Jan Glauber Signed-off-by: Martin Schwidefsky --- drivers/s390/cio/qdio_main.c | 44 ++++++++++++-------------------------------- 1 file changed, 12 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index accd957454e7..ba4facc37011 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -881,42 +881,26 @@ no_handler: qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); } -static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, - int dstat) +static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, + int dstat) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; - if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { - DBF_ERROR("EQ:ck con"); - goto error; - } + DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); - if (!(dstat & DEV_STAT_DEV_END)) { - DBF_ERROR("EQ:no dev"); + if (cstat) goto error; - } - - if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) { - DBF_ERROR("EQ: bad io"); + if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END)) goto error; - } - return 0; + if (!(dstat & DEV_STAT_DEV_END)) + goto error; + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); + return; + error: DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no); DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - return 1; -} - -static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, - int dstat) -{ - struct qdio_irq *irq_ptr = cdev->private->qdio_data; - - DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); - if (!qdio_establish_check_errors(cdev, cstat, dstat)) - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); } /* qdio interrupt handler */ @@ -946,7 +930,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, } } qdio_irq_check_sense(irq_ptr, irb); - cstat = irb->scsw.cmd.cstat; dstat = irb->scsw.cmd.dstat; @@ -954,22 +937,19 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, case QDIO_IRQ_STATE_INACTIVE: qdio_establish_handle_irq(cdev, cstat, dstat); break; - case QDIO_IRQ_STATE_CLEANUP: qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); break; - case QDIO_IRQ_STATE_ESTABLISHED: case QDIO_IRQ_STATE_ACTIVE: if (cstat & SCHN_STAT_PCI) { qdio_int_handler_pci(irq_ptr); return; } - if ((cstat & ~SCHN_STAT_PCI) || dstat) { + if (cstat || dstat) qdio_handle_activate_check(cdev, intparm, cstat, dstat); - break; - } + break; default: WARN_ON(1); } -- cgit v1.2.3 From a7c65a559ac371a08e67600ae585052441d71392 Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Fri, 12 Jun 2009 10:26:29 +0200 Subject: [S390] qdio: inline qdio_perf_stat_inc Move qdio_perf_stat_inc to the header file so it can be inlined. Remove unused qdio_perf_stat_dec. 
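With the helper visible in the header, a hot-path call site compiles down to a flag test plus an atomic increment instead of an out-of-line call. A call site looks like the sketch below (the counter field name is illustrative, not taken from this patch):

	qdio_perf_stat_inc(&perf_stats.qdio_int);	/* hypothetical counter */

Note the guard is a runtime variable (qdio_performance_stats), so the test itself cannot be compiled away; inlining only removes the call overhead around it.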
Signed-off-by: Jan Glauber Signed-off-by: Martin Schwidefsky --- drivers/s390/cio/qdio_perf.c | 12 ------------ drivers/s390/cio/qdio_perf.h | 10 ++++++---- 2 files changed, 6 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c index 136d0f0b1e93..eff943923c6f 100644 --- a/drivers/s390/cio/qdio_perf.c +++ b/drivers/s390/cio/qdio_perf.c @@ -25,18 +25,6 @@ struct qdio_perf_stats perf_stats; static struct proc_dir_entry *qdio_perf_pde; #endif -inline void qdio_perf_stat_inc(atomic_long_t *count) -{ - if (qdio_performance_stats) - atomic_long_inc(count); -} - -inline void qdio_perf_stat_dec(atomic_long_t *count) -{ - if (qdio_performance_stats) - atomic_long_dec(count); -} - /* * procfs functions */ diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h index 7821ac4fa517..ff4504ce1e3c 100644 --- a/drivers/s390/cio/qdio_perf.h +++ b/drivers/s390/cio/qdio_perf.h @@ -9,7 +9,6 @@ #define QDIO_PERF_H #include -#include #include struct qdio_perf_stats { @@ -50,10 +49,13 @@ struct qdio_perf_stats { extern struct qdio_perf_stats perf_stats; extern int qdio_performance_stats; +static inline void qdio_perf_stat_inc(atomic_long_t *count) +{ + if (qdio_performance_stats) + atomic_long_inc(count); +} + int qdio_setup_perf_stats(void); void qdio_remove_perf_stats(void); -extern void qdio_perf_stat_inc(atomic_long_t *count); -extern void qdio_perf_stat_dec(atomic_long_t *count); - #endif -- cgit v1.2.3 From fcf7581f7ca82e63e4e137be77c342a4e4ec8401 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 12 Jun 2009 10:26:30 +0200 Subject: [S390] 3270: do not register with tty_register_device The tty3270_notifier that calls tty_register_device / tty_unregister_device is harmful in two ways: 1) the device node that is create is wrong because the minor numbers for 3270 tty start with 1 and tty_notifier passes the minor as index. 2) If 1) is corrected you'll get a warning: WARNING: at fs/sysfs/dir.c:462 sysfs_add_one+0x4c/0x60() sysfs: duplicate filename '227:1' can not be created The 227:1 link is already created by raw3270_create_attributes to refer to ../../class/tty/tty. There cannot be two links. Signed-off-by: Martin Schwidefsky --- drivers/s390/char/tty3270.c | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index a7fe6302c982..aa7a114f6529 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -1754,14 +1754,6 @@ static const struct tty_operations tty3270_ops = { .set_termios = tty3270_set_termios }; -static void tty3270_notifier(int index, int active) -{ - if (active) - tty_register_device(tty3270_driver, index, NULL); - else - tty_unregister_device(tty3270_driver, index); -} - /* * 3270 tty registration code called from tty_init(). * Most kernel services (incl. kmalloc) are available at this poimt. 
@@ -1796,12 +1788,6 @@ static int __init tty3270_init(void) return ret; } tty3270_driver = driver; - ret = raw3270_register_notifier(tty3270_notifier); - if (ret) { - put_tty_driver(driver); - return ret; - - } return 0; } @@ -1810,7 +1796,6 @@ tty3270_exit(void) { struct tty_driver *driver; - raw3270_unregister_notifier(tty3270_notifier); driver = tty3270_driver; tty3270_driver = NULL; tty_unregister_driver(driver); -- cgit v1.2.3 From 205d7ab9c9af6847dda30650a0b8f98555a20654 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 12 Jun 2009 10:26:31 +0200 Subject: [S390] 3270: lock dependency fixes Lockdep found a problem with the lock order of the view lock and the ccw device lock. raw3270_activate_view/raw3270_deactivate_view first take the ccw device lock, then call the activate/deactivate functions of the view, which take the view lock. The update functions of the con3270/tty3270 view will first take the view lock, then take the ccw device lock. To fix this the activate/deactivate functions are changed to avoid taking the view lock by moving the function calls that modify the 3270 output buffer to the update function, which is called by a timer. Signed-off-by: Martin Schwidefsky --- drivers/s390/char/con3270.c | 38 ++++++++++++++------------------------ drivers/s390/char/tty3270.c | 42 ++++++++++++++++++------------------------ 2 files changed, 32 insertions(+), 48 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index d028d2ee83dd..ed5396dae58e 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -64,7 +64,7 @@ static struct con3270 *condev; #define CON_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */ #define CON_UPDATE_LIST 2 /* Update lines in tty3270->update. */ #define CON_UPDATE_STATUS 4 /* Update status line. */ -#define CON_UPDATE_ALL 7 +#define CON_UPDATE_ALL 8 /* Recreate screen. */ static void con3270_update(struct con3270 *); @@ -73,18 +73,10 @@ static void con3270_update(struct con3270 *); */ static void con3270_set_timer(struct con3270 *cp, int expires) { - if (expires == 0) { - if (timer_pending(&cp->timer)) - del_timer(&cp->timer); - return; - } - if (timer_pending(&cp->timer) && - mod_timer(&cp->timer, jiffies + expires)) - return; - cp->timer.function = (void (*)(unsigned long)) con3270_update; - cp->timer.data = (unsigned long) cp; - cp->timer.expires = jiffies + expires; - add_timer(&cp->timer); + if (expires == 0) + del_timer(&cp->timer); + else + mod_timer(&cp->timer, jiffies + expires); } /* @@ -225,6 +217,12 @@ con3270_update(struct con3270 *cp) spin_lock_irqsave(&cp->view.lock, flags); updated = 0; + if (cp->update_flags & CON_UPDATE_ALL) { + con3270_rebuild_update(cp); + con3270_update_status(cp); + cp->update_flags = CON_UPDATE_ERASE | CON_UPDATE_LIST | + CON_UPDATE_STATUS; + } if (cp->update_flags & CON_UPDATE_ERASE) { /* Use erase write alternate to initialize display. */ raw3270_request_set_cmd(wrq, TC_EWRITEA); @@ -302,7 +300,6 @@ con3270_read_tasklet(struct raw3270_request *rrq) deactivate = 1; break; case 0x6d: /* clear: start from scratch.
*/ - con3270_rebuild_update(cp); cp->update_flags = CON_UPDATE_ALL; con3270_set_timer(cp, 1); break; @@ -382,30 +379,21 @@ con3270_issue_read(struct con3270 *cp) static int con3270_activate(struct raw3270_view *view) { - unsigned long flags; struct con3270 *cp; cp = (struct con3270 *) view; - spin_lock_irqsave(&cp->view.lock, flags); - cp->nr_up = 0; - con3270_rebuild_update(cp); - con3270_update_status(cp); cp->update_flags = CON_UPDATE_ALL; con3270_set_timer(cp, 1); - spin_unlock_irqrestore(&cp->view.lock, flags); return 0; } static void con3270_deactivate(struct raw3270_view *view) { - unsigned long flags; struct con3270 *cp; cp = (struct con3270 *) view; - spin_lock_irqsave(&cp->view.lock, flags); del_timer(&cp->timer); - spin_unlock_irqrestore(&cp->view.lock, flags); } static int @@ -504,6 +492,7 @@ con3270_write(struct console *co, const char *str, unsigned int count) con3270_cline_end(cp); } /* Setup timer to output current console buffer after 1/10 second */ + cp->nr_up = 0; if (cp->view.dev && !timer_pending(&cp->timer)) con3270_set_timer(cp, HZ/10); spin_unlock_irqrestore(&cp->view.lock,flags); @@ -624,7 +613,8 @@ con3270_init(void) INIT_LIST_HEAD(&condev->lines); INIT_LIST_HEAD(&condev->update); - init_timer(&condev->timer); + setup_timer(&condev->timer, (void (*)(unsigned long)) con3270_update, + (unsigned long) condev); tasklet_init(&condev->readlet, (void (*)(unsigned long)) con3270_read_tasklet, (unsigned long) condev->read); diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index aa7a114f6529..38385677c653 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -112,7 +112,7 @@ struct tty3270 { #define TTY_UPDATE_LIST 2 /* Update lines in tty3270->update. */ #define TTY_UPDATE_INPUT 4 /* Update input line. */ #define TTY_UPDATE_STATUS 8 /* Update status line. */ -#define TTY_UPDATE_ALL 15 +#define TTY_UPDATE_ALL 16 /* Recreate screen. */ static void tty3270_update(struct tty3270 *); @@ -121,19 +121,10 @@ static void tty3270_update(struct tty3270 *); */ static void tty3270_set_timer(struct tty3270 *tp, int expires) { - if (expires == 0) { - if (timer_pending(&tp->timer) && del_timer(&tp->timer)) - raw3270_put_view(&tp->view); - return; - } - if (timer_pending(&tp->timer) && - mod_timer(&tp->timer, jiffies + expires)) - return; - raw3270_get_view(&tp->view); - tp->timer.function = (void (*)(unsigned long)) tty3270_update; - tp->timer.data = (unsigned long) tp; - tp->timer.expires = jiffies + expires; - add_timer(&tp->timer); + if (expires == 0) + del_timer(&tp->timer); + else + mod_timer(&tp->timer, jiffies + expires); } /* @@ -337,7 +328,6 @@ tty3270_write_callback(struct raw3270_request *rq, void *data) tp = (struct tty3270 *) rq->view; if (rq->rc != 0) { /* Write wasn't successful. Refresh all. */ - tty3270_rebuild_update(tp); tp->update_flags = TTY_UPDATE_ALL; tty3270_set_timer(tp, 1); } @@ -366,6 +356,12 @@ tty3270_update(struct tty3270 *tp) spin_lock(&tp->view.lock); updated = 0; + if (tp->update_flags & TTY_UPDATE_ALL) { + tty3270_rebuild_update(tp); + tty3270_update_status(tp); + tp->update_flags = TTY_UPDATE_ERASE | TTY_UPDATE_LIST | + TTY_UPDATE_INPUT | TTY_UPDATE_STATUS; + } if (tp->update_flags & TTY_UPDATE_ERASE) { /* Use erase write alternate to erase display.
*/ raw3270_request_set_cmd(wrq, TC_EWRITEA); @@ -425,7 +421,6 @@ tty3270_update(struct tty3270 *tp) xchg(&tp->write, wrq); } spin_unlock(&tp->view.lock); - raw3270_put_view(&tp->view); } /* @@ -570,7 +565,6 @@ tty3270_read_tasklet(struct raw3270_request *rrq) tty3270_set_timer(tp, 1); } else if (tp->input->string[0] == 0x6d) { /* Display has been cleared. Redraw. */ - tty3270_rebuild_update(tp); tp->update_flags = TTY_UPDATE_ALL; tty3270_set_timer(tp, 1); } @@ -641,22 +635,20 @@ static int tty3270_activate(struct raw3270_view *view) { struct tty3270 *tp; - unsigned long flags; tp = (struct tty3270 *) view; - spin_lock_irqsave(&tp->view.lock, flags); - tp->nr_up = 0; - tty3270_rebuild_update(tp); - tty3270_update_status(tp); tp->update_flags = TTY_UPDATE_ALL; tty3270_set_timer(tp, 1); - spin_unlock_irqrestore(&tp->view.lock, flags); return 0; } static void tty3270_deactivate(struct raw3270_view *view) { + struct tty3270 *tp; + + tp = (struct tty3270 *) view; + del_timer(&tp->timer); } static int @@ -743,6 +735,7 @@ tty3270_free_view(struct tty3270 *tp) { int pages; + del_timer_sync(&tp->timer); kbd_free(tp->kbd); raw3270_request_free(tp->kreset); raw3270_request_free(tp->read); @@ -889,7 +882,8 @@ tty3270_open(struct tty_struct *tty, struct file * filp) INIT_LIST_HEAD(&tp->update); INIT_LIST_HEAD(&tp->rcl_lines); tp->rcl_max = 20; - init_timer(&tp->timer); + setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update, + (unsigned long) tp); tasklet_init(&tp->readlet, (void (*)(unsigned long)) tty3270_read_tasklet, (unsigned long) tp->read); -- cgit v1.2.3 From 6b9d8e80bb9edd0c9fe948a6ef105391de56b012 Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Fri, 12 Jun 2009 10:26:34 +0200 Subject: [S390] qdio: fix access beyond ARRAY_SIZE of irq_ptr->{in,out}put_qs Do not go beyond ARRAY_SIZE of irq_ptr->{in,out}put_qs Signed-off-by: Roel Kluin Signed-off-by: Martin Schwidefsky --- drivers/s390/cio/qdio_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index ba4facc37011..d79cf5bf0e62 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -1494,7 +1494,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || (count > QDIO_MAX_BUFFERS_PER_Q) || - (q_nr > QDIO_MAX_QUEUES_PER_IRQ)) + (q_nr >= QDIO_MAX_QUEUES_PER_IRQ)) return -EINVAL; if (!count) -- cgit v1.2.3 From d0591485e15ccd908f91058f7da134248dcdbbb3 Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Fri, 12 Jun 2009 10:26:35 +0200 Subject: [S390] dcssblk: revert devt conversion git commit f331c0296f2a9fee0d396a70598b954062603015 changed users of ->first_minor to devt. This broke device handling in dcssblk, so that no additional devices could be added after the first one. This patch reverts the devt conversion to the previous ->first_minor handling. 
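The restored allocation scheme is easiest to see as a scan for the lowest unused ->first_minor. A condensed sketch, simplified from the dcssblk_assign_free_minor() hunk below; the loop bound and error handling are assumptions, not the exact driver code:

	#include <linux/errno.h>
	#include <linux/genhd.h>
	#include <linux/kdev_t.h>
	#include <linux/list.h>

	/* Return the first minor that no registered dcssblk gendisk uses. */
	static int dcssblk_find_free_minor(void)
	{
		struct dcssblk_dev_info *entry;
		int minor, found;

		for (minor = 0; minor < (1 << MINORBITS); minor++) {
			found = 0;
			list_for_each_entry(entry, &dcssblk_devices, lh)
				if (minor == entry->gd->first_minor)
					found++;
			if (!found)
				return minor;	/* got an unused minor */
		}
		return -EBUSY;
	}

Scanning ->first_minor works because dcssblk assigns the minors itself before registering the gendisk, whereas disk_devt() appears to become usable only once the disk is registered, which is what broke the add path.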
Signed-off-by: Gerald Schaefer Signed-off-by: Martin Schwidefsky --- drivers/s390/block/dcssblk.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index a4c7ffcd9987..b21caf177e37 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -127,7 +127,7 @@ dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info) found = 0; // test if minor available list_for_each_entry(entry, &dcssblk_devices, lh) - if (minor == MINOR(disk_devt(entry->gd))) + if (minor == entry->gd->first_minor) found++; if (!found) break; // got unused minor } @@ -625,7 +625,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char if (rc) goto release_gd; sprintf(dev_info->gd->disk_name, "dcssblk%d", - MINOR(disk_devt(dev_info->gd))); + dev_info->gd->first_minor); list_add_tail(&dev_info->lh, &dcssblk_devices); if (!try_module_get(THIS_MODULE)) { -- cgit v1.2.3 From 45b44d76d373e66d08e0c745dc82ff9123103588 Mon Sep 17 00:00:00 2001 From: Stefan Weinhuber Date: Fri, 12 Jun 2009 10:26:36 +0200 Subject: [S390] dasd: no High Performance FICON in 31-bit mode The High Performance FICON feature is not supported in 31-bit mode, no matter what the various flags say. So we need to check for the CONFIG_64BIT option as well. Signed-off-by: Stefan Weinhuber Signed-off-by: Martin Schwidefsky --- drivers/s390/block/dasd_eckd.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index a41c94053e64..81f8819eaf19 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -2336,9 +2336,10 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, { int tpm, cmdrtd, cmdwtd; int use_prefix; - - struct dasd_eckd_private *private; +#if defined(CONFIG_64BIT) int fcx_in_css, fcx_in_gneq, fcx_in_features; +#endif + struct dasd_eckd_private *private; struct dasd_device *basedev; sector_t first_rec, last_rec; sector_t first_trk, last_trk; @@ -2361,11 +2362,15 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, last_offs = sector_div(last_trk, blk_per_trk); cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); - /* is transport mode supported ? */ + /* is transport mode supported? */ +#if defined(CONFIG_64BIT) fcx_in_css = css_general_characteristics.fcx; fcx_in_gneq = private->gneq->reserved2[7] & 0x04; fcx_in_features = private->features.feature[40] & 0x80; tpm = fcx_in_css && fcx_in_gneq && fcx_in_features; +#else + tpm = 0; +#endif /* is read track data and write track data in command mode supported? */ cmdrtd = private->features.feature[9] & 0x20; -- cgit v1.2.3 From 92636b152f3b58e459988934f689619af9e04dbc Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Fri, 12 Jun 2009 10:26:37 +0200 Subject: [S390] dasd: check_characteristics cleanup Fix a broken memset (sizeof pointer vs sizeof the underlying structure) by cleaning up the involved functions. 
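The underlying bug class is worth spelling out, since it compiles silently. A minimal self-contained illustration with hypothetical names; only the sizeof mix-up itself is taken from the commit:

	#include <string.h>

	struct rdc_data { unsigned char raw[64]; };

	static void broken_clear(struct rdc_data *rdc)
	{
		void *rdc_data = rdc;
		/* sizeof(rdc_data) is the size of the pointer (4 or 8
		 * bytes), so only the first bytes of the struct are zeroed */
		memset(rdc_data, 0, sizeof(rdc_data));
	}

	static void fixed_clear(struct rdc_data *rdc)
	{
		/* sizeof(*rdc) is the size of the structure, 64 bytes here */
		memset(rdc, 0, sizeof(*rdc));
	}

Passing the typed pointer straight to memset, as the cleanup below does, removes the void * detour that made the mistake possible.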
Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky --- drivers/s390/block/dasd.c | 4 ++-- drivers/s390/block/dasd_eckd.c | 17 ++++++++--------- drivers/s390/block/dasd_fba.c | 14 +++++++------- drivers/s390/block/dasd_int.h | 2 +- 4 files changed, 18 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 27a1be0cd4d4..35f43bea5d07 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2427,12 +2427,12 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic, - void **rdc_buffer, int rdc_buffer_size) + void *rdc_buffer, int rdc_buffer_size) { int ret; struct dasd_ccw_req *cqr; - cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size, + cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size, magic); if (IS_ERR(cqr)) return PTR_ERR(cqr); diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 81f8819eaf19..c4e818111a40 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1097,20 +1097,20 @@ dasd_eckd_check_characteristics(struct dasd_device *device) { struct dasd_eckd_private *private; struct dasd_block *block; - void *rdc_data; int is_known, rc; private = (struct dasd_eckd_private *) device->private; - if (private == NULL) { - private = kzalloc(sizeof(struct dasd_eckd_private), - GFP_KERNEL | GFP_DMA); - if (private == NULL) { + if (!private) { + private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); + if (!private) { dev_warn(&device->cdev->dev, "Allocating memory for private DASD data " "failed\n"); return -ENOMEM; } device->private = (void *) private; + } else { + memset(private, 0, sizeof(*private)); } /* Invalidate status of initial analysis. 
*/ private->init_cqr_status = -1; @@ -1161,9 +1161,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device) goto out_err3; /* Read Device Characteristics */ - rdc_data = (void *) &(private->rdc_data); - memset(rdc_data, 0, sizeof(rdc_data)); - rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64); + rc = dasd_generic_read_dev_chars(device, "ECKD", &private->rdc_data, + 64); if (rc) { DBF_EVENT(DBF_WARNING, "Read device characteristics failed, rc=%d for " @@ -1183,7 +1182,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device) private->rdc_data.dev_model, private->rdc_data.cu_type, private->rdc_data.cu_model.model, - private->real_cyl, + private->real_cyl, private->rdc_data.trk_per_cyl, private->rdc_data.sec_per_trk); return 0; diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 8912358daa2f..8c3c8ffbc8bf 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -122,20 +122,20 @@ dasd_fba_check_characteristics(struct dasd_device *device) struct dasd_block *block; struct dasd_fba_private *private; struct ccw_device *cdev = device->cdev; - void *rdc_data; int rc; private = (struct dasd_fba_private *) device->private; - if (private == NULL) { - private = kzalloc(sizeof(struct dasd_fba_private), - GFP_KERNEL | GFP_DMA); - if (private == NULL) { + if (!private) { + private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); + if (!private) { dev_warn(&device->cdev->dev, "Allocating memory for private DASD " "data failed\n"); return -ENOMEM; } device->private = (void *) private; + } else { + memset(private, 0, sizeof(*private)); } block = dasd_alloc_block(); if (IS_ERR(block)) { @@ -150,8 +150,8 @@ dasd_fba_check_characteristics(struct dasd_device *device) block->base = device; /* Read Device Characteristics */ - rdc_data = (void *) &(private->rdc_data); - rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32); + rc = dasd_generic_read_dev_chars(device, "FBA ", &private->rdc_data, + 32); if (rc) { DBF_EVENT(DBF_WARNING, "Read device characteristics returned " "error %d for device: %s", diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index c1e487f774c6..3ab69b5a41f6 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -578,7 +578,7 @@ int dasd_generic_set_offline (struct ccw_device *cdev); int dasd_generic_notify(struct ccw_device *, int); void dasd_generic_handle_state_change(struct dasd_device *); -int dasd_generic_read_dev_chars(struct dasd_device *, char *, void **, int); +int dasd_generic_read_dev_chars(struct dasd_device *, char *, void *, int); char *dasd_get_sense(struct irb *); /* externals in dasd_devmap.c */ -- cgit v1.2.3 From 736e6ea0bf97ec79521f88704ce8506e5d60d078 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Fri, 12 Jun 2009 10:26:38 +0200 Subject: [S390] dasd: sync after async probe Some functions called as a late_initcall depend on completely initialized devices. Since commit f3445a1a656bc26b07946cc6d20de1ef07c8d116 the dasd driver uses the new async framework and relies on the fact that synchronization is done in prepare_namespace which is called after the late_initcalls. Fix this by calling async_synchronize_full at the end of the related init functions. 
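As the diff below shows, the synchronization point actually used is wait_for_device_probe(). The resulting init pattern, condensed into a sketch with a hypothetical driver name standing in for dasd_eckd_driver/dasd_fba_driver:

	#include <linux/device.h>
	#include <asm/ccwdev.h>

	static int __init example_dasd_init(void)
	{
		int ret;

		ret = ccw_driver_register(&example_ccw_driver);
		if (!ret)
			/* block until outstanding (async) probes finish, so
			 * later late_initcalls see fully initialized devices */
			wait_for_device_probe();
		return ret;
	}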
Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky --- drivers/s390/block/dasd_eckd.c | 8 +++++++- drivers/s390/block/dasd_fba.c | 8 +++++++- 2 files changed, 14 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index c4e818111a40..216c09bcd222 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -3277,8 +3277,14 @@ static struct dasd_discipline dasd_eckd_discipline = { static int __init dasd_eckd_init(void) { + int ret; + ASCEBC(dasd_eckd_discipline.ebcname, 4); - return ccw_driver_register(&dasd_eckd_driver); + ret = ccw_driver_register(&dasd_eckd_driver); + if (!ret) + wait_for_device_probe(); + + return ret; } static void __exit diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 8c3c8ffbc8bf..597c6ffdb9f2 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -604,8 +604,14 @@ static struct dasd_discipline dasd_fba_discipline = { static int __init dasd_fba_init(void) { + int ret; + ASCEBC(dasd_fba_discipline.ebcname, 4); - return ccw_driver_register(&dasd_fba_driver); + ret = ccw_driver_register(&dasd_fba_driver); + if (!ret) + wait_for_device_probe(); + + return ret; } static void __exit -- cgit v1.2.3 From 6cc7f168954fe8b3d8988a90b2478a9c11c5ebcb Mon Sep 17 00:00:00 2001 From: Stefan Weinhuber Date: Fri, 12 Jun 2009 10:26:39 +0200 Subject: [S390] dasd: forward internal errors to dasd_sleep_on caller If a DASD request is started with dasd_sleep_on and fails, then the calling function may need to know the reason for the failure. In cases of hardware errors it can inspect the sense data in the irb, but when the reason is internal (e.g. start_IO failed) then it needs a meaningful return code. Signed-off-by: Stefan Weinhuber Signed-off-by: Martin Schwidefsky --- drivers/s390/block/dasd.c | 30 ++++++++++++++++++++++++------ drivers/s390/block/dasd_diag.c | 1 + drivers/s390/block/dasd_eckd.c | 8 +++++--- drivers/s390/block/dasd_int.h | 1 + 4 files changed, 31 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 35f43bea5d07..442bb98a2821 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -851,8 +851,10 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) /* Check the cqr */ rc = dasd_check_cqr(cqr); - if (rc) + if (rc) { + cqr->intrc = rc; return rc; + } device = (struct dasd_device *) cqr->startdev; if (cqr->retries < 0) { /* internal error 14 - start_IO run out of retries */ @@ -915,6 +917,7 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) BUG(); break; } + cqr->intrc = rc; return rc; } @@ -1454,8 +1457,12 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr) dasd_add_request_tail(cqr); wait_event(generic_waitq, _wait_for_wakeup(cqr)); - /* Request status is either done or failed. */ - rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; + if (cqr->status == DASD_CQR_DONE) + rc = 0; + else if (cqr->intrc) + rc = cqr->intrc; + else + rc = -EIO; return rc; } @@ -1477,8 +1484,15 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) dasd_cancel_req(cqr); /* wait (non-interruptible) for final status */ wait_event(generic_waitq, _wait_for_wakeup(cqr)); + cqr->intrc = rc; } - rc = (cqr->status == DASD_CQR_DONE) ?
0 : -EIO; + + if (cqr->status == DASD_CQR_DONE) + rc = 0; + else if (cqr->intrc) + rc = cqr->intrc; + else + rc = -EIO; return rc; } @@ -1523,8 +1537,12 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) wait_event(generic_waitq, _wait_for_wakeup(cqr)); - /* Request status is either done or failed. */ - rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; + if (cqr->status == DASD_CQR_DONE) + rc = 0; + else if (cqr->intrc) + rc = cqr->intrc; + else + rc = -EIO; return rc; } diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 2efaddfae560..644086ba2ede 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -202,6 +202,7 @@ dasd_start_diag(struct dasd_ccw_req * cqr) rc = -EIO; break; } + cqr->intrc = rc; return rc; } diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 216c09bcd222..cf0cfdba1244 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -3017,8 +3017,9 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, " I/O status report for device %s:\n", dev_name(&device->cdev->dev)); len += sprintf(page + len, KERN_ERR PRINTK_HEADER - " in req: %p CS: 0x%02X DS: 0x%02X\n", req, - scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw)); + " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", + req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), + scsw_cc(&irb->scsw), req->intrc); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " device %s: Failing CCW: %p\n", dev_name(&device->cdev->dev), @@ -3119,9 +3120,10 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, " I/O status report for device %s:\n", dev_name(&device->cdev->dev)); len += sprintf(page + len, KERN_ERR PRINTK_HEADER - " in req: %p CS: 0x%02X DS: 0x%02X " + " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d " "fcxs: 0x%02X schxs: 0x%02X\n", req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), + scsw_cc(&irb->scsw), req->intrc, irb->scsw.tm.fcxs, irb->scsw.tm.schxs); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " device %s: Failing TCW: %p\n", diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 3ab69b5a41f6..f97ceb795078 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -173,6 +173,7 @@ struct dasd_ccw_req { void *data; /* pointer to data area */ /* these are important for recovering erroneous requests */ + int intrc; /* internal error, e.g. from start_IO */ struct irb irb; /* device status in case of an error */ struct dasd_ccw_req *refers; /* ERP-chain queueing. */ void *function; /* originating ERP action */ -- cgit v1.2.3 From 88dbd2037229bd2ed7543ffd0d8f2d9dec9d31d2 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:46 +0200 Subject: [S390] ftrace: add function graph tracer support Function graph tracer support for s390. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- drivers/s390/cio/cio.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 9889f188c7c5..5ec7789bd9d8 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -12,6 +12,7 @@ #define KMSG_COMPONENT "cio" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include #include #include #include @@ -626,8 +627,7 @@ out: * handlers). 
* */ -void -do_IRQ (struct pt_regs *regs) +void __irq_entry do_IRQ(struct pt_regs *regs) { struct tpi_info *tpi_info; struct subchannel *sch; -- cgit v1.2.3 From 6ab56315a36e42e90ad7173aa8b3bbd1467d1fea Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 12 Jun 2009 09:29:52 +0100 Subject: kmemleak: Remove the kmemleak.h include in drivers/char/vt.c This file is no longer annotated for false positives but the kmemleak.h include was still present. Signed-off-by: Catalin Marinas Acked-by: Alan Cox --- drivers/char/vt.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/vt.c b/drivers/char/vt.c index de9ebee8657b..c796a86ab7f3 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -103,7 +103,6 @@ #include #include #include -#include #define MAX_NR_CON_DRIVER 16 -- cgit v1.2.3 From ab8e2eb722f1e5fcbd8181e3e9ef4e95c52124df Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 12 Jun 2009 21:46:50 -0600 Subject: cyber2000fb.c: use proper method for stopping unload if CONFIG_ARCH_SHARK Russell explains the __module_get(): > cyber2000fb.c does it in its module initialization function > to prevent the module (when built for Shark) from being unloaded. It > does this because it's from the days of 2.2 kernels and no one bothered > writing the module unload support for Shark. Since 2.4, the correct answer has been to not define an unload fn. Cc: Russell King Cc: alex@shark-linux.de Signed-off-by: Rusty Russell --- drivers/video/cyber2000fb.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c index 83c5cefc266c..da7c01b39be2 100644 --- a/drivers/video/cyber2000fb.c +++ b/drivers/video/cyber2000fb.c @@ -1736,10 +1736,8 @@ static int __init cyber2000fb_init(void) #ifdef CONFIG_ARCH_SHARK err = cyberpro_vl_probe(); - if (!err) { + if (!err) ret = 0; - __module_get(THIS_MODULE); - } #endif #ifdef CONFIG_PCI err = pci_register_driver(&cyberpro_driver); @@ -1749,14 +1747,15 @@ static int __init cyber2000fb_init(void) return ret ? err : 0; } +module_init(cyber2000fb_init); +#ifndef CONFIG_ARCH_SHARK static void __exit cyberpro_exit(void) { pci_unregister_driver(&cyberpro_driver); } - -module_init(cyber2000fb_init); module_exit(cyberpro_exit); +#endif MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("CyberPro 2000, 2010 and 5000 framebuffer driver"); -- cgit v1.2.3 From 9a71af2c3627b379b7c31917a7f6ee0d29bc559b Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 12 Jun 2009 21:46:53 -0600 Subject: module_param: invbool should take a 'bool', not an 'int' It takes an 'int' for historical reasons, and there are only two users: simply switch it over to bool. The other user (uvesafb.c) will get a (harmless-on-x86) warning until the next patch is applied. 
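For reference, the invbool convention: the stored value is the logical inverse of what the user passes on the command line, which is why the backing variable must genuinely be a bool. A usage sketch with hypothetical module and parameter names:

	#include <linux/module.h>

	static bool use_mtrr = true;	/* MTRR enabled by default */

	/* Loading with mydrv.disable_mtrr=1 stores false in use_mtrr. */
	module_param_named(disable_mtrr, use_mtrr, invbool, 0444);
	MODULE_PARM_DESC(disable_mtrr, "Disable use of MTRR registers (bool)");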
Cc: Brad Douglas Cc: Michal Januszewski Signed-off-by: Rusty Russell --- drivers/video/aty/aty128fb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index 35e8eb02b9e9..e4e4d433b007 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c @@ -354,7 +354,7 @@ static int default_crt_on __devinitdata = 0; static int default_lcd_on __devinitdata = 1; #ifdef CONFIG_MTRR -static int mtrr = 1; +static bool mtrr = true; #endif #ifdef CONFIG_PMAC_BACKLIGHT -- cgit v1.2.3 From 2ead9439f0c6ed03faafe27abe8bc1dd256d117b Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 12 Jun 2009 21:46:58 -0600 Subject: uvesafb: improve parameter handling. 1) Now that module_param(..., invbool, ...) requires a bool, and module_param(..., bool, ...) similarly allows one, change pmi_setpal to a bool. 2) #define param_get_scroll to NULL, since it can never be called (perm argument to module_param_named is 0). 3) Return -EINVAL from param_set_scroll if the value is bad, so it's reported. Note that I don't think the old fb_get_options() is required for new drivers: the parameters automatically work as uvesafb.XXX=... anyway. Acked-by: Michał Januszewski Signed-off-by: Rusty Russell --- drivers/video/uvesafb.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index 421770b5e6ab..ca5b4643a401 100644 --- a/drivers/video/uvesafb.c +++ b/drivers/video/uvesafb.c @@ -45,7 +45,7 @@ static struct fb_fix_screeninfo uvesafb_fix __devinitdata = { static int mtrr __devinitdata = 3; /* enable mtrr by default */ static int blank = 1; /* enable blanking by default */ static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */ -static int pmi_setpal __devinitdata = 1; /* use PMI for palette changes */ +static bool pmi_setpal __devinitdata = true; /* use PMI for palette changes */ static int nocrtc __devinitdata; /* ignore CRTC settings */ static int noedid __devinitdata; /* don't try DDC transfers */ static int vram_remap __devinitdata; /* set amt. of memory to be used */ @@ -2002,11 +2002,7 @@ static void __devexit uvesafb_exit(void) module_exit(uvesafb_exit); -static int param_get_scroll(char *buffer, struct kernel_param *kp) -{ - return 0; -} - +#define param_get_scroll NULL static int param_set_scroll(const char *val, struct kernel_param *kp) { ypan = 0; @@ -2017,6 +2013,8 @@ static int param_set_scroll(const char *val, struct kernel_param *kp) ypan = 1; else if (!strcmp(val, "ywrap")) ypan = 2; + else + return -EINVAL; return 0; } -- cgit v1.2.3 From ef688e151c00e5d529703be9a04fd506df8bc54e Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 12 Jun 2009 22:16:35 -0600 Subject: virtio: meet virtio spec by finalizing features before using device Virtio devices are supposed to negotiate features before they start using the device, but the current code doesn't do this. This is because the driver's probe() function invariably has to add buffers to a virtqueue, or probe the disk (virtio_blk). This currently doesn't matter since no existing backend is strict about the feature negotiation. But it's possible to imagine a future feature which completely changes how a device operates: in this case, we'd need to acknowledge it before using the device.
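Once features are finalized before probe() is entered, a driver can branch on negotiated features from the first line of its probe routine. A hedged sketch; the driver body is hypothetical, but virtio_has_feature() and the feature bit are the same helpers that appear elsewhere in this series:

	#include <linux/virtio.h>
	#include <linux/virtio_config.h>
	#include <linux/virtio_net.h>

	static int example_probe(struct virtio_device *vdev)
	{
		/* safe now: the feature set is final before probe() runs */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
			/* ... size receive buffers for mergeable mode ... */
		}
		return 0;
	}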
Signed-off-by: Rusty Russell --- drivers/virtio/virtio.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 018c070a357f..6b6810364860 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -118,13 +118,14 @@ static int virtio_dev_probe(struct device *_d) if (device_features & (1 << i)) set_bit(i, dev->features); + dev->config->finalize_features(dev); + err = drv->probe(dev); if (err) add_status(dev, VIRTIO_CONFIG_S_FAILED); - else { - dev->config->finalize_features(dev); + else add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); - } + return err; } -- cgit v1.2.3 From 9499f5e7ed5224c40706f0cec6542a9916bc7606 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 12 Jun 2009 22:16:35 -0600 Subject: virtio: add names to virtqueue struct, mapping from devices to queues. Add a linked list of all virtqueues for a virtio device: this helps for debugging and is also needed for upcoming interface change. Also, add a "name" field for clearer debug messages. Signed-off-by: Rusty Russell --- drivers/block/virtio_blk.c | 2 +- drivers/char/hw_random/virtio-rng.c | 2 +- drivers/char/virtio_console.c | 4 ++-- drivers/lguest/lguest_device.c | 5 +++-- drivers/net/virtio_net.c | 6 +++--- drivers/s390/kvm/kvm_virtio.c | 7 ++++--- drivers/virtio/virtio.c | 2 ++ drivers/virtio/virtio_balloon.c | 4 ++-- drivers/virtio/virtio_pci.c | 5 +++-- drivers/virtio/virtio_ring.c | 27 ++++++++++++++++++++------- 10 files changed, 41 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index c0facaa55cf4..db55a50d9f6a 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -288,7 +288,7 @@ static int virtblk_probe(struct virtio_device *vdev) sg_init_table(vblk->sg, vblk->sg_elems); /* We expect one virtqueue, for output. */ - vblk->vq = vdev->config->find_vq(vdev, 0, blk_done); + vblk->vq = vdev->config->find_vq(vdev, 0, blk_done, "requests"); if (IS_ERR(vblk->vq)) { err = PTR_ERR(vblk->vq); goto out_free_vblk; diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 86e83f883139..2aeafcea95fe 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -94,7 +94,7 @@ static int virtrng_probe(struct virtio_device *vdev) int err; /* We expect a single virtqueue. */ - vq = vdev->config->find_vq(vdev, 0, random_recv_done); + vq = vdev->config->find_vq(vdev, 0, random_recv_done, "input"); if (IS_ERR(vq)) return PTR_ERR(vq); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index ff6f5a4b58fb..58684e4a0814 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -202,13 +202,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev) /* Find the input queue. */ /* FIXME: This is why we want to wean off hvc: we do nothing * when input comes in. 
*/ - in_vq = vdev->config->find_vq(vdev, 0, hvc_handle_input); + in_vq = vdev->config->find_vq(vdev, 0, hvc_handle_input, "input"); if (IS_ERR(in_vq)) { err = PTR_ERR(in_vq); goto free; } - out_vq = vdev->config->find_vq(vdev, 1, NULL); + out_vq = vdev->config->find_vq(vdev, 1, NULL, "output"); if (IS_ERR(out_vq)) { err = PTR_ERR(out_vq); goto free_in_vq; diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index df44d962626d..4babed899d59 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c @@ -228,7 +228,8 @@ extern void lguest_setup_irq(unsigned int irq); * function. */ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, unsigned index, - void (*callback)(struct virtqueue *vq)) + void (*callback)(struct virtqueue *vq), + const char *name) { struct lguest_device *ldev = to_lgdev(vdev); struct lguest_vq_info *lvq; @@ -263,7 +264,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, /* OK, tell virtio_ring.c to set up a virtqueue now we know its size * and we've got a pointer to its pages. */ vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, - vdev, lvq->pages, lg_notify, callback); + vdev, lvq->pages, lg_notify, callback, name); if (!vq) { err = -ENOMEM; goto unmap; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4d1d47953fc6..be3b734ff5a1 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -906,20 +906,20 @@ static int virtnet_probe(struct virtio_device *vdev) vi->mergeable_rx_bufs = true; /* We expect two virtqueues, receive then send. */ - vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); + vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done, "input"); if (IS_ERR(vi->rvq)) { err = PTR_ERR(vi->rvq); goto free; } - vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done); + vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done, "output"); if (IS_ERR(vi->svq)) { err = PTR_ERR(vi->svq); goto free_recv; } if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { - vi->cvq = vdev->config->find_vq(vdev, 2, NULL); + vi->cvq = vdev->config->find_vq(vdev, 2, NULL, "control"); if (IS_ERR(vi->cvq)) { err = PTR_ERR(vi->svq); goto free_send; diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index cbc8566fab70..ba8995fbf041 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c @@ -173,8 +173,9 @@ static void kvm_notify(struct virtqueue *vq) * this device and sets it up. */ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, - unsigned index, - void (*callback)(struct virtqueue *vq)) + unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name) { struct kvm_device *kdev = to_kvmdev(vdev); struct kvm_vqconfig *config; @@ -194,7 +195,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN, vdev, (void *) config->address, - kvm_notify, callback); + kvm_notify, callback, name); if (!vq) { err = -ENOMEM; goto unmap; diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 6b6810364860..3f52c767dfe9 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -186,6 +186,8 @@ int register_virtio_device(struct virtio_device *dev) /* Acknowledge that we've seen the device. */ add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); + INIT_LIST_HEAD(&dev->vqs); + /* device_register() causes the bus infrastructure to look for a * matching driver. 
*/ err = device_register(&dev->dev); diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 9c76a061a04d..0fa73b4d18b0 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -218,13 +218,13 @@ static int virtballoon_probe(struct virtio_device *vdev) vb->vdev = vdev; /* We expect two virtqueues. */ - vb->inflate_vq = vdev->config->find_vq(vdev, 0, balloon_ack); + vb->inflate_vq = vdev->config->find_vq(vdev, 0, balloon_ack, "inflate"); if (IS_ERR(vb->inflate_vq)) { err = PTR_ERR(vb->inflate_vq); goto out_free_vb; } - vb->deflate_vq = vdev->config->find_vq(vdev, 1, balloon_ack); + vb->deflate_vq = vdev->config->find_vq(vdev, 1, balloon_ack, "deflate"); if (IS_ERR(vb->deflate_vq)) { err = PTR_ERR(vb->deflate_vq); goto out_del_inflate_vq; diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 330aacbdec1f..be4047abd5ba 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -208,7 +208,8 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) /* the config->find_vq() implementation */ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, - void (*callback)(struct virtqueue *vq)) + void (*callback)(struct virtqueue *vq), + const char *name) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_vq_info *info; @@ -247,7 +248,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, /* create the vring */ vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, - vdev, info->queue, vp_notify, callback); + vdev, info->queue, vp_notify, callback, name); if (!vq) { err = -ENOMEM; goto out_activate_queue; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 5c52369ab9bb..579fa693d5d0 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -23,21 +23,30 @@ #ifdef DEBUG /* For development, we want to crash whenever the ring is screwed. */ -#define BAD_RING(_vq, fmt...) \ - do { dev_err(&(_vq)->vq.vdev->dev, fmt); BUG(); } while(0) +#define BAD_RING(_vq, fmt, args...) \ + do { \ + dev_err(&(_vq)->vq.vdev->dev, \ + "%s:"fmt, (_vq)->vq.name, ##args); \ + BUG(); \ + } while (0) /* Caller is supposed to guarantee no reentry. */ #define START_USE(_vq) \ do { \ if ((_vq)->in_use) \ - panic("in_use = %i\n", (_vq)->in_use); \ + panic("%s:in_use = %i\n", \ + (_vq)->vq.name, (_vq)->in_use); \ (_vq)->in_use = __LINE__; \ mb(); \ - } while(0) + } while (0) #define END_USE(_vq) \ do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0) #else -#define BAD_RING(_vq, fmt...) \ - do { dev_err(&_vq->vq.vdev->dev, fmt); (_vq)->broken = true; } while(0) +#define BAD_RING(_vq, fmt, args...) 
\ + do { \ + dev_err(&_vq->vq.vdev->dev, \ + "%s:"fmt, (_vq)->vq.name, ##args); \ + (_vq)->broken = true; \ + } while (0) #define START_USE(vq) #define END_USE(vq) #endif @@ -284,7 +293,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, struct virtio_device *vdev, void *pages, void (*notify)(struct virtqueue *), - void (*callback)(struct virtqueue *)) + void (*callback)(struct virtqueue *), + const char *name) { struct vring_virtqueue *vq; unsigned int i; @@ -303,10 +313,12 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.vq_ops = &vring_vq_ops; + vq->vq.name = name; vq->notify = notify; vq->broken = false; vq->last_used_idx = 0; vq->num_added = 0; + list_add_tail(&vq->vq.list, &vdev->vqs); #ifdef DEBUG vq->in_use = false; #endif @@ -327,6 +339,7 @@ EXPORT_SYMBOL_GPL(vring_new_virtqueue); void vring_del_virtqueue(struct virtqueue *vq) { + list_del(&vq->list); kfree(to_vvq(vq)); } EXPORT_SYMBOL_GPL(vring_del_virtqueue); -- cgit v1.2.3 From d2a7ddda9ffb1c8961abff6714b0f1eb925c120f Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Jun 2009 22:16:36 -0600 Subject: virtio: find_vqs/del_vqs virtio operations This replaces find_vq/del_vq with find_vqs/del_vqs virtio operations, and updates all drivers. This is needed for MSI support, because MSI needs to know the total number of vectors upfront. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell (+ lguest/9p compile fixes) --- drivers/block/virtio_blk.c | 6 ++--- drivers/char/hw_random/virtio-rng.c | 6 ++--- drivers/char/virtio_console.c | 26 +++++++++------------ drivers/lguest/lguest_device.c | 36 +++++++++++++++++++++++++++-- drivers/net/virtio_net.c | 45 +++++++++++++++---------------------- drivers/s390/kvm/kvm_virtio.c | 36 +++++++++++++++++++++++++++-- drivers/virtio/virtio_balloon.c | 27 +++++++++------------- drivers/virtio/virtio_pci.c | 37 ++++++++++++++++++++++++------ 8 files changed, 144 insertions(+), 75 deletions(-) (limited to 'drivers') diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index db55a50d9f6a..07d8e595e51f 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -288,7 +288,7 @@ static int virtblk_probe(struct virtio_device *vdev) sg_init_table(vblk->sg, vblk->sg_elems); /* We expect one virtqueue, for output. */ - vblk->vq = vdev->config->find_vq(vdev, 0, blk_done, "requests"); + vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests"); if (IS_ERR(vblk->vq)) { err = PTR_ERR(vblk->vq); goto out_free_vblk; @@ -388,7 +388,7 @@ out_put_disk: out_mempool: mempool_destroy(vblk->pool); out_free_vq: - vdev->config->del_vq(vblk->vq); + vdev->config->del_vqs(vdev); out_free_vblk: kfree(vblk); out: @@ -409,7 +409,7 @@ static void virtblk_remove(struct virtio_device *vdev) blk_cleanup_queue(vblk->disk->queue); put_disk(vblk->disk); mempool_destroy(vblk->pool); - vdev->config->del_vq(vblk->vq); + vdev->config->del_vqs(vdev); kfree(vblk); } diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 2aeafcea95fe..f2041fede822 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -94,13 +94,13 @@ static int virtrng_probe(struct virtio_device *vdev) int err; /* We expect a single virtqueue. 
*/ - vq = vdev->config->find_vq(vdev, 0, random_recv_done, "input"); + vq = virtio_find_single_vq(vdev, random_recv_done, "input"); if (IS_ERR(vq)) return PTR_ERR(vq); err = hwrng_register(&virtio_hwrng); if (err) { - vdev->config->del_vq(vq); + vdev->config->del_vqs(vdev); return err; } @@ -112,7 +112,7 @@ static void virtrng_remove(struct virtio_device *vdev) { vdev->config->reset(vdev); hwrng_unregister(&virtio_hwrng); - vdev->config->del_vq(vq); + vdev->config->del_vqs(vdev); } static struct virtio_device_id id_table[] = { diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 58684e4a0814..c74dacfa6795 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -188,6 +188,9 @@ static void hvc_handle_input(struct virtqueue *vq) * Finally we put our input buffer in the input queue, ready to receive. */ static int __devinit virtcons_probe(struct virtio_device *dev) { + vq_callback_t *callbacks[] = { hvc_handle_input, NULL}; + const char *names[] = { "input", "output" }; + struct virtqueue *vqs[2]; int err; vdev = dev; @@ -199,20 +202,15 @@ static int __devinit virtcons_probe(struct virtio_device *dev) goto fail; } - /* Find the input queue. */ + /* Find the queues. */ /* FIXME: This is why we want to wean off hvc: we do nothing * when input comes in. */ - in_vq = vdev->config->find_vq(vdev, 0, hvc_handle_input, "input"); - if (IS_ERR(in_vq)) { - err = PTR_ERR(in_vq); + err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names); + if (err) goto free; - } - out_vq = vdev->config->find_vq(vdev, 1, NULL, "output"); - if (IS_ERR(out_vq)) { - err = PTR_ERR(out_vq); - goto free_in_vq; - } + in_vq = vqs[0]; + out_vq = vqs[1]; /* Start using the new console output. */ virtio_cons.get_chars = get_chars; @@ -233,17 +231,15 @@ static int __devinit virtcons_probe(struct virtio_device *dev) hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE); if (IS_ERR(hvc)) { err = PTR_ERR(hvc); - goto free_out_vq; + goto free_vqs; } /* Register the input buffer the first time. */ add_inbuf(); return 0; -free_out_vq: - vdev->config->del_vq(out_vq); -free_in_vq: - vdev->config->del_vq(in_vq); +free_vqs: + vdev->config->del_vqs(vdev); free: kfree(inbuf); fail: diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index 4babed899d59..e082cdac88b4 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c @@ -313,6 +313,38 @@ static void lg_del_vq(struct virtqueue *vq) kfree(lvq); } +static void lg_del_vqs(struct virtio_device *vdev) +{ + struct virtqueue *vq, *n; + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) + lg_del_vq(vq); +} + +static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + struct lguest_device *ldev = to_lgdev(vdev); + int i; + + /* We must have this many virtqueues. */ + if (nvqs > ldev->desc->num_vq) + return -ENOENT; + + for (i = 0; i < nvqs; ++i) { + vqs[i] = lg_find_vq(vdev, i, callbacks[i], names[i]); + if (IS_ERR(vqs[i])) + goto error; + } + return 0; + +error: + lg_del_vqs(vdev); + return PTR_ERR(vqs[i]); +} + /* The ops structure which hooks everything together. 
*/ static struct virtio_config_ops lguest_config_ops = { .get_features = lg_get_features, @@ -322,8 +354,8 @@ static struct virtio_config_ops lguest_config_ops = { .get_status = lg_get_status, .set_status = lg_set_status, .reset = lg_reset, - .find_vq = lg_find_vq, - .del_vq = lg_del_vq, + .find_vqs = lg_find_vqs, + .del_vqs = lg_del_vqs, }; /* The root device for the lguest virtio devices. This makes them appear as diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index be3b734ff5a1..7fa620ddeb21 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -845,6 +845,10 @@ static int virtnet_probe(struct virtio_device *vdev) int err; struct net_device *dev; struct virtnet_info *vi; + struct virtqueue *vqs[3]; + vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL}; + const char *names[] = { "input", "output", "control" }; + int nvqs; /* Allocate ourselves a network device with room for our info */ dev = alloc_etherdev(sizeof(struct virtnet_info)); @@ -905,25 +909,19 @@ static int virtnet_probe(struct virtio_device *vdev) if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) vi->mergeable_rx_bufs = true; - /* We expect two virtqueues, receive then send. */ - vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done, "input"); - if (IS_ERR(vi->rvq)) { - err = PTR_ERR(vi->rvq); + /* We expect two virtqueues, receive then send, + * and optionally control. */ + nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2; + + err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names); + if (err) goto free; - } - vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done, "output"); - if (IS_ERR(vi->svq)) { - err = PTR_ERR(vi->svq); - goto free_recv; - } + vi->rvq = vqs[0]; + vi->svq = vqs[1]; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { - vi->cvq = vdev->config->find_vq(vdev, 2, NULL, "control"); - if (IS_ERR(vi->cvq)) { - err = PTR_ERR(vi->svq); - goto free_send; - } + vi->cvq = vqs[2]; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) dev->features |= NETIF_F_HW_VLAN_FILTER; @@ -941,7 +939,7 @@ static int virtnet_probe(struct virtio_device *vdev) err = register_netdev(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); - goto free_ctrl; + goto free_vqs; } /* Last of all, set up some receive buffers. 
*/ @@ -962,13 +960,8 @@ static int virtnet_probe(struct virtio_device *vdev) unregister: unregister_netdev(dev); -free_ctrl: - if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) - vdev->config->del_vq(vi->cvq); -free_send: - vdev->config->del_vq(vi->svq); -free_recv: - vdev->config->del_vq(vi->rvq); +free_vqs: + vdev->config->del_vqs(vdev); free: free_netdev(dev); return err; @@ -994,12 +987,10 @@ static void virtnet_remove(struct virtio_device *vdev) BUG_ON(vi->num != 0); - vdev->config->del_vq(vi->svq); - vdev->config->del_vq(vi->rvq); - if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) - vdev->config->del_vq(vi->cvq); unregister_netdev(vi->dev); + vdev->config->del_vqs(vi->vdev); + while (vi->pages) __free_pages(get_a_page(vi, GFP_KERNEL), 0); diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index ba8995fbf041..e38e5d306faf 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c @@ -227,6 +227,38 @@ static void kvm_del_vq(struct virtqueue *vq) KVM_S390_VIRTIO_RING_ALIGN)); } +static void kvm_del_vqs(struct virtio_device *vdev) +{ + struct virtqueue *vq, *n; + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) + kvm_del_vq(vq); +} + +static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + struct kvm_device *kdev = to_kvmdev(vdev); + int i; + + /* We must have this many virtqueues. */ + if (nvqs > kdev->desc->num_vq) + return -ENOENT; + + for (i = 0; i < nvqs; ++i) { + vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]); + if (IS_ERR(vqs[i])) + goto error; + } + return 0; + +error: + kvm_del_vqs(vdev); + return PTR_ERR(vqs[i]); +} + /* * The config ops structure as defined by virtio config */ @@ -238,8 +270,8 @@ static struct virtio_config_ops kvm_vq_configspace_ops = { .get_status = kvm_get_status, .set_status = kvm_set_status, .reset = kvm_reset, - .find_vq = kvm_find_vq, - .del_vq = kvm_del_vq, + .find_vqs = kvm_find_vqs, + .del_vqs = kvm_del_vqs, }; /* diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 0fa73b4d18b0..26b278264796 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -204,6 +204,9 @@ static int balloon(void *_vballoon) static int virtballoon_probe(struct virtio_device *vdev) { struct virtio_balloon *vb; + struct virtqueue *vqs[2]; + vq_callback_t *callbacks[] = { balloon_ack, balloon_ack }; + const char *names[] = { "inflate", "deflate" }; int err; vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); @@ -218,22 +221,17 @@ static int virtballoon_probe(struct virtio_device *vdev) vb->vdev = vdev; /* We expect two virtqueues. 
*/ - vb->inflate_vq = vdev->config->find_vq(vdev, 0, balloon_ack, "inflate"); - if (IS_ERR(vb->inflate_vq)) { - err = PTR_ERR(vb->inflate_vq); + err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names); + if (err) goto out_free_vb; - } - vb->deflate_vq = vdev->config->find_vq(vdev, 1, balloon_ack, "deflate"); - if (IS_ERR(vb->deflate_vq)) { - err = PTR_ERR(vb->deflate_vq); - goto out_del_inflate_vq; - } + vb->inflate_vq = vqs[0]; + vb->deflate_vq = vqs[1]; vb->thread = kthread_run(balloon, vb, "vballoon"); if (IS_ERR(vb->thread)) { err = PTR_ERR(vb->thread); - goto out_del_deflate_vq; + goto out_del_vqs; } vb->tell_host_first @@ -241,10 +239,8 @@ static int virtballoon_probe(struct virtio_device *vdev) return 0; -out_del_deflate_vq: - vdev->config->del_vq(vb->deflate_vq); -out_del_inflate_vq: - vdev->config->del_vq(vb->inflate_vq); +out_del_vqs: + vdev->config->del_vqs(vdev); out_free_vb: kfree(vb); out: @@ -264,8 +260,7 @@ static void virtballoon_remove(struct virtio_device *vdev) /* Now we reset the device so we can clean up the queues. */ vdev->config->reset(vdev); - vdev->config->del_vq(vb->deflate_vq); - vdev->config->del_vq(vb->inflate_vq); + vdev->config->del_vqs(vdev); kfree(vb); } diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index be4047abd5ba..027f13fbe493 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -276,11 +276,7 @@ static void vp_del_vq(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); struct virtio_pci_vq_info *info = vq->priv; - unsigned long flags, size; - - spin_lock_irqsave(&vp_dev->lock, flags); - list_del(&info->node); - spin_unlock_irqrestore(&vp_dev->lock, flags); + unsigned long size; vring_del_virtqueue(vq); @@ -293,14 +289,41 @@ static void vp_del_vq(struct virtqueue *vq) kfree(info); } +static void vp_del_vqs(struct virtio_device *vdev) +{ + struct virtqueue *vq, *n; + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) + vp_del_vq(vq); +} + +static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + int i; + + for (i = 0; i < nvqs; ++i) { + vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]); + if (IS_ERR(vqs[i])) + goto error; + } + return 0; + +error: + vp_del_vqs(vdev); + return PTR_ERR(vqs[i]); +} + static struct virtio_config_ops virtio_pci_config_ops = { .get = vp_get, .set = vp_set, .get_status = vp_get_status, .set_status = vp_set_status, .reset = vp_reset, - .find_vq = vp_find_vq, - .del_vq = vp_del_vq, + .find_vqs = vp_find_vqs, + .del_vqs = vp_del_vqs, .get_features = vp_get_features, .finalize_features = vp_finalize_features, }; -- cgit v1.2.3 From 77cf524654a886e0fbbf03b16b44f048deef7b0c Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 14 May 2009 13:55:31 +0300 Subject: virtio_pci: split up vp_interrupt This reorganizes virtio-pci code in vp_interrupt slightly, so that it's easier to add per-vq MSI support on top. Signed-off-by: Michael S. 
Tsirkin Signed-off-by: Rusty Russell --- drivers/virtio/virtio_pci.c | 53 +++++++++++++++++++++++++++++---------------- 1 file changed, 34 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 027f13fbe493..951e673e50a4 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -164,6 +164,37 @@ static void vp_notify(struct virtqueue *vq) iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); } +/* Handle a configuration change: Tell driver if it wants to know. */ +static irqreturn_t vp_config_changed(int irq, void *opaque) +{ + struct virtio_pci_device *vp_dev = opaque; + struct virtio_driver *drv; + drv = container_of(vp_dev->vdev.dev.driver, + struct virtio_driver, driver); + + if (drv && drv->config_changed) + drv->config_changed(&vp_dev->vdev); + return IRQ_HANDLED; +} + +/* Notify all virtqueues on an interrupt. */ +static irqreturn_t vp_vring_interrupt(int irq, void *opaque) +{ + struct virtio_pci_device *vp_dev = opaque; + struct virtio_pci_vq_info *info; + irqreturn_t ret = IRQ_NONE; + unsigned long flags; + + spin_lock_irqsave(&vp_dev->lock, flags); + list_for_each_entry(info, &vp_dev->virtqueues, node) { + if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + spin_unlock_irqrestore(&vp_dev->lock, flags); + + return ret; +} + /* A small wrapper to also acknowledge the interrupt when it's handled. * I really need an EIO hook for the vring so I can ack the interrupt once we * know that we'll be handling the IRQ but before we invoke the callback since @@ -173,9 +204,6 @@ static void vp_notify(struct virtqueue *vq) static irqreturn_t vp_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; - struct virtio_pci_vq_info *info; - irqreturn_t ret = IRQ_NONE; - unsigned long flags; u8 isr; /* reading the ISR has the effect of also clearing it so it's very @@ -187,23 +215,10 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) return IRQ_NONE; /* Configuration change? Tell driver if it wants to know. */ - if (isr & VIRTIO_PCI_ISR_CONFIG) { - struct virtio_driver *drv; - drv = container_of(vp_dev->vdev.dev.driver, - struct virtio_driver, driver); - - if (drv && drv->config_changed) - drv->config_changed(&vp_dev->vdev); - } + if (isr & VIRTIO_PCI_ISR_CONFIG) + vp_config_changed(irq, opaque); - spin_lock_irqsave(&vp_dev->lock, flags); - list_for_each_entry(info, &vp_dev->virtqueues, node) { - if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) - ret = IRQ_HANDLED; - } - spin_unlock_irqrestore(&vp_dev->lock, flags); - - return ret; + return vp_vring_interrupt(irq, opaque); } /* the config->find_vq() implementation */ -- cgit v1.2.3 From 82af8ce84ed65d2fb6d8c017d3f2bbbf161061fb Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 14 May 2009 13:55:41 +0300 Subject: virtio_pci: optional MSI-X support This implements optional MSI-X support in virtio_pci. MSI-X is used whenever the host supports at least 2 MSI-X vectors: 1 for configuration changes and 1 for virtqueues. Per-virtqueue vectors are allocated if enough vectors available. Signed-off-by: Michael S. 
Tsirkin Acked-by: Anthony Liguori Signed-off-by: Rusty Russell (+ whitespace, style) --- drivers/virtio/virtio_pci.c | 228 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 209 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 951e673e50a4..193c8f0e5cc5 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -42,6 +42,26 @@ struct virtio_pci_device /* a list of queues so we can dispatch IRQs */ spinlock_t lock; struct list_head virtqueues; + + /* MSI-X support */ + int msix_enabled; + int intx_enabled; + struct msix_entry *msix_entries; + /* Name strings for interrupts. This size should be enough, + * and I'm too lazy to allocate each name separately. */ + char (*msix_names)[256]; + /* Number of available vectors */ + unsigned msix_vectors; + /* Vectors allocated */ + unsigned msix_used_vectors; +}; + +/* Constants for MSI-X */ +/* Use first vector for configuration changes, second and the rest for + * virtqueues Thus, we need at least 2 vectors for MSI. */ +enum { + VP_MSIX_CONFIG_VECTOR = 0, + VP_MSIX_VQ_VECTOR = 1, }; struct virtio_pci_vq_info @@ -60,6 +80,9 @@ struct virtio_pci_vq_info /* the list node for the virtqueues list */ struct list_head node; + + /* MSI-X vector (or none) */ + unsigned vector; }; /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ @@ -109,7 +132,8 @@ static void vp_get(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset; + void __iomem *ioaddr = vp_dev->ioaddr + + VIRTIO_PCI_CONFIG(vp_dev) + offset; u8 *ptr = buf; int i; @@ -123,7 +147,8 @@ static void vp_set(struct virtio_device *vdev, unsigned offset, const void *buf, unsigned len) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset; + void __iomem *ioaddr = vp_dev->ioaddr + + VIRTIO_PCI_CONFIG(vp_dev) + offset; const u8 *ptr = buf; int i; @@ -221,7 +246,122 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) return vp_vring_interrupt(irq, opaque); } -/* the config->find_vq() implementation */ +static void vp_free_vectors(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + int i; + + if (vp_dev->intx_enabled) { + free_irq(vp_dev->pci_dev->irq, vp_dev); + vp_dev->intx_enabled = 0; + } + + for (i = 0; i < vp_dev->msix_used_vectors; ++i) + free_irq(vp_dev->msix_entries[i].vector, vp_dev); + vp_dev->msix_used_vectors = 0; + + if (vp_dev->msix_enabled) { + /* Disable the vector used for configuration */ + iowrite16(VIRTIO_MSI_NO_VECTOR, + vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); + /* Flush the write out to device */ + ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); + + vp_dev->msix_enabled = 0; + pci_disable_msix(vp_dev->pci_dev); + } +} + +static int vp_enable_msix(struct pci_dev *dev, struct msix_entry *entries, + int *options, int noptions) +{ + int i; + for (i = 0; i < noptions; ++i) + if (!pci_enable_msix(dev, entries, options[i])) + return options[i]; + return -EBUSY; +} + +static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + const char *name = dev_name(&vp_dev->vdev.dev); + unsigned i, v; + int err = -ENOMEM; + /* We want at most one vector per queue and one for config changes. 
+ * Fallback to separate vectors for config and a shared for queues. + * Finally fall back to regular interrupts. */ + int options[] = { max_vqs + 1, 2 }; + int nvectors = max(options[0], options[1]); + + vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, + GFP_KERNEL); + if (!vp_dev->msix_entries) + goto error_entries; + vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, + GFP_KERNEL); + if (!vp_dev->msix_names) + goto error_names; + + for (i = 0; i < nvectors; ++i) + vp_dev->msix_entries[i].entry = i; + + err = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, + options, ARRAY_SIZE(options)); + if (err < 0) { + /* Can't allocate enough MSI-X vectors, use regular interrupt */ + vp_dev->msix_vectors = 0; + err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, + IRQF_SHARED, name, vp_dev); + if (err) + goto error_irq; + vp_dev->intx_enabled = 1; + } else { + vp_dev->msix_vectors = err; + vp_dev->msix_enabled = 1; + + /* Set the vector used for configuration */ + v = vp_dev->msix_used_vectors; + snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, + "%s-config", name); + err = request_irq(vp_dev->msix_entries[v].vector, + vp_config_changed, 0, vp_dev->msix_names[v], + vp_dev); + if (err) + goto error_irq; + ++vp_dev->msix_used_vectors; + + iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); + /* Verify we had enough resources to assign the vector */ + v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); + if (v == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto error_irq; + } + } + + if (vp_dev->msix_vectors && vp_dev->msix_vectors != max_vqs + 1) { + /* Shared vector for all VQs */ + v = vp_dev->msix_used_vectors; + snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, + "%s-virtqueues", name); + err = request_irq(vp_dev->msix_entries[v].vector, + vp_vring_interrupt, 0, vp_dev->msix_names[v], + vp_dev); + if (err) + goto error_irq; + ++vp_dev->msix_used_vectors; + } + return 0; +error_irq: + vp_free_vectors(vdev); + kfree(vp_dev->msix_names); +error_names: + kfree(vp_dev->msix_entries); +error_entries: + return err; +} + static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), const char *name) @@ -230,7 +370,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, struct virtio_pci_vq_info *info; struct virtqueue *vq; unsigned long flags, size; - u16 num; + u16 num, vector; int err; /* Select the queue we're interested in */ @@ -249,6 +389,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, info->queue_index = index; info->num = num; + info->vector = VIRTIO_MSI_NO_VECTOR; size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); @@ -272,12 +413,43 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, vq->priv = info; info->vq = vq; + /* allocate per-vq vector if available and necessary */ + if (callback && vp_dev->msix_used_vectors < vp_dev->msix_vectors) { + vector = vp_dev->msix_used_vectors; + snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names, + "%s-%s", dev_name(&vp_dev->vdev.dev), name); + err = request_irq(vp_dev->msix_entries[vector].vector, + vring_interrupt, 0, + vp_dev->msix_names[vector], vq); + if (err) + goto out_request_irq; + info->vector = vector; + ++vp_dev->msix_used_vectors; + } else + vector = VP_MSIX_VQ_VECTOR; + + if (callback && vp_dev->msix_enabled) { + iowrite16(vector, 
vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); + vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); + if (vector == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto out_assign; + } + } + spin_lock_irqsave(&vp_dev->lock, flags); list_add(&info->node, &vp_dev->virtqueues); spin_unlock_irqrestore(&vp_dev->lock, flags); return vq; +out_assign: + if (info->vector != VIRTIO_MSI_NO_VECTOR) { + free_irq(vp_dev->msix_entries[info->vector].vector, vq); + --vp_dev->msix_used_vectors; + } +out_request_irq: + vring_del_virtqueue(vq); out_activate_queue: iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); free_pages_exact(info->queue, size); @@ -286,17 +458,27 @@ out_info: return ERR_PTR(err); } -/* the config->del_vq() implementation */ static void vp_del_vq(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); struct virtio_pci_vq_info *info = vq->priv; unsigned long size; + iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); + + if (info->vector != VIRTIO_MSI_NO_VECTOR) + free_irq(vp_dev->msix_entries[info->vector].vector, vq); + + if (vp_dev->msix_enabled) { + iowrite16(VIRTIO_MSI_NO_VECTOR, + vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); + /* Flush the write out to device */ + ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); + } + vring_del_virtqueue(vq); /* Select and deactivate the queue */ - iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); @@ -304,30 +486,46 @@ static void vp_del_vq(struct virtqueue *vq) kfree(info); } +/* the config->del_vqs() implementation */ static void vp_del_vqs(struct virtio_device *vdev) { struct virtqueue *vq, *n; list_for_each_entry_safe(vq, n, &vdev->vqs, list) vp_del_vq(vq); + + vp_free_vectors(vdev); } +/* the config->find_vqs() implementation */ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) { - int i; + int vectors = 0; + int i, err; + + /* How many vectors would we like? 
*/ + for (i = 0; i < nvqs; ++i) + if (callbacks[i]) + ++vectors; + + err = vp_request_vectors(vdev, vectors); + if (err) + goto error_request; for (i = 0; i < nvqs; ++i) { vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]); if (IS_ERR(vqs[i])) - goto error; + goto error_find; } return 0; -error: +error_find: vp_del_vqs(vdev); + +error_request: return PTR_ERR(vqs[i]); } @@ -349,7 +547,7 @@ static void virtio_pci_release_dev(struct device *_d) struct virtio_pci_device *vp_dev = to_vp_device(dev); struct pci_dev *pci_dev = vp_dev->pci_dev; - free_irq(pci_dev->irq, vp_dev); + vp_del_vqs(dev); pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); pci_release_regions(pci_dev); @@ -408,21 +606,13 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev, vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; vp_dev->vdev.id.device = pci_dev->subsystem_device; - /* register a handler for the queue with the PCI device's interrupt */ - err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, - dev_name(&vp_dev->vdev.dev), vp_dev); - if (err) - goto out_set_drvdata; - /* finally register the virtio device */ err = register_virtio_device(&vp_dev->vdev); if (err) - goto out_req_irq; + goto out_set_drvdata; return 0; -out_req_irq: - free_irq(pci_dev->irq, vp_dev); out_set_drvdata: pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); -- cgit v1.2.3 From a92892825a122a74ddad1d408fa27132e28b05ae Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 12 Jun 2009 22:16:37 -0600 Subject: virtio: expose features in sysfs Each device negotiates feature bits; expose these in sysfs to help diagnostics and debugging. Signed-off-by: Rusty Russell --- drivers/virtio/virtio.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 3f52c767dfe9..bd0745250fd9 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -31,11 +31,27 @@ static ssize_t modalias_show(struct device *_d, return sprintf(buf, "virtio:d%08Xv%08X\n", dev->id.device, dev->id.vendor); } +static ssize_t features_show(struct device *_d, + struct device_attribute *attr, char *buf) +{ + struct virtio_device *dev = container_of(_d, struct virtio_device, dev); + unsigned int i; + ssize_t len = 0; + + /* We actually represent this as a bitstring, as it could be + * arbitrary length in future. */ + for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++) + len += sprintf(buf+len, "%c", + test_bit(i, dev->features) ? '1' : '0'); + len += sprintf(buf+len, "\n"); + return len; +} static struct device_attribute virtio_dev_attrs[] = { __ATTR_RO(device), __ATTR_RO(vendor), __ATTR_RO(status), __ATTR_RO(modalias), + __ATTR_RO(features), __ATTR_NULL }; -- cgit v1.2.3 From 9fa29b9df32ba4db055f3977933cd0c1b8fe67cd Mon Sep 17 00:00:00 2001 From: Mark McLoughlin Date: Mon, 11 May 2009 18:11:45 +0100 Subject: virtio: indirect ring entries (VIRTIO_RING_F_INDIRECT_DESC) Add a new feature flag for indirect ring entries. These are ring entries which point to a table of buffer descriptors. The idea here is to increase the ring capacity by allowing a larger effective ring size whereby the ring size dictates the number of requests that may be outstanding, rather than the size of those requests. This should be most effective in the case of block I/O where we can potentially benefit by concurrently dispatching a large number of large requests. 
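[Editor's note: a minimal sketch of the mechanism this commit message describes — not code from the patch itself; the real implementation, vring_add_indirect(), appears in the diff below, and the helper name here is invented. One ring slot flagged VRING_DESC_F_INDIRECT points at a separately allocated table of descriptors, so a request with many segments consumes a single slot in the ring proper.]

#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/virtio_ring.h>

/* Illustrative only: chain n scatterlist entries into an out-of-band
 * descriptor table and make one ring descriptor point at it. */
static int example_fill_indirect(struct vring_desc *ring_slot,
                                 struct scatterlist *sg, unsigned int n)
{
        struct vring_desc *table;
        unsigned int i;

        table = kmalloc(n * sizeof(*table), GFP_ATOMIC);
        if (!table)
                return -ENOMEM;

        for (i = 0; i < n; i++, sg = sg_next(sg)) {
                table[i].addr = sg_phys(sg);
                table[i].len = sg->length;
                /* Chain every entry to the next; the last one ends it. */
                table[i].flags = (i + 1 < n) ? VRING_DESC_F_NEXT : 0;
                table[i].next = i + 1;
        }

        /* The ring proper spends only one descriptor on the whole request. */
        ring_slot->addr = virt_to_phys(table);
        ring_slot->len = n * sizeof(*table);
        ring_slot->flags = VRING_DESC_F_INDIRECT;
        return 0;
}

Device-writable segments would additionally carry VRING_DESC_F_WRITE, as the patch's vring_add_indirect() does, and the table is freed again in detach_buf() when the buffer completes.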
Even in the simple case of single segment block requests, this results in a threefold increase in ring capacity. Signed-off-by: Mark McLoughlin Signed-off-by: Rusty Russell --- drivers/virtio/virtio_ring.c | 75 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 73 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 579fa693d5d0..a882f2606515 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -61,6 +61,9 @@ struct vring_virtqueue /* Other side has made a mess, don't try any more. */ bool broken; + /* Host supports indirect buffers */ + bool indirect; + /* Number of free buffers */ unsigned int num_free; /* Head of free buffer list. */ @@ -85,6 +88,55 @@ struct vring_virtqueue #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) +/* Set up an indirect table of descriptors and add it to the queue. */ +static int vring_add_indirect(struct vring_virtqueue *vq, + struct scatterlist sg[], + unsigned int out, + unsigned int in) +{ + struct vring_desc *desc; + unsigned head; + int i; + + desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC); + if (!desc) + return vq->vring.num; + + /* Transfer entries from the sg list into the indirect page */ + for (i = 0; i < out; i++) { + desc[i].flags = VRING_DESC_F_NEXT; + desc[i].addr = sg_phys(sg); + desc[i].len = sg->length; + desc[i].next = i+1; + sg++; + } + for (; i < (out + in); i++) { + desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; + desc[i].addr = sg_phys(sg); + desc[i].len = sg->length; + desc[i].next = i+1; + sg++; + } + + /* Last one doesn't continue. */ + desc[i-1].flags &= ~VRING_DESC_F_NEXT; + desc[i-1].next = 0; + + /* We're about to use a buffer */ + vq->num_free--; + + /* Use a single buffer which doesn't continue */ + head = vq->free_head; + vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; + vq->vring.desc[head].addr = virt_to_phys(desc); + vq->vring.desc[head].len = i * sizeof(struct vring_desc); + + /* Update free pointer */ + vq->free_head = vq->vring.desc[head].next; + + return head; +} + static int vring_add_buf(struct virtqueue *_vq, struct scatterlist sg[], unsigned int out, @@ -94,12 +146,21 @@ static int vring_add_buf(struct virtqueue *_vq, struct vring_virtqueue *vq = to_vvq(_vq); unsigned int i, avail, head, uninitialized_var(prev); + START_USE(vq); + BUG_ON(data == NULL); + + /* If the host supports indirect descriptor tables, and we have multiple + * buffers, then go indirect. FIXME: tune this threshold */ + if (vq->indirect && (out + in) > 1 && vq->num_free) { + head = vring_add_indirect(vq, sg, out, in); + if (head != vq->vring.num) + goto add_head; + } + BUG_ON(out + in > vq->vring.num); BUG_ON(out + in == 0); - START_USE(vq); - if (vq->num_free < out + in) { pr_debug("Can't add buf len %i - avail = %i\n", out + in, vq->num_free); @@ -136,6 +197,7 @@ static int vring_add_buf(struct virtqueue *_vq, /* Update free pointer */ vq->free_head = i; +add_head: /* Set token. 
*/ vq->data[head] = data; @@ -179,6 +241,11 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head) /* Put back on free list: find end */ i = head; + + /* Free the indirect table */ + if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT) + kfree(phys_to_virt(vq->vring.desc[i].addr)); + while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { i = vq->vring.desc[i].next; vq->num_free++; @@ -323,6 +390,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, vq->in_use = false; #endif + vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); + /* No callback? Tell other side not to bother us. */ if (!callback) vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; @@ -351,6 +420,8 @@ void vring_transport_features(struct virtio_device *vdev) for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { switch (i) { + case VIRTIO_RING_F_INDIRECT_DESC: + break; default: /* We don't understand this bit. */ clear_bit(i, vdev->features); -- cgit v1.2.3 From 98e94444748e9af93423d1fab90543e75569a58c Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Mon, 18 May 2009 03:39:09 -0400 Subject: virtio_blk: add missing __dev{init,exit} markings The remove member of the virtio_driver structure uses __devexit_p(), so the remove function itself should be marked with __devexit. And where there be __devexit on the remove, so is there __devinit on the probe. Signed-off-by: Mike Frysinger Signed-off-by: Rusty Russell --- drivers/block/virtio_blk.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 07d8e595e51f..43db3ea15b54 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -254,7 +254,7 @@ static int index_to_minor(int index) return index << PART_BITS; } -static int virtblk_probe(struct virtio_device *vdev) +static int __devinit virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; int err; @@ -395,7 +395,7 @@ out: return err; } -static void virtblk_remove(struct virtio_device *vdev) +static void __devexit virtblk_remove(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; -- cgit v1.2.3 From 594de1dd6449f79c99e1ba4577ea0e4e06e2b405 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 12 Jun 2009 22:16:39 -0600 Subject: virtio: handle short buffers in virtio_rng. If the device fills less than 4 bytes of our random buffer, we'll BUG_ON. It's nicer to handle the case where it partially fills the buffer (the protocol doesn't explicitly forbid that). Signed-off-by: Rusty Russell --- drivers/char/hw_random/virtio-rng.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index f2041fede822..32216b623248 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -35,13 +35,13 @@ static DECLARE_COMPLETION(have_data); static void random_recv_done(struct virtqueue *vq) { - int len; + unsigned int len; /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ if (!vq->vq_ops->get_buf(vq, &len)) return; - data_left = len / sizeof(random_data[0]); + data_left += len; complete(&have_data); } @@ -49,7 +49,7 @@ static void register_buffer(void) { struct scatterlist sg; - sg_init_one(&sg, random_data, RANDOM_DATA_SIZE); + sg_init_one(&sg, random_data+data_left, RANDOM_DATA_SIZE-data_left); /* There should always be room for one buffer.
*/ if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) != 0) BUG(); @@ -59,24 +59,32 @@ static void register_buffer(void) /* At least we don't udelay() in a loop like some other drivers. */ static int virtio_data_present(struct hwrng *rng, int wait) { - if (data_left) + if (data_left >= sizeof(u32)) return 1; +again: if (!wait) return 0; wait_for_completion(&have_data); + + /* Not enough? Re-register. */ + if (unlikely(data_left < sizeof(u32))) { + register_buffer(); + goto again; + } + return 1; } /* virtio_data_present() must have succeeded before this is called. */ static int virtio_data_read(struct hwrng *rng, u32 *data) { - BUG_ON(!data_left); - - *data = random_data[--data_left]; + BUG_ON(data_left < sizeof(u32)); + data_left -= sizeof(u32); + *data = random_data[data_left / 4]; - if (!data_left) { + if (data_left < sizeof(u32)) { init_completion(&have_data); register_buffer(); } -- cgit v1.2.3 From c89e80168ba1ed37627fe03116b0cf8474dcb7e0 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Tue, 26 May 2009 15:46:09 +0200 Subject: virtio: fix id_matching for virtio drivers This bug never appeared, since all current virtio drivers use VIRTIO_DEV_ANY_ID for the vendor field. If a real vendor ID were used, the check in virtio_id_match would be wrong: it returns 0 if id->vendor == dev->id.vendor. Signed-off-by: Christian Borntraeger Signed-off-by: Rusty Russell --- drivers/virtio/virtio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index bd0745250fd9..22642a255d32 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -61,7 +61,7 @@ static inline int virtio_id_match(const struct virtio_device *dev, if (id->device != dev->id.device) return 0; - return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor != dev->id.vendor; + return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor; } /* This looks through all the IDs a driver claims to support. If any of them -- cgit v1.2.3 From e3353853730eb99c56b7b0aed1667d51c0e3699a Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Tue, 26 May 2009 15:46:10 +0200 Subject: virtio: enhance id_matching for virtio drivers This patch allows a virtio driver to use VIRTIO_DEV_ANY_ID for the device id. This will be used by a test module that can be bound to any virtio device. Signed-off-by: Christian Borntraeger Signed-off-by: Rusty Russell --- drivers/virtio/virtio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 22642a255d32..3a43ebf83a49 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -58,7 +58,7 @@ static struct device_attribute virtio_dev_attrs[] = { static inline int virtio_id_match(const struct virtio_device *dev, const struct virtio_device_id *id) { - if (id->device != dev->id.device) + if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID) return 0; return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor; -- cgit v1.2.3 From a6c372de6e4b9a8188b66badcee3e3792eccdd26 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 12 Jun 2009 22:27:01 -0600 Subject: lguest: fix lguest wake on guest clock tick, or fd activity The Launcher could be inside the Guest on another CPU; wake_up_process will do nothing because it is "running". kick_process will knock it back into our kernel in this case, otherwise we'll miss it until the next guest exit.
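[Editor's note: the fix just described hinges on the return value of wake_up_process(), which is worth distilling into a sketch (the helper name is invented; the pattern is exactly what the two hunks below install). wake_up_process() returns 1 if it moved the task onto the runqueue and 0 if the task was already runnable — and "already runnable" can mean it is off executing Guest code on another CPU, where only the IPI sent by kick_process() will pull it back into the Host kernel.]

#include <linux/sched.h>

/* Illustrative only: make sure a Launcher task notices new work,
 * whether it is sleeping or currently running Guest code. */
static void example_wake_or_kick(struct task_struct *tsk)
{
        if (!wake_up_process(tsk))      /* already runnable... */
                kick_process(tsk);      /* ...so interrupt it on its CPU */
}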
Signed-off-by: Rusty Russell --- drivers/lguest/interrupts_and_traps.c | 6 +++--- drivers/lguest/lguest_user.c | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c index 6e99adbe1946..9ea26ad88c9d 100644 --- a/drivers/lguest/interrupts_and_traps.c +++ b/drivers/lguest/interrupts_and_traps.c @@ -511,9 +511,9 @@ static enum hrtimer_restart clockdev_fn(struct hrtimer *timer) /* Remember the first interrupt is the timer interrupt. */ set_bit(0, cpu->irqs_pending); - /* If the Guest is actually stopped, we need to wake it up. */ - if (cpu->halted) - wake_up_process(cpu->tsk); + /* Guest may be stopped or running on another CPU. */ + if (!wake_up_process(cpu->tsk)) + kick_process(cpu->tsk); return HRTIMER_NORESTART; } diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index b8ee103eed5f..bcdcf3453e78 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -24,8 +24,8 @@ static int break_guest_out(struct lg_cpu *cpu, const unsigned long __user*input) if (on) { cpu->break_out = 1; - /* Pop it out of the Guest (may be running on different CPU) */ - wake_up_process(cpu->tsk); + if (!wake_up_process(cpu->tsk)) + kick_process(cpu->tsk); /* Wait for them to reset it */ return wait_event_interruptible(cpu->break_wq, !cpu->break_out); } else { -- cgit v1.2.3 From abd41f037e1a64543000ed73b42f616d04d92700 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 12 Jun 2009 22:27:02 -0600 Subject: lguest: fix race in halt code When the Guest does the LHCALL_HALT hypercall, we go to sleep, expecting that a timer or the Waker will wake_up_process() us. But we do it in a stupid way, leaving a classic missing wakeup race. So split maybe_do_interrupt() into interrupt_pending() and try_deliver_interrupt(), and check interrupt_pending() and the "break_out" flag before calling schedule(). Signed-off-by: Rusty Russell --- drivers/lguest/core.c | 14 ++++++++++++-- drivers/lguest/interrupts_and_traps.c | 26 +++++++++++++++++--------- drivers/lguest/lg.h | 3 ++- 3 files changed, 31 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 4845fb3cf74b..8ca1def5b142 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -188,6 +188,8 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) { /* We stop running once the Guest is dead. */ while (!cpu->lg->dead) { + unsigned int irq; + /* First we run any hypercalls the Guest wants done. */ if (cpu->hcall) do_hypercalls(cpu); @@ -211,7 +213,9 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) /* Check if there are any interrupts which can be delivered now: * if so, this sets up the hander to be executed when we next * run the Guest. */ - maybe_do_interrupt(cpu); + irq = interrupt_pending(cpu); + if (irq < LGUEST_IRQS) + try_deliver_interrupt(cpu, irq); /* All long-lived kernel loops need to check with this horrible * thing called the freezer. If the Host is trying to suspend, @@ -227,7 +231,13 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) * clock timer or LHREQ_BREAK from the Waker will wake us. */ if (cpu->halted) { set_current_state(TASK_INTERRUPTIBLE); + /* Just before we sleep, make sure nothing snuck in + * which we should be doing.
*/ + if (interrupt_pending(cpu) < LGUEST_IRQS + || cpu->break_out) + set_current_state(TASK_RUNNING); + else + schedule(); continue; } diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c index 9ea26ad88c9d..a8c966fee1e4 100644 --- a/drivers/lguest/interrupts_and_traps.c +++ b/drivers/lguest/interrupts_and_traps.c @@ -128,30 +128,38 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, /*H:205 * Virtual Interrupts. * - * maybe_do_interrupt() gets called before every entry to the Guest, to see if - * we should divert the Guest to running an interrupt handler. */ -void maybe_do_interrupt(struct lg_cpu *cpu) + * interrupt_pending() returns the first pending interrupt which isn't blocked + * by the Guest. It is called before every entry to the Guest, and just before + * we go to sleep when the Guest has halted itself. */ +unsigned int interrupt_pending(struct lg_cpu *cpu) { unsigned int irq; DECLARE_BITMAP(blk, LGUEST_IRQS); - struct desc_struct *idt; /* If the Guest hasn't even initialized yet, we can do nothing. */ if (!cpu->lg->lguest_data) - return; + return LGUEST_IRQS; /* Take our "irqs_pending" array and remove any interrupts the Guest * wants blocked: the result ends up in "blk". */ if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts, sizeof(blk))) - return; + return LGUEST_IRQS; bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS); /* Find the first interrupt. */ irq = find_first_bit(blk, LGUEST_IRQS); - /* None? Nothing to do */ - if (irq >= LGUEST_IRQS) - return; + + return irq; +} + +/* This actually diverts the Guest to running an interrupt handler, once an + * interrupt has been identified by interrupt_pending(). */ +void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq) +{ + struct desc_struct *idt; + + BUG_ON(irq >= LGUEST_IRQS); /* They may be in the middle of an iret, where they asked us never to * deliver interrupts. */ diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index af92a176697f..6743cf147d97 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h @@ -139,7 +139,8 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user); #define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT) /* interrupts_and_traps.c: */ -void maybe_do_interrupt(struct lg_cpu *cpu); +unsigned int interrupt_pending(struct lg_cpu *cpu); +void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq); bool deliver_trap(struct lg_cpu *cpu, unsigned int num); void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i, u32 low, u32 hi); -- cgit v1.2.3 From a32a8813d0173163ba44d8f9556e0d89fdc4fb46 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 12 Jun 2009 22:27:02 -0600 Subject: lguest: improve interrupt handling, speed up stream networking lguest never checked for pending interrupts when enabling interrupts, and things still worked. However, it makes a significant difference to TCP performance, so it's time we fixed it by introducing a pending_irq flag and checking it on irq_restore and irq_enable. These two routines are now too big to patch into the 8/10 bytes patch space, so we drop that code. Note: The high latency on interrupt delivery had a very curious effect: once everything else was optimized, networking without GSO was faster than networking with GSO, since more interrupts were sent and hence a greater chance of one getting through to the Guest! Note2: (Almost) Closing the same loophole for iret doesn't have any measurable effect, so I'm leaving that patch for the moment. 
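[Editor's note: only the Host side of this change appears in the hunks that follow; the Guest half lives under arch/x86/lguest and is not reproduced in this log. The sketch below is a hedged reconstruction of the Guest-side idea, reusing the names the Host hunks do show (the irq_pending field and the LHCALL_SEND_INTERRUPTS hypercall) and assuming a simple hcall() wrapper — it is not the patch's code. The point: when the Guest re-enables interrupts, it checks whether the Host flagged one as pending and, if so, traps straight into the Host rather than waiting for the next unrelated exit, which is where the latency win in the numbers below comes from.]

/* Hedged reconstruction of a Guest-side irq_enable. */
static void example_irq_enable(void)
{
        /* Tell the Host we are interruptible again. */
        lguest_data.irq_enabled = X86_EFLAGS_IF;

        /* Did an interrupt arrive while we were closed? The Host sets
         * irq_pending in that case; a no-op hypercall gives it the
         * chance to deliver that interrupt right now. */
        if (lguest_data.irq_pending)
                hcall(LHCALL_SEND_INTERRUPTS, 0, 0, 0);
}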
Before: 1GB tcpblast Guest->Host: 30.7 seconds 1GB tcpblast Guest->Host (no GSO): 76.0 seconds After: 1GB tcpblast Guest->Host: 6.8 seconds 1GB tcpblast Guest->Host (no GSO): 27.8 seconds Signed-off-by: Rusty Russell --- drivers/lguest/core.c | 7 ++++--- drivers/lguest/hypercalls.c | 4 ++++ drivers/lguest/interrupts_and_traps.c | 16 +++++++++++++--- drivers/lguest/lg.h | 4 ++-- 4 files changed, 23 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 8ca1def5b142..03fbc88c0023 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -189,6 +189,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) /* We stop running once the Guest is dead. */ while (!cpu->lg->dead) { unsigned int irq; + bool more; /* First we run any hypercalls the Guest wants done. */ if (cpu->hcall) @@ -213,9 +214,9 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) /* Check if there are any interrupts which can be delivered now: * if so, this sets up the hander to be executed when we next * run the Guest. */ - irq = interrupt_pending(cpu); + irq = interrupt_pending(cpu, &more); if (irq < LGUEST_IRQS) - try_deliver_interrupt(cpu, irq); + try_deliver_interrupt(cpu, irq, more); /* All long-lived kernel loops need to check with this horrible * thing called the freezer. If the Host is trying to suspend, @@ -233,7 +234,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) set_current_state(TASK_INTERRUPTIBLE); /* Just before we sleep, make sure nothing snuck in * which we should be doing. */ - if (interrupt_pending(cpu) < LGUEST_IRQS + if (interrupt_pending(cpu, &more) < LGUEST_IRQS || cpu->break_out) set_current_state(TASK_RUNNING); else diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c index 54d66f05fefa..f252b71ae79e 100644 --- a/drivers/lguest/hypercalls.c +++ b/drivers/lguest/hypercalls.c @@ -37,6 +37,10 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) /* This call does nothing, except by breaking out of the Guest * it makes us process all the asynchronous hypercalls. */ break; + case LHCALL_SEND_INTERRUPTS: + /* This call does nothing too, but by breaking out of the Guest + * it makes us process any pending interrupts. */ + break; case LHCALL_LGUEST_INIT: /* You can't get here unless you're already initialized. Don't * do that. */ diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c index a8c966fee1e4..5a10754b4790 100644 --- a/drivers/lguest/interrupts_and_traps.c +++ b/drivers/lguest/interrupts_and_traps.c @@ -131,7 +131,7 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, * interrupt_pending() returns the first pending interrupt which isn't blocked * by the Guest. It is called before every entry to the Guest, and just before * we go to sleep when the Guest has halted itself. */ -unsigned int interrupt_pending(struct lg_cpu *cpu) +unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) { unsigned int irq; DECLARE_BITMAP(blk, LGUEST_IRQS); @@ -149,13 +149,14 @@ unsigned int interrupt_pending(struct lg_cpu *cpu) /* Find the first interrupt. */ irq = find_first_bit(blk, LGUEST_IRQS); + *more = find_next_bit(blk, LGUEST_IRQS, irq+1); return irq; } /* This actually diverts the Guest to running an interrupt handler, once an * interrupt has been identified by interrupt_pending(). 
*/ -void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq) +void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) { struct desc_struct *idt; @@ -178,8 +179,12 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq) u32 irq_enabled; if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled)) irq_enabled = 0; - if (!irq_enabled) + if (!irq_enabled) { + /* Make sure they know an IRQ is pending. */ + put_user(X86_EFLAGS_IF, + &cpu->lg->lguest_data->irq_pending); return; + } } /* Look at the IDT entry the Guest gave us for this interrupt. The @@ -202,6 +207,11 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq) * here is a compromise which means at least it gets updated every * timer interrupt. */ write_timestamp(cpu); + + /* If there are no other interrupts we want to deliver, clear + * the pending flag. */ + if (!more) + put_user(0, &cpu->lg->lguest_data->irq_pending); } /*:*/ diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index 6743cf147d97..573896533ac9 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h @@ -139,8 +139,8 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user); #define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT) /* interrupts_and_traps.c: */ -unsigned int interrupt_pending(struct lg_cpu *cpu); -void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq); +unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more); +void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more); bool deliver_trap(struct lg_cpu *cpu, unsigned int num); void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i, u32 low, u32 hi); -- cgit v1.2.3 From 81b79b01d057f8c5a378c38d2f738775b972934a Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Wed, 20 May 2009 01:45:45 +0200 Subject: lguest: beyond ARRAY_SIZE of cpu->arch.gdt Do not go beyond ARRAY_SIZE of cpu->arch.gdt Signed-off-by: Roel Kluin Signed-off-by: Rusty Russell --- drivers/lguest/segments.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c index 7ede64ffeef9..482ed5a18750 100644 --- a/drivers/lguest/segments.c +++ b/drivers/lguest/segments.c @@ -150,7 +150,7 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) { /* We assume the Guest has the same number of GDT entries as the * Host, otherwise we'd have to dynamically allocate the Guest GDT. */ - if (num > ARRAY_SIZE(cpu->arch.gdt)) + if (num >= ARRAY_SIZE(cpu->arch.gdt)) kill_guest(cpu, "too many gdt entries %i", num); /* Set it up, then fix it. */ -- cgit v1.2.3 From f086122bb6e885f926f935b1418fca3b293375f0 Mon Sep 17 00:00:00 2001 From: Matias Zabaljauregui Date: Fri, 12 Jun 2009 22:27:04 -0600 Subject: lguest: Segment selectors are 16 bits long. Fix lg_cpu.ss1 definition. If GDT_ENTRIES were ever > 256, this could become a problem. Signed-off-by: Matias Zabaljauregui Signed-off-by: Rusty Russell --- drivers/lguest/lg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index 573896533ac9..74af503ad63c 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h @@ -49,7 +49,7 @@ struct lg_cpu { u32 cr2; int ts; u32 esp1; - u8 ss1; + u16 ss1; /* Bitmap of what has changed: see CHANGED_* above.
*/ int changed; -- cgit v1.2.3 From ed1dc77810159a733240ba6751c1b31023bf8dd7 Mon Sep 17 00:00:00 2001 From: Matias Zabaljauregui Date: Sat, 30 May 2009 15:35:49 -0300 Subject: lguest: map switcher with executable page table entries Map switcher with executable page table entries. (This bug didn't matter before PAE and hence NX support -- RR) Signed-off-by: Matias Zabaljauregui Signed-off-by: Rusty Russell --- drivers/lguest/core.c | 2 +- drivers/lguest/page_tables.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 03fbc88c0023..d0298dc45d97 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -95,7 +95,7 @@ static __init int map_switcher(void) * array of struct pages. It increments that pointer, but we don't * care. */ pagep = switcher_page; - err = map_vm_area(switcher_vma, PAGE_KERNEL, &pagep); + err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep); if (err) { printk("lguest: map_vm_area failed: %i\n", err); goto free_vma; diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index a059cf9980f7..496995370fbc 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c @@ -714,7 +714,7 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) /* Make the last PGD entry for this Guest point to the Switcher's PTE * page for this CPU (with appropriate flags). */ - switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL); + switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC); cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd; -- cgit v1.2.3 From 90603d15fa95605d1d08235b73e220d766f04bb0 Mon Sep 17 00:00:00 2001 From: Matias Zabaljauregui Date: Fri, 12 Jun 2009 22:27:06 -0600 Subject: lguest: use native_set_* macros, which properly handle 64-bit entries when PAE is activated Some cleanups and replace direct assignment with native_set_* macros which properly handle 64-bit entries when PAE is activated Signed-off-by: Matias Zabaljauregui Signed-off-by: Rusty Russell --- drivers/lguest/page_tables.c | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index 496995370fbc..ffba723cd98d 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c @@ -90,7 +90,7 @@ static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr) pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT); /* You should never call this if the PGD entry wasn't valid */ BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT)); - return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE]; + return &page[pte_index(vaddr)]; } /* These two functions just like the above two, except they access the Guest @@ -105,7 +105,7 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr) { unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT)); - return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t); + return gpage + pte_index(vaddr) * sizeof(pte_t); } /*:*/ @@ -171,7 +171,7 @@ static void release_pte(pte_t pte) /* Remember that get_user_pages_fast() took a reference to the page, in * get_pfn()? We have to put it back now. 
*/ if (pte_flags(pte) & _PAGE_PRESENT) - put_page(pfn_to_page(pte_pfn(pte))); + put_page(pte_page(pte)); } /*:*/ @@ -273,7 +273,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) * table entry, even if the Guest says it's writable. That way * we will come back here when a write does actually occur, so * we can update the Guest's _PAGE_DIRTY flag. */ - *spte = gpte_to_spte(cpu, pte_wrprotect(gpte), 0); + native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0)); /* Finally, we write the Guest PTE entry back: we've set the * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */ @@ -323,7 +323,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr) } /*H:450 If we chase down the release_pgd() code, it looks like this: */ -static void release_pgd(struct lguest *lg, pgd_t *spgd) +static void release_pgd(pgd_t *spgd) { /* If the entry's not present, there's nothing to release. */ if (pgd_flags(*spgd) & _PAGE_PRESENT) { @@ -350,7 +350,7 @@ static void flush_user_mappings(struct lguest *lg, int idx) unsigned int i; /* Release every pgd entry up to the kernel's address. */ for (i = 0; i < pgd_index(lg->kernel_address); i++) - release_pgd(lg, lg->pgdirs[idx].pgdir + i); + release_pgd(lg->pgdirs[idx].pgdir + i); } /*H:440 (v) Flushing (throwing away) page tables, @@ -431,7 +431,7 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, /*H:430 (iv) Switching page tables * - * Now we've seen all the page table setting and manipulation, let's see what + * Now we've seen all the page table setting and manipulation, let's see * what happens when the Guest changes page tables (ie. changes the top-level * pgdir). This occurs on almost every context switch. */ void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) @@ -463,7 +463,7 @@ static void release_all_pagetables(struct lguest *lg) if (lg->pgdirs[i].pgdir) /* Every PGD entry except the Switcher at the top */ for (j = 0; j < SWITCHER_PGD_INDEX; j++) - release_pgd(lg, lg->pgdirs[i].pgdir + j); + release_pgd(lg->pgdirs[i].pgdir + j); } /* We also throw away everything when a Guest tells us it's changed a kernel @@ -581,7 +581,7 @@ void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx) pgdir = find_pgdir(lg, gpgdir); if (pgdir < ARRAY_SIZE(lg->pgdirs)) /* ... throw it away. */ - release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx); + release_pgd(lg->pgdirs[pgdir].pgdir + idx); } /* Once we know how much memory we have we can construct simple identity @@ -726,8 +726,9 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) * page is already mapped there, we don't have to copy them out * again. */ pfn = __pa(cpu->regs_page) >> PAGE_SHIFT; - regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL)); - switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte; + native_set_pte(®s_pte, pfn_pte(pfn, PAGE_KERNEL)); + native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], + regs_pte); } /*:*/ @@ -752,21 +753,21 @@ static __init void populate_switcher_pte_page(unsigned int cpu, /* The first entries are easy: they map the Switcher code. */ for (i = 0; i < pages; i++) { - pte[i] = mk_pte(switcher_page[i], - __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)); + native_set_pte(&pte[i], mk_pte(switcher_page[i], + __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED))); } /* The only other thing we map is this CPU's pair of pages. 
*/ i = pages + cpu*2; /* First page (Guest registers) is writable from the Guest */ - pte[i] = pfn_pte(page_to_pfn(switcher_page[i]), - __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)); + native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]), + __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW))); /* The second page contains the "struct lguest_ro_state", and is * read-only. */ - pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]), - __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)); + native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]), + __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED))); } /* We've made it through the page table code. Perhaps our tired brains are -- cgit v1.2.3 From ebe0ba84f55950a89cb7af94c7ffc35ee3992f9e Mon Sep 17 00:00:00 2001 From: Matias Zabaljauregui Date: Sat, 30 May 2009 15:48:08 -0300 Subject: lguest: replace hypercall name LHCALL_SET_PMD with LHCALL_SET_PGD replace LHCALL_SET_PMD with LHCALL_SET_PGD hypercall name (That's really what it is, and the confusion gets worse with PAE support) Signed-off-by: Matias Zabaljauregui Signed-off-by: Rusty Russell Reported-by: Jeremy Fitzhardinge --- drivers/lguest/hypercalls.c | 4 ++-- drivers/lguest/lg.h | 2 +- drivers/lguest/page_tables.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c index f252b71ae79e..51149ca14617 100644 --- a/drivers/lguest/hypercalls.c +++ b/drivers/lguest/hypercalls.c @@ -79,8 +79,8 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) case LHCALL_SET_PTE: guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3)); break; - case LHCALL_SET_PMD: - guest_set_pmd(cpu->lg, args->arg1, args->arg2); + case LHCALL_SET_PGD: + guest_set_pgd(cpu->lg, args->arg1, args->arg2); break; case LHCALL_SET_CLOCKEVENT: guest_set_clockevent(cpu, args->arg1); diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index 74af503ad63c..cacc2da2058d 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h @@ -169,7 +169,7 @@ void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt); int init_guest_pagetable(struct lguest *lg); void free_guest_pagetable(struct lguest *lg); void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable); -void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i); +void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 i); void guest_pagetable_clear_all(struct lg_cpu *cpu); void guest_pagetable_flush_user(struct lg_cpu *cpu); void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir, diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index ffba723cd98d..6a54d76b6236 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c @@ -568,7 +568,7 @@ void guest_set_pte(struct lg_cpu *cpu, * * So with that in mind here's our code to to update a (top-level) PGD entry: */ -void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx) +void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx) { int pgdir; -- cgit v1.2.3 From acdd0b6292b282c4511897ac2691a47befbf1c6a Mon Sep 17 00:00:00 2001 From: Matias Zabaljauregui Date: Fri, 12 Jun 2009 22:27:07 -0600 Subject: lguest: PAE support This version requires that host and guest have the same PAE status. NX cap is not offered to the guest, yet. 
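[Editor's note: one detail of the PAE patch below deserves a worked example. A PAE pte_t is 64 bits wide, but each lguest hypercall argument is a 32-bit word, so the Guest must split an entry across two arguments and the Host reassembles it — the LHCALL_SET_PTE case in the hypercalls.c hunk does exactly that with args->arg3 | (u64)args->arg4 << 32. The Guest-side helper sketched here is invented for illustration and assumes an hcall() wrapper taking the call number plus four arguments.]

/* Illustrative only: handing a 64-bit PTE to the Host in two halves. */
static void example_set_pte(unsigned long gpgdir, unsigned long vaddr,
                            pte_t pte)
{
        hcall(LHCALL_SET_PTE, gpgdir, vaddr,
              pte_val(pte) & 0xffffffff,   /* low 32 bits */
              pte_val(pte) >> 32);         /* high 32 bits */
}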
Signed-off-by: Matias Zabaljauregui Signed-off-by: Rusty Russell --- drivers/lguest/Kconfig | 2 +- drivers/lguest/hypercalls.c | 10 ++ drivers/lguest/lg.h | 5 + drivers/lguest/page_tables.c | 351 ++++++++++++++++++++++++++++++++++++++----- 4 files changed, 329 insertions(+), 39 deletions(-) (limited to 'drivers') diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig index a3d3cbab359a..8f63845db830 100644 --- a/drivers/lguest/Kconfig +++ b/drivers/lguest/Kconfig @@ -1,6 +1,6 @@ config LGUEST tristate "Linux hypervisor example code" - depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX + depends on X86_32 && EXPERIMENTAL && FUTEX select HVC_DRIVER ---help--- This is a very simple module which allows you to run diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c index 51149ca14617..c29ffa19cb74 100644 --- a/drivers/lguest/hypercalls.c +++ b/drivers/lguest/hypercalls.c @@ -77,11 +77,21 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) guest_set_stack(cpu, args->arg1, args->arg2, args->arg3); break; case LHCALL_SET_PTE: +#ifdef CONFIG_X86_PAE + guest_set_pte(cpu, args->arg1, args->arg2, + __pte(args->arg3 | (u64)args->arg4 << 32)); +#else guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3)); +#endif break; case LHCALL_SET_PGD: guest_set_pgd(cpu->lg, args->arg1, args->arg2); break; +#ifdef CONFIG_X86_PAE + case LHCALL_SET_PMD: + guest_set_pmd(cpu->lg, args->arg1, args->arg2); + break; +#endif case LHCALL_SET_CLOCKEVENT: guest_set_clockevent(cpu, args->arg1); break; diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index cacc2da2058d..6201ce59e886 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h @@ -137,6 +137,8 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user); * in the kernel. */ #define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK) #define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT) +#define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK) +#define pmd_pfn(x) (pmd_val(x) >> PAGE_SHIFT) /* interrupts_and_traps.c: */ unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more); @@ -170,6 +172,9 @@ int init_guest_pagetable(struct lguest *lg); void free_guest_pagetable(struct lguest *lg); void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable); void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 i); +#ifdef CONFIG_X86_PAE +void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i); +#endif void guest_pagetable_clear_all(struct lg_cpu *cpu); void guest_pagetable_flush_user(struct lg_cpu *cpu); void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir, diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index 6a54d76b6236..5e2c26adcf06 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c @@ -53,6 +53,17 @@ * page. */ #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1) +/* For PAE we need the PMD index as well. We use the last 2MB, so we + * will need the last pmd entry of the last pmd page. */ +#ifdef CONFIG_X86_PAE +#define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1) +#define RESERVE_MEM 2U +#define CHECK_GPGD_MASK _PAGE_PRESENT +#else +#define RESERVE_MEM 4U +#define CHECK_GPGD_MASK _PAGE_TABLE +#endif + /* We actually need a separate PTE page for each CPU. Remember that after the * Switcher code itself comes two pages for each CPU, and we don't want this * CPU's guest to see the pages of any other CPU. 
*/ @@ -73,23 +84,58 @@ static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) { unsigned int index = pgd_index(vaddr); +#ifndef CONFIG_X86_PAE /* We kill any Guest trying to touch the Switcher addresses. */ if (index >= SWITCHER_PGD_INDEX) { kill_guest(cpu, "attempt to access switcher pages"); index = 0; } +#endif /* Return a pointer index'th pgd entry for the i'th page table. */ return &cpu->lg->pgdirs[i].pgdir[index]; } +#ifdef CONFIG_X86_PAE +/* This routine then takes the PGD entry given above, which contains the + * address of the PMD page. It then returns a pointer to the PMD entry for the + * given address. */ +static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) +{ + unsigned int index = pmd_index(vaddr); + pmd_t *page; + + /* We kill any Guest trying to touch the Switcher addresses. */ + if (pgd_index(vaddr) == SWITCHER_PGD_INDEX && + index >= SWITCHER_PMD_INDEX) { + kill_guest(cpu, "attempt to access switcher pages"); + index = 0; + } + + /* You should never call this if the PGD entry wasn't valid */ + BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT)); + page = __va(pgd_pfn(spgd) << PAGE_SHIFT); + + return &page[index]; +} +#endif + /* This routine then takes the page directory entry returned above, which * contains the address of the page table entry (PTE) page. It then returns a * pointer to the PTE entry for the given address. */ -static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr) +static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) { +#ifdef CONFIG_X86_PAE + pmd_t *pmd = spmd_addr(cpu, spgd, vaddr); + pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT); + + /* You should never call this if the PMD entry wasn't valid */ + BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)); +#else pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT); /* You should never call this if the PGD entry wasn't valid */ BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT)); +#endif + return &page[pte_index(vaddr)]; } @@ -101,10 +147,31 @@ static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t); } -static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr) +#ifdef CONFIG_X86_PAE +static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr) { unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT)); + return gpage + pmd_index(vaddr) * sizeof(pmd_t); +} +#endif + +static unsigned long gpte_addr(struct lg_cpu *cpu, + pgd_t gpgd, unsigned long vaddr) +{ +#ifdef CONFIG_X86_PAE + pmd_t gpmd; +#endif + unsigned long gpage; + + BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT)); +#ifdef CONFIG_X86_PAE + gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); + gpage = pmd_pfn(gpmd) << PAGE_SHIFT; + BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT)); +#else + gpage = pgd_pfn(gpgd) << PAGE_SHIFT; +#endif return gpage + pte_index(vaddr) * sizeof(pte_t); } /*:*/ @@ -184,11 +251,20 @@ static void check_gpte(struct lg_cpu *cpu, pte_t gpte) static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd) { - if ((pgd_flags(gpgd) & ~_PAGE_TABLE) || + if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) || (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) kill_guest(cpu, "bad page directory entry"); } +#ifdef CONFIG_X86_PAE +static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd) +{ + if ((pmd_flags(gpmd) & ~_PAGE_TABLE) || + (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) + kill_guest(cpu, "bad page middle directory entry"); +} +#endif + /*H:330 * (i) Looking up a page table entry when the Guest faults. 
* @@ -207,6 +283,11 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) pte_t gpte; pte_t *spte; +#ifdef CONFIG_X86_PAE + pmd_t *spmd; + pmd_t gpmd; +#endif + /* First step: get the top-level Guest page table entry. */ gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); /* Toplevel not present? We can't map it in. */ @@ -228,12 +309,40 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) check_gpgd(cpu, gpgd); /* And we copy the flags to the shadow PGD entry. The page * number in the shadow PGD is the page we just allocated. */ - *spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd)); + set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd))); } +#ifdef CONFIG_X86_PAE + gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); + /* middle level not present? We can't map it in. */ + if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) + return false; + + /* Now look at the matching shadow entry. */ + spmd = spmd_addr(cpu, *spgd, vaddr); + + if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) { + /* No shadow entry: allocate a new shadow PTE page. */ + unsigned long ptepage = get_zeroed_page(GFP_KERNEL); + + /* This is not really the Guest's fault, but killing it is + * simple for this corner case. */ + if (!ptepage) { + kill_guest(cpu, "out of memory allocating pte page"); + return false; + } + + /* We check that the Guest pmd is OK. */ + check_gpmd(cpu, gpmd); + + /* And we copy the flags to the shadow PMD entry. The page + * number in the shadow PMD is the page we just allocated. */ + native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd))); + } +#endif /* OK, now we look at the lower level in the Guest page table: keep its * address, because we might update it later. */ - gpte_ptr = gpte_addr(gpgd, vaddr); + gpte_ptr = gpte_addr(cpu, gpgd, vaddr); gpte = lgread(cpu, gpte_ptr, pte_t); /* If this page isn't in the Guest page tables, we can't page it in. */ @@ -259,7 +368,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) gpte = pte_mkdirty(gpte); /* Get the pointer to the shadow PTE entry we're going to set. */ - spte = spte_addr(*spgd, vaddr); + spte = spte_addr(cpu, *spgd, vaddr); /* If there was a valid shadow PTE entry here before, we release it. * This can happen with a write to a previously read-only entry. */ release_pte(*spte); @@ -301,14 +410,23 @@ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) pgd_t *spgd; unsigned long flags; +#ifdef CONFIG_X86_PAE + pmd_t *spmd; +#endif /* Look at the current top level entry: is it present? */ spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr); if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) return false; +#ifdef CONFIG_X86_PAE + spmd = spmd_addr(cpu, *spgd, vaddr); + if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) + return false; +#endif + /* Check the flags on the pte entry itself: it must be present and * writable. */ - flags = pte_flags(*(spte_addr(*spgd, vaddr))); + flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr))); return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW); } @@ -322,6 +440,41 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr) kill_guest(cpu, "bad stack page %#lx", vaddr); } +#ifdef CONFIG_X86_PAE +static void release_pmd(pmd_t *spmd) +{ + /* If the entry's not present, there's nothing to release. */ + if (pmd_flags(*spmd) & _PAGE_PRESENT) { + unsigned int i; + pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT); + /* For each entry in the page, we might need to release it. 
*/ + for (i = 0; i < PTRS_PER_PTE; i++) + release_pte(ptepage[i]); + /* Now we can free the page of PTEs */ + free_page((long)ptepage); + /* And zero out the PMD entry so we never release it twice. */ + native_set_pmd(spmd, __pmd(0)); + } +} + +static void release_pgd(pgd_t *spgd) +{ + /* If the entry's not present, there's nothing to release. */ + if (pgd_flags(*spgd) & _PAGE_PRESENT) { + unsigned int i; + pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); + + for (i = 0; i < PTRS_PER_PMD; i++) + release_pmd(&pmdpage[i]); + + /* Now we can free the page of PMDs */ + free_page((long)pmdpage); + /* And zero out the PGD entry so we never release it twice. */ + set_pgd(spgd, __pgd(0)); + } +} + +#else /* !CONFIG_X86_PAE */ /*H:450 If we chase down the release_pgd() code, it looks like this: */ static void release_pgd(pgd_t *spgd) { @@ -341,7 +494,7 @@ static void release_pgd(pgd_t *spgd) *spgd = __pgd(0); } } - +#endif /*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings() * hypercall and once in new_pgdir() when we re-used a top-level pgdir page. * It simply releases every PTE page from 0 up to the Guest's kernel address. */ @@ -370,6 +523,9 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) pgd_t gpgd; pte_t gpte; +#ifdef CONFIG_X86_PAE + pmd_t gpmd; +#endif /* First step: get the top-level Guest page table entry. */ gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); /* Toplevel not present? We can't map it in. */ @@ -378,7 +534,13 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) return -1UL; } - gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t); + gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t); +#ifdef CONFIG_X86_PAE + gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); + if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) + kill_guest(cpu, "Bad address %#lx", vaddr); +#endif + gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t); if (!(pte_flags(gpte) & _PAGE_PRESENT)) kill_guest(cpu, "Bad address %#lx", vaddr); @@ -405,6 +567,9 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, int *blank_pgdir) { unsigned int next; +#ifdef CONFIG_X86_PAE + pmd_t *pmd_table; +#endif /* We pick one entry at random to throw out. Choosing the Least * Recently Used might be better, but this is easy. */ @@ -416,10 +581,27 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, /* If the allocation fails, just keep using the one we have */ if (!cpu->lg->pgdirs[next].pgdir) next = cpu->cpu_pgd; - else - /* This is a blank page, so there are no kernel - * mappings: caller must map the stack! */ + else { +#ifdef CONFIG_X86_PAE + /* In PAE mode, allocate a pmd page and populate the + * last pgd entry. */ + pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL); + if (!pmd_table) { + free_page((long)cpu->lg->pgdirs[next].pgdir); + set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0)); + next = cpu->cpu_pgd; + } else { + set_pgd(cpu->lg->pgdirs[next].pgdir + + SWITCHER_PGD_INDEX, + __pgd(__pa(pmd_table) | _PAGE_PRESENT)); + /* This is a blank page, so there are no kernel + * mappings: caller must map the stack! */ + *blank_pgdir = 1; + } +#else *blank_pgdir = 1; +#endif + } } /* Record which Guest toplevel this shadows. 
*/ cpu->lg->pgdirs[next].gpgdir = gpgdir; @@ -460,10 +642,25 @@ static void release_all_pagetables(struct lguest *lg) /* Every shadow pagetable this Guest has */ for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) - if (lg->pgdirs[i].pgdir) + if (lg->pgdirs[i].pgdir) { +#ifdef CONFIG_X86_PAE + pgd_t *spgd; + pmd_t *pmdpage; + unsigned int k; + + /* Get the last pmd page. */ + spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX; + pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); + + /* And release the pmd entries of that pmd page, + * except for the switcher pmd. */ + for (k = 0; k < SWITCHER_PMD_INDEX; k++) + release_pmd(&pmdpage[k]); +#endif /* Every PGD entry except the Switcher at the top */ for (j = 0; j < SWITCHER_PGD_INDEX; j++) release_pgd(lg->pgdirs[i].pgdir + j); + } } /* We also throw away everything when a Guest tells us it's changed a kernel @@ -504,24 +701,37 @@ static void do_set_pte(struct lg_cpu *cpu, int idx, { /* Look up the matching shadow page directory entry. */ pgd_t *spgd = spgd_addr(cpu, idx, vaddr); +#ifdef CONFIG_X86_PAE + pmd_t *spmd; +#endif /* If the top level isn't present, there's no entry to update. */ if (pgd_flags(*spgd) & _PAGE_PRESENT) { - /* Otherwise, we start by releasing the existing entry. */ - pte_t *spte = spte_addr(*spgd, vaddr); - release_pte(*spte); - - /* If they're setting this entry as dirty or accessed, we might - * as well put that entry they've given us in now. This shaves - * 10% off a copy-on-write micro-benchmark. */ - if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) { - check_gpte(cpu, gpte); - *spte = gpte_to_spte(cpu, gpte, - pte_flags(gpte) & _PAGE_DIRTY); - } else - /* Otherwise kill it and we can demand_page() it in - * later. */ - *spte = __pte(0); +#ifdef CONFIG_X86_PAE + spmd = spmd_addr(cpu, *spgd, vaddr); + if (pmd_flags(*spmd) & _PAGE_PRESENT) { +#endif + /* Otherwise, we start by releasing + * the existing entry. */ + pte_t *spte = spte_addr(cpu, *spgd, vaddr); + release_pte(*spte); + + /* If they're setting this entry as dirty or accessed, + * we might as well put that entry they've given us + * in now. This shaves 10% off a + * copy-on-write micro-benchmark. */ + if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) { + check_gpte(cpu, gpte); + native_set_pte(spte, + gpte_to_spte(cpu, gpte, + pte_flags(gpte) & _PAGE_DIRTY)); + } else + /* Otherwise kill it and we can demand_page() + * it in later. */ + native_set_pte(spte, __pte(0)); +#ifdef CONFIG_X86_PAE + } +#endif } } @@ -572,8 +782,6 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx) { int pgdir; - /* The kernel seems to try to initialize this early on: we ignore its - * attempts to map over the Switcher. */ if (idx >= SWITCHER_PGD_INDEX) return; @@ -583,6 +791,12 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx) /* ... throw it away. 
*/ release_pgd(lg->pgdirs[pgdir].pgdir + idx); } +#ifdef CONFIG_X86_PAE +void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx) +{ + guest_pagetable_clear_all(&lg->cpus[0]); +} +#endif /* Once we know how much memory we have we can construct simple identity * (which set virtual == physical) and linear mappings @@ -596,8 +810,16 @@ static unsigned long setup_pagetables(struct lguest *lg, { pgd_t __user *pgdir; pte_t __user *linear; - unsigned int mapped_pages, i, linear_pages, phys_linear; unsigned long mem_base = (unsigned long)lg->mem_base; + unsigned int mapped_pages, i, linear_pages; +#ifdef CONFIG_X86_PAE + pmd_t __user *pmds; + unsigned int j; + pgd_t pgd; + pmd_t pmd; +#else + unsigned int phys_linear; +#endif /* We have mapped_pages frames to map, so we need * linear_pages page tables to map them. */ @@ -610,6 +832,9 @@ static unsigned long setup_pagetables(struct lguest *lg, /* Now we use the next linear_pages pages as pte pages */ linear = (void *)pgdir - linear_pages * PAGE_SIZE; +#ifdef CONFIG_X86_PAE + pmds = (void *)linear - PAGE_SIZE; +#endif /* Linear mapping is easy: put every page's address into the * mapping in order. */ for (i = 0; i < mapped_pages; i++) { @@ -621,6 +846,22 @@ static unsigned long setup_pagetables(struct lguest *lg, /* The top level points to the linear page table pages above. * We setup the identity and linear mappings here. */ +#ifdef CONFIG_X86_PAE + for (i = 0, j; i < mapped_pages && j < PTRS_PER_PMD; + i += PTRS_PER_PTE, j++) { + native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i) + - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); + + if (copy_to_user(&pmds[j], &pmd, sizeof(pmd)) != 0) + return -EFAULT; + } + + set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT)); + if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0) + return -EFAULT; + if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0) + return -EFAULT; +#else phys_linear = (unsigned long)linear - mem_base; for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) { pgd_t pgd; @@ -633,6 +874,7 @@ static unsigned long setup_pagetables(struct lguest *lg, &pgd, sizeof(pgd))) return -EFAULT; } +#endif /* We return the top level (guest-physical) address: remember where * this is. */ @@ -648,7 +890,10 @@ int init_guest_pagetable(struct lguest *lg) u64 mem; u32 initrd_size; struct boot_params __user *boot = (struct boot_params *)lg->mem_base; - +#ifdef CONFIG_X86_PAE + pgd_t *pgd; + pmd_t *pmd_table; +#endif /* Get the Guest memory size and the ramdisk size from the boot header * located at lg->mem_base (Guest address 0). */ if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem)) @@ -663,6 +908,15 @@ int init_guest_pagetable(struct lguest *lg) lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL); if (!lg->pgdirs[0].pgdir) return -ENOMEM; +#ifdef CONFIG_X86_PAE + pgd = lg->pgdirs[0].pgdir; + pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL); + if (!pmd_table) + return -ENOMEM; + + set_pgd(pgd + SWITCHER_PGD_INDEX, + __pgd(__pa(pmd_table) | _PAGE_PRESENT)); +#endif lg->cpus[0].cpu_pgd = 0; return 0; } @@ -672,17 +926,24 @@ void page_table_guest_data_init(struct lg_cpu *cpu) { /* We get the kernel address: above this is all kernel memory. */ if (get_user(cpu->lg->kernel_address, - &cpu->lg->lguest_data->kernel_address) - /* We tell the Guest that it can't use the top 4MB of virtual - * addresses used by the Switcher. 
*/ - || put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem) - || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir)) + &cpu->lg->lguest_data->kernel_address) + /* We tell the Guest that it can't use the top 2 or 4 MB + * of virtual addresses used by the Switcher. */ + || put_user(RESERVE_MEM * 1024 * 1024, + &cpu->lg->lguest_data->reserve_mem) + || put_user(cpu->lg->pgdirs[0].gpgdir, + &cpu->lg->lguest_data->pgdir)) kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); /* In flush_user_mappings() we loop from 0 to * "pgd_index(lg->kernel_address)". This assumes it won't hit the * Switcher mappings, so check that now. */ +#ifdef CONFIG_X86_PAE + if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX && + pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX) +#else if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX) +#endif kill_guest(cpu, "bad kernel address %#lx", cpu->lg->kernel_address); } @@ -708,16 +969,30 @@ void free_guest_pagetable(struct lguest *lg) void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) { pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); - pgd_t switcher_pgd; pte_t regs_pte; unsigned long pfn; +#ifdef CONFIG_X86_PAE + pmd_t switcher_pmd; + pmd_t *pmd_table; + + native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >> + PAGE_SHIFT, PAGE_KERNEL_EXEC)); + + pmd_table = __va(pgd_pfn(cpu->lg-> + pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX]) + << PAGE_SHIFT); + native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd); +#else + pgd_t switcher_pgd; + /* Make the last PGD entry for this Guest point to the Switcher's PTE * page for this CPU (with appropriate flags). */ switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC); cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd; +#endif /* We also change the Switcher PTE page. When we're running the Guest, * we want the Guest's "regs" page to appear where the first Switcher * page for this CPU is. This is an optimization: when the Switcher -- cgit v1.2.3 From 92b4d8df8436cdd74d22a2a5b6b23b9abc737a3e Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 12 Jun 2009 22:27:08 -0600 Subject: lguest: PAE fixes 1) j wasn't initialized in setup_pagetables, so they weren't set up for me causing immediate guest crashes. 2) gpte_addr should not re-read the pmd from the Guest. Especially not BUG_ON() based on the value. If we ever supported SMP guests, they could trigger that. And the Launcher could also trigger it (tho currently root-only). 
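To make bug (1) concrete, here is a minimal self-contained C sketch of that class of error (hypothetical code, not part of the patch itself): in "for (i = 0, j; ...)" the comma operator evaluates the bare expression "j" and throws the value away, so j is never initialized and the bound check "j < PTRS_PER_PMD" tests garbage.

#include <stdio.h>

int main(void)
{
	unsigned int i, j;

	/* Buggy shape -- the "j" after the comma initializes nothing,
	 * so the condition would read an uninitialized variable:
	 *
	 *	for (i = 0, j; i < 4 && j < 4; i++, j++)
	 *
	 * Corrected shape, matching the hunk in the diff below:
	 * initialize both counters. */
	for (i = j = 0; i < 4 && j < 4; i++, j++)
		printf("i=%u j=%u\n", i, j);

	return 0;
}
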
Signed-off-by: Rusty Russell --- drivers/lguest/page_tables.c | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index 5e2c26adcf06..a6fe1abda240 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c @@ -154,26 +154,25 @@ static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr) BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT)); return gpage + pmd_index(vaddr) * sizeof(pmd_t); } -#endif static unsigned long gpte_addr(struct lg_cpu *cpu, - pgd_t gpgd, unsigned long vaddr) + pmd_t gpmd, unsigned long vaddr) { -#ifdef CONFIG_X86_PAE - pmd_t gpmd; -#endif - unsigned long gpage; + unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT; - BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT)); -#ifdef CONFIG_X86_PAE - gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); - gpage = pmd_pfn(gpmd) << PAGE_SHIFT; BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT)); + return gpage + pte_index(vaddr) * sizeof(pte_t); +} #else - gpage = pgd_pfn(gpgd) << PAGE_SHIFT; -#endif +static unsigned long gpte_addr(struct lg_cpu *cpu, + pgd_t gpgd, unsigned long vaddr) +{ + unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; + + BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT)); return gpage + pte_index(vaddr) * sizeof(pte_t); } +#endif /*:*/ /*M:014 get_pfn is slow: we could probably try to grab batches of pages here as @@ -339,10 +338,15 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) * number in the shadow PMD is the page we just allocated. */ native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd))); } -#endif + + /* OK, now we look at the lower level in the Guest page table: keep its + * address, because we might update it later. */ + gpte_ptr = gpte_addr(cpu, gpmd, vaddr); +#else /* OK, now we look at the lower level in the Guest page table: keep its * address, because we might update it later. */ gpte_ptr = gpte_addr(cpu, gpgd, vaddr); +#endif gpte = lgread(cpu, gpte_ptr, pte_t); /* If this page isn't in the Guest page tables, we can't page it in. */ @@ -522,7 +526,6 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) { pgd_t gpgd; pte_t gpte; - #ifdef CONFIG_X86_PAE pmd_t gpmd; #endif @@ -534,13 +537,14 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) return -1UL; } - gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t); #ifdef CONFIG_X86_PAE gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) kill_guest(cpu, "Bad address %#lx", vaddr); -#endif + gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t); +#else gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t); +#endif if (!(pte_flags(gpte) & _PAGE_PRESENT)) kill_guest(cpu, "Bad address %#lx", vaddr); @@ -847,7 +851,7 @@ static unsigned long setup_pagetables(struct lguest *lg, /* The top level points to the linear page table pages above. * We setup the identity and linear mappings here. 
*/ #ifdef CONFIG_X86_PAE - for (i = 0, j; i < mapped_pages && j < PTRS_PER_PMD, + for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD, i += PTRS_PER_PTE, j++) { native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i) - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); -- cgit v1.2.3 From 9f155a9b3d5a5444bcc5e049ec2547bb5107150e Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 12 Jun 2009 22:27:08 -0600 Subject: lguest: allow any process to send interrupts We currently only allow the Launcher process to send interrupts, but as we already send interrupts from the hrtimer, it's a simple matter of extracting that code into a common set_interrupt routine. As we switch to a thread per virtqueue, this avoids a bottleneck through the main Launcher process. Signed-off-by: Rusty Russell --- drivers/lguest/interrupts_and_traps.c | 19 +++++++++++++++---- drivers/lguest/lg.h | 1 + drivers/lguest/lguest_user.c | 10 ++-------- 3 files changed, 18 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c index 5a10754b4790..0e9067b0d507 100644 --- a/drivers/lguest/interrupts_and_traps.c +++ b/drivers/lguest/interrupts_and_traps.c @@ -213,6 +213,20 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) if (!more) put_user(0, &cpu->lg->lguest_data->irq_pending); } + +/* And this is the routine when we want to set an interrupt for the Guest. */ +void set_interrupt(struct lg_cpu *cpu, unsigned int irq) +{ + /* Next time the Guest runs, the core code will see if it can deliver + * this interrupt. */ + set_bit(irq, cpu->irqs_pending); + + /* Make sure it sees it; it might be asleep (eg. halted), or + * running the Guest right now, in which case kick_process() + * will knock it out. */ + if (!wake_up_process(cpu->tsk)) + kick_process(cpu->tsk); +} /*:*/ /* Linux uses trap 128 for system calls. Plan9 uses 64, and Ron Minnich sent @@ -528,10 +542,7 @@ static enum hrtimer_restart clockdev_fn(struct hrtimer *timer) struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt); /* Remember the first interrupt is the timer interrupt. */ - set_bit(0, cpu->irqs_pending); - /* Guest may be stopped or running on another CPU. */ - if (!wake_up_process(cpu->tsk)) - kick_process(cpu->tsk); + set_interrupt(cpu, 0); return HRTIMER_NORESTART; } diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index 6201ce59e886..040cb70780e7 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h @@ -143,6 +143,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user); /* interrupts_and_traps.c: */ unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more); void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more); +void set_interrupt(struct lg_cpu *cpu, unsigned int irq); bool deliver_trap(struct lg_cpu *cpu, unsigned int num); void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i, u32 low, u32 hi); diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index bcdcf3453e78..1982b45bd935 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -45,9 +45,8 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) return -EFAULT; if (irq >= LGUEST_IRQS) return -EINVAL; - /* Next time the Guest runs, the core code will see if it can deliver - * this interrupt. 
*/ - set_bit(irq, cpu->irqs_pending); + + set_interrupt(cpu, irq); return 0; } @@ -252,11 +251,6 @@ static ssize_t write(struct file *file, const char __user *in, /* Once the Guest is dead, you can only read() why it died. */ if (lg->dead) return -ENOENT; - - /* If you're not the task which owns the Guest, all you can do - * is break the Launcher out of running the Guest. */ - if (current != cpu->tsk && req != LHREQ_BREAK) - return -EPERM; } switch (req) { -- cgit v1.2.3 From df60aeef4f4fe0645d9a195a7689005520422de5 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 12 Jun 2009 22:27:09 -0600 Subject: lguest: use eventfds for device notification Currently, when a Guest wants to perform I/O it calls LHCALL_NOTIFY with an address: the main Launcher process returns with this address, and figures out what device to run. A far nicer model is to let processes bind an eventfd to an address: if we find one, we simply signal the eventfd. Signed-off-by: Rusty Russell Cc: Davide Libenzi --- drivers/lguest/Kconfig | 2 +- drivers/lguest/core.c | 8 ++-- drivers/lguest/lg.h | 13 ++++++ drivers/lguest/lguest_user.c | 98 +++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 115 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig index 8f63845db830..0aaa0597a622 100644 --- a/drivers/lguest/Kconfig +++ b/drivers/lguest/Kconfig @@ -1,6 +1,6 @@ config LGUEST tristate "Linux hypervisor example code" - depends on X86_32 && EXPERIMENTAL && FUTEX + depends on X86_32 && EXPERIMENTAL && EVENTFD select HVC_DRIVER ---help--- This is a very simple module which allows you to run diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index d0298dc45d97..508569c9571a 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -198,9 +198,11 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) /* It's possible the Guest did a NOTIFY hypercall to the * Launcher, in which case we return from the read() now. */ if (cpu->pending_notify) { - if (put_user(cpu->pending_notify, user)) - return -EFAULT; - return sizeof(cpu->pending_notify); + if (!send_notify_to_eventfd(cpu)) { + if (put_user(cpu->pending_notify, user)) + return -EFAULT; + return sizeof(cpu->pending_notify); + } } /* Check for signals */ diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index 040cb70780e7..32fefdc6ad3e 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h @@ -82,6 +82,16 @@ struct lg_cpu { struct lg_cpu_arch arch; }; +struct lg_eventfd { + unsigned long addr; + struct file *event; +}; + +struct lg_eventfd_map { + unsigned int num; + struct lg_eventfd map[]; +}; + /* The private info the thread maintains about the guest. */ struct lguest { @@ -102,6 +112,8 @@ struct lguest unsigned int stack_pages; u32 tsc_khz; + struct lg_eventfd_map *eventfds; + /* Dead? 
*/ const char *dead; }; @@ -154,6 +166,7 @@ void setup_default_idt_entries(struct lguest_ro_state *state, void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, const unsigned long *def); void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta); +bool send_notify_to_eventfd(struct lg_cpu *cpu); void init_clockdev(struct lg_cpu *cpu); bool check_syscall_vector(struct lguest *lg); int init_interrupts(void); diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 1982b45bd935..f6bf255f1837 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include "lg.h" /*L:055 When something happens, the Waker process needs a way to stop the @@ -35,6 +37,81 @@ static int break_guest_out(struct lg_cpu *cpu, const unsigned long __user*input) } } +bool send_notify_to_eventfd(struct lg_cpu *cpu) +{ + unsigned int i; + struct lg_eventfd_map *map; + + /* lg->eventfds is RCU-protected */ + rcu_read_lock(); + map = rcu_dereference(cpu->lg->eventfds); + for (i = 0; i < map->num; i++) { + if (map->map[i].addr == cpu->pending_notify) { + eventfd_signal(map->map[i].event, 1); + cpu->pending_notify = 0; + break; + } + } + rcu_read_unlock(); + return cpu->pending_notify == 0; +} + +static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) +{ + struct lg_eventfd_map *new, *old = lg->eventfds; + + if (!addr) + return -EINVAL; + + /* Replace the old array with the new one, carefully: others can + * be accessing it at the same time */ + new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1), + GFP_KERNEL); + if (!new) + return -ENOMEM; + + /* First make identical copy. */ + memcpy(new->map, old->map, sizeof(old->map[0]) * old->num); + new->num = old->num; + + /* Now append new entry. */ + new->map[new->num].addr = addr; + new->map[new->num].event = eventfd_fget(fd); + if (IS_ERR(new->map[new->num].event)) { + kfree(new); + return PTR_ERR(new->map[new->num].event); + } + new->num++; + + /* Now put new one in place. */ + rcu_assign_pointer(lg->eventfds, new); + + /* We're not in a big hurry. Wait until noone's looking at old + * version, then delete it. */ + synchronize_rcu(); + kfree(old); + + return 0; +} + +static int attach_eventfd(struct lguest *lg, const unsigned long __user *input) +{ + unsigned long addr, fd; + int err; + + if (get_user(addr, input) != 0) + return -EFAULT; + input++; + if (get_user(fd, input) != 0) + return -EFAULT; + + mutex_lock(&lguest_lock); + err = add_eventfd(lg, addr, fd); + mutex_unlock(&lguest_lock); + + return 0; +} + /*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt * number to /dev/lguest. */ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) @@ -184,6 +261,13 @@ static int initialize(struct file *file, const unsigned long __user *input) goto unlock; } + lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL); + if (!lg->eventfds) { + err = -ENOMEM; + goto free_lg; + } + lg->eventfds->num = 0; + /* Populate the easy fields of our "struct lguest" */ lg->mem_base = (void __user *)args[0]; lg->pfn_limit = args[1]; @@ -191,7 +275,7 @@ static int initialize(struct file *file, const unsigned long __user *input) /* This is the first cpu (cpu 0) and it will start booting at args[2] */ err = lg_cpu_start(&lg->cpus[0], 0, args[2]); if (err) - goto release_guest; + goto free_eventfds; /* Initialize the Guest's shadow page tables, using the toplevel * address the Launcher gave us. 
This allocates memory, so can fail. */ @@ -210,7 +294,9 @@ static int initialize(struct file *file, const unsigned long __user *input) free_regs: /* FIXME: This should be in free_vcpu */ free_page(lg->cpus[0].regs_page); -release_guest: +free_eventfds: + kfree(lg->eventfds); +free_lg: kfree(lg); unlock: mutex_unlock(&lguest_lock); @@ -260,6 +346,8 @@ static ssize_t write(struct file *file, const char __user *in, return user_send_irq(cpu, input); case LHREQ_BREAK: return break_guest_out(cpu, input); + case LHREQ_EVENTFD: + return attach_eventfd(lg, input); default: return -EINVAL; } @@ -297,6 +385,12 @@ static int close(struct inode *inode, struct file *file) * the Launcher's memory management structure. */ mmput(lg->cpus[i].mm); } + + /* Release any eventfds they registered. */ + for (i = 0; i < lg->eventfds->num; i++) + fput(lg->eventfds->map[i].event); + kfree(lg->eventfds); + /* If lg->dead doesn't contain an error code it will be NULL or a * kmalloc()ed string, either of which is ok to hand to kfree(). */ if (!IS_ERR(lg->dead)) -- cgit v1.2.3 From 5dac051bc6030963181b69faddd9e0ad04f85fa8 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 12 Jun 2009 22:27:10 -0600 Subject: lguest: remove obsolete LHREQ_BREAK call We no longer need an efficient mechanism to force the Guest back into host userspace, as each device is serviced without bothering the main Guest process (aka. the Launcher). Signed-off-by: Rusty Russell --- drivers/lguest/core.c | 11 +++-------- drivers/lguest/lg.h | 4 +--- drivers/lguest/lguest_user.c | 31 ------------------------------- 3 files changed, 4 insertions(+), 42 deletions(-) (limited to 'drivers') diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 508569c9571a..a6974e9b8ebf 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -209,10 +209,6 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) if (signal_pending(current)) return -ERESTARTSYS; - /* If Waker set break_out, return to Launcher. */ - if (cpu->break_out) - return -EAGAIN; - /* Check if there are any interrupts which can be delivered now: * if so, this sets up the hander to be executed when we next * run the Guest. */ @@ -231,13 +227,12 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) break; /* If the Guest asked to be stopped, we sleep. The Guest's - * clock timer or LHREQ_BREAK from the Waker will wake us. */ + * clock timer will wake us. */ if (cpu->halted) { set_current_state(TASK_INTERRUPTIBLE); - /* Just before we sleep, make sure nothing snuck in + /* Just before we sleep, make sure no interrupt snuck in * which we should be doing. */ - if (interrupt_pending(cpu, &more) < LGUEST_IRQS - || cpu->break_out) + if (interrupt_pending(cpu, &more) < LGUEST_IRQS) set_current_state(TASK_RUNNING); else schedule(); diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index 32fefdc6ad3e..d4e8979735cb 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h @@ -71,9 +71,7 @@ struct lg_cpu { /* Virtual clock device */ struct hrtimer hrt; - /* Do we need to stop what we're doing and return to userspace? */ - int break_out; - wait_queue_head_t break_wq; + /* Did the Guest tell us to halt? 
*/ int halted; /* Pending virtual interrupts */ diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index f6bf255f1837..32e297121058 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -11,32 +11,6 @@ #include #include "lg.h" -/*L:055 When something happens, the Waker process needs a way to stop the - * kernel running the Guest and return to the Launcher. So the Waker writes - * LHREQ_BREAK and the value "1" to /dev/lguest to do this. Once the Launcher - * has done whatever needs attention, it writes LHREQ_BREAK and "0" to release - * the Waker. */ -static int break_guest_out(struct lg_cpu *cpu, const unsigned long __user*input) -{ - unsigned long on; - - /* Fetch whether they're turning break on or off. */ - if (get_user(on, input) != 0) - return -EFAULT; - - if (on) { - cpu->break_out = 1; - if (!wake_up_process(cpu->tsk)) - kick_process(cpu->tsk); - /* Wait for them to reset it */ - return wait_event_interruptible(cpu->break_wq, !cpu->break_out); - } else { - cpu->break_out = 0; - wake_up(&cpu->break_wq); - return 0; - } -} - bool send_notify_to_eventfd(struct lg_cpu *cpu) { unsigned int i; @@ -202,9 +176,6 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) * address. */ lguest_arch_setup_regs(cpu, start_ip); - /* Initialize the queue for the Waker to wait on */ - init_waitqueue_head(&cpu->break_wq); - /* We keep a pointer to the Launcher task (ie. current task) for when * other Guests want to wake this one (eg. console input). */ cpu->tsk = current; @@ -344,8 +315,6 @@ static ssize_t write(struct file *file, const char __user *in, return initialize(file, input); case LHREQ_IRQ: return user_send_irq(cpu, input); - case LHREQ_BREAK: - return break_guest_out(cpu, input); case LHREQ_EVENTFD: return attach_eventfd(lg, input); default: -- cgit v1.2.3 From 0b1b51f50ed7b4225d0631140de8873fb235a6c0 Mon Sep 17 00:00:00 2001 From: Paulius Zaleckas Date: Mon, 6 Apr 2009 16:10:54 +0300 Subject: trivial: mtd: fix Kconfig comment about 'armflash' The real 'armflash' map driver is selected by CONFIG_MTD_ARM_INTEGRATOR Signed-off-by: Paulius Zaleckas Signed-off-by: Jiri Kosina --- drivers/mtd/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 7d04fb9ddcaa..b8e35a0b4d72 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig @@ -154,7 +154,8 @@ config MTD_AFS_PARTS You will still need the parsing functions to be called by the driver for your particular device. It won't happen automatically. The - 'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example. + 'armflash' map driver (CONFIG_MTD_ARM_INTEGRATOR) does this, for + example. 
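The halted-Guest path in the run_guest() hunk above relies on the classic prepare-to-sleep idiom; a minimal sketch of it, using a hypothetical helper name (set_current_state(), schedule(), interrupt_pending() and LGUEST_IRQS are the real identifiers from the hunks above):

/* Mark ourselves TASK_INTERRUPTIBLE *before* the final condition check,
 * so a wake_up_process() that lands between the check and schedule()
 * is not lost: it flips us back to TASK_RUNNING and schedule() returns
 * almost immediately. */
static void sleep_until_interrupt(struct lg_cpu *cpu, bool *more)
{
	set_current_state(TASK_INTERRUPTIBLE);
	if (interrupt_pending(cpu, more) < LGUEST_IRQS)
		/* An interrupt snuck in: don't sleep after all. */
		set_current_state(TASK_RUNNING);
	else
		schedule();
}
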
config MTD_OF_PARTS tristate "Flash partition map based on OF description" -- cgit v1.2.3 From 4b512d26f425be1c779c8319249b42ce3c3424d2 Mon Sep 17 00:00:00 2001 From: Thadeu Lima de Souza Cascardo Date: Tue, 14 Apr 2009 23:14:10 -0300 Subject: trivial: typo (en|dis|avail|remove)bale -> (en|dis|avail|remove)able Signed-off-by: Thadeu Lima de Souza Cascardo Signed-off-by: Jiri Kosina --- drivers/char/amiserial.c | 2 +- drivers/media/video/hdpvr/hdpvr-video.c | 2 +- drivers/net/b44.h | 2 +- drivers/net/e100.c | 2 +- drivers/net/niu.h | 4 ++-- drivers/scsi/megaraid.h | 2 +- drivers/scsi/megaraid/mbox_defs.h | 2 +- drivers/staging/rt2860/common/mlme.c | 2 +- drivers/staging/rt2870/common/mlme.c | 2 +- drivers/staging/rt3070/common/mlme.c | 2 +- drivers/watchdog/iop_wdt.c | 2 +- 11 files changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c index fd3ebd1be570..72429b6b2fa8 100644 --- a/drivers/char/amiserial.c +++ b/drivers/char/amiserial.c @@ -779,7 +779,7 @@ static void change_speed(struct async_struct *info, info->IER |= UART_IER_MSI; } /* TBD: - * Does clearing IER_MSI imply that we should disbale the VBL interrupt ? + * Does clearing IER_MSI imply that we should disable the VBL interrupt ? */ /* diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c index 3e6ffee8dfed..ccd47f57f42c 100644 --- a/drivers/media/video/hdpvr/hdpvr-video.c +++ b/drivers/media/video/hdpvr/hdpvr-video.c @@ -181,7 +181,7 @@ static int hdpvr_submit_buffers(struct hdpvr_device *dev) buff_list); if (buf->status != BUFSTAT_AVAILABLE) { v4l2_err(&dev->v4l2_dev, - "buffer not marked as availbale\n"); + "buffer not marked as available\n"); ret = -EFAULT; goto err; } diff --git a/drivers/net/b44.h b/drivers/net/b44.h index e678498de6db..d24158e7f309 100644 --- a/drivers/net/b44.h +++ b/drivers/net/b44.h @@ -97,7 +97,7 @@ #define B44_DMARX_STAT 0x021CUL /* DMA RX Current Active Desc. 
+ Status */ #define DMARX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */ #define DMARX_STAT_SMASK 0x0000f000 /* State Mask */ -#define DMARX_STAT_SDISABLED 0x00000000 /* State Disbaled */ +#define DMARX_STAT_SDISABLED 0x00000000 /* State Disabled */ #define DMARX_STAT_SACTIVE 0x00001000 /* State Active */ #define DMARX_STAT_SIDLE 0x00002000 /* State Idle Wait */ #define DMARX_STAT_SSTOPPED 0x00003000 /* State Stopped */ diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 0f9ee1348552..af5364f49550 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -2785,7 +2785,7 @@ static int e100_resume(struct pci_dev *pdev) /* ack any pending wake events, disable PME */ pci_enable_wake(pdev, 0, 0); - /* disbale reverse auto-negotiation */ + /* disable reverse auto-negotiation */ if (nic->phy == phy_82552_v) { u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, E100_82552_SMARTSPEED); diff --git a/drivers/net/niu.h b/drivers/net/niu.h index 8754e44cadae..3bd0b5933d59 100644 --- a/drivers/net/niu.h +++ b/drivers/net/niu.h @@ -3242,8 +3242,8 @@ struct niu { struct niu_parent *parent; u32 flags; -#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removebale PHY detected*/ -#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removebale PHY */ +#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removeable PHY detected*/ +#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removeable PHY */ #define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */ #define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */ #define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */ diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h index 795201fa0b48..512c2cc1a33f 100644 --- a/drivers/scsi/megaraid.h +++ b/drivers/scsi/megaraid.h @@ -469,7 +469,7 @@ typedef struct { u8 type; /* Type of the device */ u8 cur_status; /* current status of the device */ u8 tag_depth; /* Level of tagging */ - u8 sync_neg; /* sync negotiation - ENABLE or DISBALE */ + u8 sync_neg; /* sync negotiation - ENABLE or DISABLE */ u32 size; /* configurable size in terms of 512 byte blocks */ }__attribute__ ((packed)) phys_drv; diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h index 170399ef06f4..b25b74764ec3 100644 --- a/drivers/scsi/megaraid/mbox_defs.h +++ b/drivers/scsi/megaraid/mbox_defs.h @@ -686,7 +686,7 @@ typedef struct { * @type : Type of the device * @cur_status : current status of the device * @tag_depth : Level of tagging - * @sync_neg : sync negotiation - ENABLE or DISBALE + * @sync_neg : sync negotiation - ENABLE or DISABLE * @size : configurable size in terms of 512 byte */ typedef struct { diff --git a/drivers/staging/rt2860/common/mlme.c b/drivers/staging/rt2860/common/mlme.c index c00f9ab9c46c..2edf2999f5c8 100644 --- a/drivers/staging/rt2860/common/mlme.c +++ b/drivers/staging/rt2860/common/mlme.c @@ -5664,7 +5664,7 @@ VOID AsicUpdateProtect( #if 0 MacReg |= (pAd->CommonCfg.RtsThreshold << 8); #else - // If the user want disable RtsThreshold and enbale Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096 + // If the user want disable RtsThreshold and enable Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096 if (( #ifdef DOT11_N_SUPPORT (pAd->CommonCfg.BACapability.field.AmsduEnable) || diff --git a/drivers/staging/rt2870/common/mlme.c b/drivers/staging/rt2870/common/mlme.c index 8a82cee8bf26..a26bc033337d 100644 --- a/drivers/staging/rt2870/common/mlme.c +++ b/drivers/staging/rt2870/common/mlme.c @@ -5561,7 +5561,7 @@ VOID AsicUpdateProtect( #if 0 MacReg |= 
(pAd->CommonCfg.RtsThreshold << 8); #else - // If the user want disable RtsThreshold and enbale Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096 + // If the user want disable RtsThreshold and enable Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096 if (( #ifdef DOT11_N_SUPPORT (pAd->CommonCfg.BACapability.field.AmsduEnable) || diff --git a/drivers/staging/rt3070/common/mlme.c b/drivers/staging/rt3070/common/mlme.c index 0ffbfa36699e..0189bab013cf 100644 --- a/drivers/staging/rt3070/common/mlme.c +++ b/drivers/staging/rt3070/common/mlme.c @@ -5575,7 +5575,7 @@ VOID AsicUpdateProtect( RTMP_IO_READ32(pAd, TX_RTS_CFG, &MacReg); MacReg &= 0xFF0000FF; - // If the user want disable RtsThreshold and enbale Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096 + // If the user want disable RtsThreshold and enable Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096 if (( #ifdef DOT11_N_SUPPORT (pAd->CommonCfg.BACapability.field.AmsduEnable) || diff --git a/drivers/watchdog/iop_wdt.c b/drivers/watchdog/iop_wdt.c index 96eb2cbe5874..0c9059676690 100644 --- a/drivers/watchdog/iop_wdt.c +++ b/drivers/watchdog/iop_wdt.c @@ -192,7 +192,7 @@ static int iop_wdt_release(struct inode *inode, struct file *file) if (test_bit(WDT_ENABLED, &wdt_status)) state = wdt_disable(); - /* if the timer is not disbaled reload and notify that we are still + /* if the timer is not disabled reload and notify that we are still * going down */ if (state != 0) { -- cgit v1.2.3 From 6d60f9dfc8d437e914d46fa355c50c695cad24e7 Mon Sep 17 00:00:00 2001 From: Martin Olsson Date: Tue, 7 Apr 2009 10:30:24 +0200 Subject: trivial: Fix paramater/parameter typo in dmesg and source comments Signed-off-by: Martin Olsson Signed-off-by: Jiri Kosina --- drivers/net/wireless/rndis_wlan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index bebf735cd4bd..ff0042ffe3e9 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -584,7 +584,7 @@ static int rndis_set_config_parameter(struct usbnet *dev, char *param, ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER, infobuf, info_len); if (ret != 0) - devdbg(dev, "setting rndis config paramater failed, %d.", ret); + devdbg(dev, "setting rndis config parameter failed, %d.", ret); kfree(infobuf); return ret; -- cgit v1.2.3 From 98a1708de1bfa5fe1c490febba850d6043d3c7fa Mon Sep 17 00:00:00 2001 From: Martin Olsson Date: Wed, 22 Apr 2009 18:21:29 +0200 Subject: trivial: fix typos s/paramter/parameter/ and s/excute/execute/ in documentation and source comments. 
Signed-off-by: Martin Olsson Signed-off-by: Jiri Kosina --- drivers/ata/libata-acpi.c | 4 ++-- drivers/ata/libata-eh.c | 2 +- drivers/edac/e752x_edac.c | 2 +- drivers/isdn/divert/isdn_divert.c | 2 +- drivers/net/appletalk/ltpc.c | 2 +- drivers/net/e1000e/e1000.h | 2 +- drivers/net/ehea/ehea.h | 2 +- drivers/net/igbvf/igbvf.h | 2 +- drivers/net/mlx4/en_netdev.c | 2 +- drivers/net/qlge/qlge_mpi.c | 6 +++--- drivers/net/skfp/h/smt.h | 2 +- drivers/net/tokenring/3c359.c | 2 +- drivers/net/tokenring/lanstreamer.c | 2 +- drivers/net/tokenring/olympic.c | 2 +- drivers/net/ucc_geth_ethtool.c | 2 +- drivers/net/wireless/rt2x00/rt2x00lib.h | 2 +- drivers/net/wireless/wavelan_cs.c | 2 +- drivers/scsi/lpfc/lpfc_scsi.c | 4 ++-- 18 files changed, 22 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index 6273d98d00eb..ac176da1f94e 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -748,9 +748,9 @@ static int ata_acpi_run_tf(struct ata_device *dev, /** * ata_acpi_exec_tfs - get then write drive taskfile settings * @dev: target ATA device - * @nr_executed: out paramter for the number of executed commands + * @nr_executed: out parameter for the number of executed commands * - * Evaluate _GTF and excute returned taskfiles. + * Evaluate _GTF and execute returned taskfiles. * * LOCKING: * EH context. diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 94919ad03df1..fa22f94ca415 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2864,7 +2864,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, /** * ata_set_mode - Program timings and issue SET FEATURES - XFER * @link: link on which timings will be programmed - * @r_failed_dev: out paramter for failed device + * @r_failed_dev: out parameter for failed device * * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If * ata_set_mode() fails, pointer to the failing device is diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c index facfdb1fa71c..d205d493a68a 100644 --- a/drivers/edac/e752x_edac.c +++ b/drivers/edac/e752x_edac.c @@ -1084,7 +1084,7 @@ static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt) struct pci_dev *dev = pvt->dev_d0f1; int enable = 1; - /* Allow module paramter override, else see if CPU supports parity */ + /* Allow module parameter override, else see if CPU supports parity */ if (sysbus_parity != -1) { enable = sysbus_parity; } else if (cpu_id[0] && diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c index 7d97d54588d9..77e9fdda0597 100644 --- a/drivers/isdn/divert/isdn_divert.c +++ b/drivers/isdn/divert/isdn_divert.c @@ -183,7 +183,7 @@ int cf_command(int drvid, int mode, (mode != 1) ? "" : " 0 ", (mode != 1) ? 
"" : fwd_nr); - retval = divert_if.ll_cmd(&cs->ics); /* excute command */ + retval = divert_if.ll_cmd(&cs->ics); /* execute command */ if (!retval) { cs->prev = NULL; diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 78cc71469136..b642647170be 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -1220,7 +1220,7 @@ static int __init ltpc_setup(char *str) if (ints[0] > 2) { dma = ints[3]; } - /* ignore any other paramters */ + /* ignore any other parameters */ } return 1; } diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index f37360aa12a8..44f0bf23dafc 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -62,7 +62,7 @@ struct e1000_info; e_printk(KERN_NOTICE, adapter, format, ## arg) -/* Interrupt modes, as used by the IntMode paramter */ +/* Interrupt modes, as used by the IntMode parameter */ #define E1000E_INT_MODE_LEGACY 0 #define E1000E_INT_MODE_MSI 1 #define E1000E_INT_MODE_MSIX 2 diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 16a41389575a..78952f8324e2 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -268,7 +268,7 @@ struct ehea_qp_init_attr { }; /* - * Event Queue attributes, passed as paramter + * Event Queue attributes, passed as parameter */ struct ehea_eq_attr { u32 type; diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h index 4bff35e46871..d488733893a6 100644 --- a/drivers/net/igbvf/igbvf.h +++ b/drivers/net/igbvf/igbvf.h @@ -45,7 +45,7 @@ struct igbvf_adapter; /* Interrupt defines */ #define IGBVF_START_ITR 648 /* ~6000 ints/sec */ -/* Interrupt modes, as used by the IntMode paramter */ +/* Interrupt modes, as used by the IntMode parameter */ #define IGBVF_INT_MODE_LEGACY 0 #define IGBVF_INT_MODE_MSI 1 #define IGBVF_INT_MODE_MSIX 2 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 7bcc49de1637..e8eeef0c9c9a 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c @@ -371,7 +371,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) int i; /* If we haven't received a specific coalescing setting - * (module param), we set the moderation paramters as follows: + * (module param), we set the moderation parameters as follows: * - moder_cnt is set to the number of mtu sized packets to * satisfy our coelsing target. * - moder_time is set to a fixed value. diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c index 9f81b797f10b..ac9493f6c1a1 100644 --- a/drivers/net/qlge/qlge_mpi.c +++ b/drivers/net/qlge/qlge_mpi.c @@ -141,7 +141,7 @@ end: /* We are being asked by firmware to accept * a change to the port. This is only * a change to max frame sizes (Tx/Rx), pause - * paramters, or loopback mode. We wake up a worker + * parameters, or loopback mode. We wake up a worker * to handler processing this since a mailbox command * will need to be sent to ACK the request. */ @@ -371,7 +371,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) /* We are being asked by firmware to accept * a change to the port. This is only * a change to max frame sizes (Tx/Rx), pause - * paramters, or loopback mode. + * parameters, or loopback mode. */ case AEN_IDC_REQ: status = ql_idc_req_aen(qdev); @@ -380,7 +380,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) /* Process and inbound IDC event. * This will happen when we're trying to * change tx/rx max frame size, change pause - * paramters or loopback mode. 
+ * parameters or loopback mode. */ case AEN_IDC_CMPLT: case AEN_IDC_EXT: diff --git a/drivers/net/skfp/h/smt.h b/drivers/net/skfp/h/smt.h index 1ff589988d10..2976757a36fb 100644 --- a/drivers/net/skfp/h/smt.h +++ b/drivers/net/skfp/h/smt.h @@ -413,7 +413,7 @@ struct smt_p_reason { #define SMT_RDF_SUCCESS 0x00000003 /* success (PMF) */ #define SMT_RDF_BADSET 0x00000004 /* bad set count (PMF) */ #define SMT_RDF_ILLEGAL 0x00000005 /* read only (PMF) */ -#define SMT_RDF_NOPARAM 0x6 /* paramter not supported (PMF) */ +#define SMT_RDF_NOPARAM 0x6 /* parameter not supported (PMF) */ #define SMT_RDF_RANGE 0x8 /* out of range */ #define SMT_RDF_AUTHOR 0x9 /* not autohorized */ #define SMT_RDF_LENGTH 0x0a /* length error */ diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index 534c0f38483c..0337b9d673f4 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c @@ -79,7 +79,7 @@ MODULE_AUTHOR("Mike Phillips ") ; MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ; MODULE_FIRMWARE(FW_NAME); -/* Module paramters */ +/* Module parameters */ /* Ring Speed 0,4,16 * 0 = Autosense diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c index 2e70ee8f1459..46a2cc92d979 100644 --- a/drivers/net/tokenring/lanstreamer.c +++ b/drivers/net/tokenring/lanstreamer.c @@ -169,7 +169,7 @@ static char *open_min_error[] = { "Monitor Contention failer for RPL", "FDX Protocol Error" }; -/* Module paramters */ +/* Module parameters */ /* Ring Speed 0,4,16 * 0 = Autosense diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index d068a9d36883..2d819fc85589 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c @@ -132,7 +132,7 @@ static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Reserved", "Reserved", "No Monitor Detected for RPL", "Monitor Contention failer for RPL", "FDX Protocol Error"}; -/* Module paramters */ +/* Module parameters */ MODULE_AUTHOR("Mike Phillips ") ; MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ; diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c index 6fcb500257bc..61fe80dda3e3 100644 --- a/drivers/net/ucc_geth_ethtool.c +++ b/drivers/net/ucc_geth_ethtool.c @@ -7,7 +7,7 @@ * * Limitation: * Can only get/set setttings of the first queue. - * Need to re-open the interface manually after changing some paramters. + * Need to re-open the interface manually after changing some parameters. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h index a631613177d0..d83e3794d340 100644 --- a/drivers/net/wireless/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h @@ -235,7 +235,7 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna); * @rt2x00dev: Pointer to &struct rt2x00_dev. * * Initialize work structure and all link tuning related - * paramters. This will not start the link tuning process itself. + * parameters. This will not start the link tuning process itself. 
*/ void rt2x00link_register(struct rt2x00_dev *rt2x00dev); diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index e55b33961aeb..fa2821be44c2 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c @@ -138,7 +138,7 @@ psa_read(struct net_device * dev, /*------------------------------------------------------------------*/ /* - * Write the Paramter Storage Area to the WaveLAN card's memory + * Write the Parameter Storage Area to the WaveLAN card's memory */ static void psa_write(struct net_device * dev, diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 8032c5adb6a9..679adfff0bf8 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -827,8 +827,8 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc) * @reftag: out: ref tag (reference tag) * * Description: - * Extract DIF paramters from the command if possible. Otherwise, - * use default paratmers. + * Extract DIF parameters from the command if possible. Otherwise, + * use default parameters. * **/ static inline void -- cgit v1.2.3 From 19af5cdb7c79ff5ec96a99893ffb7f894f4a3dc1 Mon Sep 17 00:00:00 2001 From: Martin Olsson Date: Thu, 23 Apr 2009 11:37:37 +0200 Subject: trivial: fix typo milisecond/millisecond for documentation and source comments. Signed-off-by: Martin Olsson Signed-off-by: Jiri Kosina --- drivers/ide/ide-atapi.c | 2 +- drivers/isdn/mISDN/dsp_core.c | 2 +- drivers/net/ipg.h | 2 +- drivers/s390/scsi/zfcp_fc.c | 2 +- drivers/scsi/dpt/osd_util.h | 2 +- drivers/usb/serial/io_ti.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index 757e5956b132..ae1cae38a078 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -577,7 +577,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive) /* * If necessary schedule the packet transfer to occur 'timeout' - * miliseconds later in ide_delayed_transfer_pc() after the + * milliseconds later in ide_delayed_transfer_pc() after the * device says it's ready for a packet. */ if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) { diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 3083338716b2..47dbfe298b43 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -502,7 +502,7 @@ tone_off: break; } dsp->cmx_delay = (*((int *)data)) << 3; - /* miliseconds to samples */ + /* milliseconds to samples */ if (dsp->cmx_delay >= (CMX_BUFF_HALF>>1)) /* clip to half of maximum usable buffer (half of half buffer) */ diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h index dd9318f19497..dfc2541bb556 100644 --- a/drivers/net/ipg.h +++ b/drivers/net/ipg.h @@ -514,7 +514,7 @@ enum ipg_regs { #define IPG_DMALIST_ALIGN_PAD 0x07 #define IPG_MULTICAST_HASHTABLE_SIZE 0x40 -/* Number of miliseconds to wait after issuing a software reset. +/* Number of milliseconds to wait after issuing a software reset. * 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation. 
*/ #define IPG_AC_RESETWAIT 0x05 diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 19ae0842047c..18fd975412d3 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -116,7 +116,7 @@ static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port) { if (atomic_dec_return(&wka_port->refcount) != 0) return; - /* wait 10 miliseconds, other reqs might pop in */ + /* wait 10 milliseconds, other reqs might pop in */ schedule_delayed_work(&wka_port->work, HZ / 100); } diff --git a/drivers/scsi/dpt/osd_util.h b/drivers/scsi/dpt/osd_util.h index 4b56c0436ba2..b2613c2eaac7 100644 --- a/drivers/scsi/dpt/osd_util.h +++ b/drivers/scsi/dpt/osd_util.h @@ -342,7 +342,7 @@ uLONG osdGetThreadID(void); /* wakes up the specifed thread */ void osdWakeThread(uLONG); -/* osd sleep for x miliseconds */ +/* osd sleep for x milliseconds */ void osdSleep(uLONG); #define DPT_THREAD_PRIORITY_LOWEST 0x00 diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index eabf20eeb370..db964db42d3c 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -102,7 +102,7 @@ struct edgeport_port { __u8 shadow_mcr; __u8 shadow_lsr; __u8 lsr_mask; - __u32 ump_read_timeout; /* Number of miliseconds the UMP will + __u32 ump_read_timeout; /* Number of milliseconds the UMP will wait without data before completing a read short */ int baud_rate; -- cgit v1.2.3 From 19f594600110377ec4037fdf7fb93a25ec516212 Mon Sep 17 00:00:00 2001 From: Matt LaPlante Date: Mon, 27 Apr 2009 15:06:31 +0200 Subject: trivial: Miscellaneous documentation typo fixes Fix various typos in documentation txts. Signed-off-by: Matt LaPlante Signed-off-by: Jiri Kosina --- drivers/message/fusion/lsi/mpi_history.txt | 6 +++--- drivers/staging/go7007/go7007.txt | 4 ++-- drivers/staging/panel/lcd-panel-cgram.txt | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt index 693e4b511354..fa9249b4971a 100644 --- a/drivers/message/fusion/lsi/mpi_history.txt +++ b/drivers/message/fusion/lsi/mpi_history.txt @@ -130,7 +130,7 @@ mpi_ioc.h * 08-08-01 01.02.01 Original release for v1.2 work. * New format for FWVersion and ProductId in * MSG_IOC_FACTS_REPLY and MPI_FW_HEADER. - * 08-31-01 01.02.02 Addded event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and + * 08-31-01 01.02.02 Added event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and * related structure and defines. * Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED. * Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE. @@ -190,7 +190,7 @@ mpi_ioc.h * 10-11-06 01.05.12 Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED. * Added MaxInitiators field to PortFacts reply. * Added SAS Device Status Change ReasonCode for - * asynchronous notificaiton. + * asynchronous notification. * Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event * data structure. * Added new ImageType values for FWDownload and FWUpload @@ -623,7 +623,7 @@ mpi_fc.h * 11-02-00 01.01.01 Original release for post 1.0 work * 12-04-00 01.01.02 Added messages for Common Transport Send and * Primitive Send. - * 01-09-01 01.01.03 Modifed some of the new flags to have an MPI prefix + * 01-09-01 01.01.03 Modified some of the new flags to have an MPI prefix * and modified the FcPrimitiveSend flags. * 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger * field. 
diff --git a/drivers/staging/go7007/go7007.txt b/drivers/staging/go7007/go7007.txt index 9f6772bc68c2..1c2907c1dc81 100644 --- a/drivers/staging/go7007/go7007.txt +++ b/drivers/staging/go7007/go7007.txt @@ -2,7 +2,7 @@ This is a driver for the WIS GO7007SB multi-format video encoder. Pete Eberlein -The driver was orignally released under the GPL and is currently hosted at: +The driver was originally released under the GPL and is currently hosted at: http://nikosapi.org/wiki/index.php/WIS_Go7007_Linux_driver The go7007 firmware can be acquired from the package on the site above. @@ -24,7 +24,7 @@ These should be used instead of the non-standard GO7007 ioctls described below. -The README files from the orignal package appear below: +The README files from the original package appears below: --------------------------------------------------------------------------- WIS GO7007SB Public Linux Driver diff --git a/drivers/staging/panel/lcd-panel-cgram.txt b/drivers/staging/panel/lcd-panel-cgram.txt index f9ceef4322a3..7f82c905763d 100644 --- a/drivers/staging/panel/lcd-panel-cgram.txt +++ b/drivers/staging/panel/lcd-panel-cgram.txt @@ -3,7 +3,7 @@ characters 0 to 7. The escape code to define a new character is '\e[LG' followed by one digit from 0 to 7, representing the character number, and up to 8 couples of hex digits terminated by a semi-colon (';'). Each couple of digits represents a line, with 1-bits for each -illuminated pixel with LSB on the right. Lines are numberred from the +illuminated pixel with LSB on the right. Lines are numbered from the top of the character to the bottom. On a 5x7 matrix, only the 5 lower bits of the 7 first bytes are used for each character. If the string is incomplete, only complete lines will be redefined. Here are some -- cgit v1.2.3 From 0fa1b0a144ee3e57f63ae25a7c5402f57232853d Mon Sep 17 00:00:00 2001 From: Alex Chiang Date: Thu, 14 May 2009 23:15:22 +0200 Subject: trivial: fix grammo in bus_for_each_dev() kerneldoc Signed-off-by: Alex Chiang Acked-by: Randy Dunlap Signed-off-by: Jiri Kosina --- drivers/base/bus.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/base/bus.c b/drivers/base/bus.c index c6599618523e..4b04a15146d7 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -279,7 +279,7 @@ static struct device *next_device(struct klist_iter *i) * * NOTE: The device that returns a non-zero value is not retained * in any way, nor is its refcount incremented. If the caller needs - * to retain this data, it should do, and increment the reference + * to retain this data, it should do so, and increment the reference * count in the supplied callback. */ int bus_for_each_dev(struct bus_type *bus, struct device *start, -- cgit v1.2.3 From 492d0f95e6927d60be6234c4b0dd500216e87e18 Mon Sep 17 00:00:00 2001 From: Alessio Igor Bogani Date: Thu, 21 May 2009 19:54:33 +0200 Subject: trivial: input/misc: Fix typo in Kconfig Signed-off-by: Alessio Igor Bogani Signed-off-by: Jiri Kosina --- drivers/input/misc/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 5c0a631d1455..06f46fcc0772 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -232,7 +232,7 @@ config INPUT_GPIO_ROTARY_ENCODER depends on GPIOLIB && GENERIC_GPIO help Say Y here to add support for rotary encoders connected to GPIO lines. 
- Check file:Documentation/incput/rotary_encoder.txt for more + Check file:Documentation/input/rotary-encoder.txt for more information. To compile this driver as a module, choose M here: the -- cgit v1.2.3 From 4737f0978d6e64eae468e01fa181abf6499e6b84 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Fri, 5 Jun 2009 00:44:53 +0200 Subject: trivial: Kconfig: .ko is normally not included in module names .ko is normally not included in Kconfig help, make it consistent. Signed-off-by: Pavel Machek Signed-off-by: Jiri Kosina --- drivers/block/Kconfig | 2 +- drivers/char/Kconfig | 2 +- drivers/connector/Kconfig | 2 +- drivers/crypto/Kconfig | 6 +++--- drivers/ide/Kconfig | 2 +- drivers/media/dvb/dvb-usb/Kconfig | 2 +- drivers/mtd/devices/Kconfig | 2 +- drivers/mtd/nand/Kconfig | 4 ++-- drivers/net/Kconfig | 4 ++-- drivers/net/wireless/Kconfig | 2 +- drivers/net/wireless/hostap/Kconfig | 8 ++++---- drivers/net/wireless/iwlwifi/Kconfig | 4 ++-- drivers/net/wireless/rt2x00/Kconfig | 14 +++++++------- drivers/s390/net/Kconfig | 14 +++++++------- drivers/serial/Kconfig | 2 +- drivers/w1/Kconfig | 2 +- drivers/w1/masters/Kconfig | 6 +++--- drivers/watchdog/Kconfig | 2 +- 18 files changed, 40 insertions(+), 40 deletions(-) (limited to 'drivers') diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index f42fa50d3550..1b98cc52c220 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -112,7 +112,7 @@ config GDROM with up to 1 GB of data. This drive will also read standard CD ROM disks. Select this option to access any disks in your GD ROM drive. Most users will want to say "Y" here. - You can also build this as a module which will be called gdrom.ko + You can also build this as a module which will be called gdrom. source "drivers/block/paride/Kconfig" diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 02ecfd5fa61c..067e9dcb95cd 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -692,7 +692,7 @@ config HVCS this driver. To compile this driver as a module, choose M here: the - module will be called hvcs.ko. Additionally, this module + module will be called hvcs. Additionally, this module will depend on arch specific APIs exported from hvcserver.ko which will also be compiled when this driver is built as a module. diff --git a/drivers/connector/Kconfig b/drivers/connector/Kconfig index 100bfd422066..6e6730f9dfd1 100644 --- a/drivers/connector/Kconfig +++ b/drivers/connector/Kconfig @@ -7,7 +7,7 @@ menuconfig CONNECTOR of the netlink socket protocol. Connector support can also be built as a module. If so, the module - will be called cn.ko. + will be called cn. if CONNECTOR diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 01afd758072f..3e72a6a96d73 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -34,7 +34,7 @@ config CRYPTO_DEV_PADLOCK_AES Available in VIA C3 and newer CPUs. If unsure say M. The compiled module will be - called padlock-aes.ko + called padlock-aes. config CRYPTO_DEV_PADLOCK_SHA tristate "PadLock driver for SHA1 and SHA256 algorithms" @@ -47,7 +47,7 @@ config CRYPTO_DEV_PADLOCK_SHA Available in VIA C7 and newer processors. If unsure say M. The compiled module will be - called padlock-sha.ko + called padlock-sha. 
config CRYPTO_DEV_GEODE tristate "Support for the Geode LX AES engine" @@ -79,7 +79,7 @@ config ZCRYPT_MONOLITHIC bool "Monolithic zcrypt module" depends on ZCRYPT="m" help - Select this option if you want to have a single module z90crypt.ko + Select this option if you want to have a single module z90crypt, that contains all parts of the crypto device driver (ap bus, request router and all the card drivers). diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index cf06494bb744..9a5d0aaac9d0 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -46,7 +46,7 @@ menuconfig IDE SMART parameters from disk drives. To compile this driver as a module, choose M here: the - module will be called ide-core.ko. + module will be called ide-core. For further information, please read . diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig index 60955a70d880..1bb66e1ed5a7 100644 --- a/drivers/media/dvb/dvb-usb/Kconfig +++ b/drivers/media/dvb/dvb-usb/Kconfig @@ -216,7 +216,7 @@ config DVB_USB_TTUSB2 help Say Y here to support the Pinnacle 400e DVB-S USB2.0 receiver. The firmware protocol used by this module is similar to the one used by the - old ttusb-driver - that's why the module is called dvb-usb-ttusb2.ko. + old ttusb-driver - that's why the module is called dvb-usb-ttusb2. config DVB_USB_DTT200U tristate "WideView WT-200U and WT-220U (pen) DVB-T USB2.0 support (Yakumo/Hama/Typhoon/Yuan)" diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 6fde0a2e3567..325fab92a62c 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -49,7 +49,7 @@ config MTD_MS02NV If you want to compile this driver as a module ( = code which can be inserted in and removed from the running kernel whenever you want), say M here and read . - The module will be called ms02-nv.ko. + The module will be called ms02-nv. config MTD_DATAFLASH tristate "Support for AT45xxx DataFlash" diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 890936d0275e..f3276897859e 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -260,7 +260,7 @@ config MTD_NAND_BASLER_EXCITE help This enables the driver for the NAND flash device found on the Basler eXcite Smart Camera. If built as a module, the driver - will be named "excite_nandflash.ko". + will be named excite_nandflash. config MTD_NAND_CAFE tristate "NAND support for OLPC CAFÉ chip" @@ -282,7 +282,7 @@ config MTD_NAND_CS553X controller is enabled for NAND, and currently requires that the controller be in MMIO mode. - If you say "m", the module will be called "cs553x_nand.ko". + If you say "m", the module will be called cs553x_nand. config MTD_NAND_ATMEL tristate "Support for NAND Flash / SmartMedia on AT91 and AVR32" diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 214a92d1ef75..b4683ce5564e 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1880,7 +1880,7 @@ config FEC_MPC52xx ---help--- This option enables support for the MPC5200's on-chip Fast Ethernet Controller - If compiled as module, it will be called 'fec_mpc52xx.ko'. + If compiled as module, it will be called fec_mpc52xx. config FEC_MPC52xx_MDIO bool "MPC52xx FEC MDIO bus driver" @@ -1892,7 +1892,7 @@ config FEC_MPC52xx_MDIO (Motorola? industry standard). If your board uses an external PHY connected to FEC, enable this. If not sure, enable. - If compiled as module, it will be called 'fec_mpc52xx_phy.ko'. + If compiled as module, it will be called fec_mpc52xx_phy. 
config NE_H8300 tristate "NE2000 compatible support for H8/300" diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 3d94e7dfea69..3359497012aa 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -310,7 +310,7 @@ config PRISM54 If you want to compile the driver as a module ( = code which can be inserted in and removed from the running kernel whenever you want), say M here and read . - The module will be called prism54.ko. + The module will be called prism54. config USB_ZD1201 tristate "USB ZD1201 based Wireless device support" diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig index 932d207bce23..c15db2293515 100644 --- a/drivers/net/wireless/hostap/Kconfig +++ b/drivers/net/wireless/hostap/Kconfig @@ -29,7 +29,7 @@ config HOSTAP PLX/PCI/CS version of the driver to actually use the driver. The driver can be compiled as a module and it will be called - "hostap.ko". + hostap. config HOSTAP_FIRMWARE bool "Support downloading firmware images with Host AP driver" @@ -68,7 +68,7 @@ config HOSTAP_PLX driver. The driver can be compiled as a module and will be named - "hostap_plx.ko". + hostap_plx. config HOSTAP_PCI tristate "Host AP driver for Prism2.5 PCI adaptors" @@ -81,7 +81,7 @@ config HOSTAP_PCI driver. The driver can be compiled as a module and will be named - "hostap_pci.ko". + hostap_pci. config HOSTAP_CS tristate "Host AP driver for Prism2/2.5/3 PC Cards" @@ -94,4 +94,4 @@ config HOSTAP_CS driver. The driver can be compiled as a module and will be named - "hostap_cs.ko". + hostap_cs. diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index 8304f6406a17..736162324ba4 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -75,7 +75,7 @@ config IWLAGN If you want to compile the driver as a module ( = code which can be inserted in and removed from the running kernel whenever you want), say M here and read . The - module will be called iwlagn.ko. + module will be called iwlagn. config IWL4965 @@ -113,7 +113,7 @@ config IWL3945 If you want to compile the driver as a module ( = code which can be inserted in and removed from the running kernel whenever you want), say M here and read . The - module will be called iwl3945.ko. + module will be called iwl3945. config IWL3945_SPECTRUM_MEASUREMENT bool "Enable Spectrum Measurement in iwl3945 driver" diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index bfc5d9cf716e..1ae11c7f17af 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig @@ -9,11 +9,11 @@ menuconfig RT2X00 When building one of the individual drivers, the rt2x00 library will also be created. That library (when the driver is built as - a module) will be called "rt2x00lib.ko". + a module) will be called rt2x00lib. Additionally PCI and USB libraries will also be build depending on the types of drivers being selected, these libraries will be - called "rt2x00pci.ko" and "rt2x00usb.ko". + called rt2x00pci and rt2x00usb. if RT2X00 @@ -26,7 +26,7 @@ config RT2400PCI This adds support for rt2400 wireless chipset family. Supported chips: RT2460. - When compiled as a module, this driver will be called "rt2400pci.ko". + When compiled as a module, this driver will be called rt2400pci. config RT2500PCI tristate "Ralink rt2500 (PCI/PCMCIA) support" @@ -37,7 +37,7 @@ config RT2500PCI This adds support for rt2500 wireless chipset family. Supported chips: RT2560. 
- When compiled as a module, this driver will be called "rt2500pci.ko". + When compiled as a module, this driver will be called rt2500pci. config RT61PCI tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support" @@ -51,7 +51,7 @@ config RT61PCI This adds support for rt2501 wireless chipset family. Supported chips: RT2561, RT2561S & RT2661. - When compiled as a module, this driver will be called "rt61pci.ko". + When compiled as a module, this driver will be called rt61pci. config RT2500USB tristate "Ralink rt2500 (USB) support" @@ -62,7 +62,7 @@ config RT2500USB This adds support for rt2500 wireless chipset family. Supported chips: RT2571 & RT2572. - When compiled as a module, this driver will be called "rt2500usb.ko". + When compiled as a module, this driver will be called rt2500usb. config RT73USB tristate "Ralink rt2501/rt73 (USB) support" @@ -75,7 +75,7 @@ config RT73USB This adds support for rt2501 wireless chipset family. Supported chips: RT2571W, RT2573 & RT2671. - When compiled as a module, this driver will be called "rt73usb.ko". + When compiled as a module, this driver will be called rt73usb. config RT2X00_LIB_PCI tristate diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index a7745c82b4ae..cb909a5b5047 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -8,7 +8,7 @@ config LCS Select this option if you want to use LCS networking on IBM System z. This device driver supports Token Ring (IEEE 802.5), FDDI (IEEE 802.7) and Ethernet. - To compile as a module, choose M. The module name is lcs.ko. + To compile as a module, choose M. The module name is lcs. If you do not know what it is, it's safe to choose Y. config CTCM @@ -21,7 +21,7 @@ config CTCM It also supports virtual CTCs when running under VM. This driver also supports channel-to-channel MPC SNA devices. MPC is an SNA protocol device used by Communication Server for Linux. - To compile as a module, choose M. The module name is ctcm.ko. + To compile as a module, choose M. The module name is ctcm. To compile into the kernel, choose Y. If you do not need any channel-to-channel connection, choose N. @@ -34,7 +34,7 @@ config NETIUCV link between VM guests. Using ifconfig a point-to-point connection can be established to the Linux on IBM System z running on the other VM guest. To compile as a module, choose M. - The module name is netiucv.ko. If unsure, choose Y. + The module name is netiucv. If unsure, choose Y. config SMSGIUCV tristate "IUCV special message support (VM only)" @@ -50,7 +50,7 @@ config CLAW This driver supports channel attached CLAW devices. CLAW is Common Link Access for Workstation. Common devices that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices. - To compile as a module, choose M. The module name is claw.ko. + To compile as a module, choose M. The module name is claw. To compile into the kernel, choose Y. config QETH @@ -65,14 +65,14 @@ config QETH To compile this driver as a module, choose M. - The module name is qeth.ko. + The module name is qeth. config QETH_L2 tristate "qeth layer 2 device support" depends on QETH help Select this option to be able to run qeth devices in layer 2 mode. - To compile as a module, choose M. The module name is qeth_l2.ko. + To compile as a module, choose M. The module name is qeth_l2. If unsure, choose y. config QETH_L3 @@ -80,7 +80,7 @@ config QETH_L3 depends on QETH help Select this option to be able to run qeth devices in layer 3 mode. - To compile as a module choose M. The module name is qeth_l3.ko. + To compile as a module choose M. 
The module name is qeth_l3. If unsure, choose Y. config QETH_IPV6 diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 641e800ed693..1132c5cae7ab 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -861,7 +861,7 @@ config SERIAL_UARTLITE Say Y here if you want to use the Xilinx uartlite serial controller. To compile this driver as a module, choose M here: the - module will be called uartlite.ko. + module will be called uartlite. config SERIAL_UARTLITE_CONSOLE bool "Support for console on Xilinx uartlite serial port" diff --git a/drivers/w1/Kconfig b/drivers/w1/Kconfig index 9adbb4f90479..fd2c7bd9dfbe 100644 --- a/drivers/w1/Kconfig +++ b/drivers/w1/Kconfig @@ -8,7 +8,7 @@ menuconfig W1 If you want W1 support, you should say Y here. This W1 support can also be built as a module. If so, the module - will be called wire.ko. + will be called wire. if W1 diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig index 96d2f8e4c275..3195fb8b7d9a 100644 --- a/drivers/w1/masters/Kconfig +++ b/drivers/w1/masters/Kconfig @@ -12,7 +12,7 @@ config W1_MASTER_MATROX using Matrox's G400 GPIO pins. This support is also available as a module. If so, the module - will be called matrox_w1.ko. + will be called matrox_w1. config W1_MASTER_DS2490 tristate "DS2490 USB <-> W1 transport layer for 1-wire" @@ -22,7 +22,7 @@ config W1_MASTER_DS2490 for example DS9490*. This support is also available as a module. If so, the module - will be called ds2490.ko. + will be called ds2490. config W1_MASTER_DS2482 tristate "Maxim DS2482 I2C to 1-Wire bridge" @@ -56,7 +56,7 @@ config W1_MASTER_GPIO GPIO pins. This driver uses the GPIO API to control the wire. This support is also available as a module. If so, the module - will be called w1-gpio.ko. + will be called w1-gpio. config HDQ_MASTER_OMAP tristate "OMAP HDQ driver" diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 5eb8f21da82e..452082f87946 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -531,7 +531,7 @@ config SBC8360_WDT Board Computer produced by Axiomtek Co., Ltd. (www.axiomtek.com). To compile this driver as a module, choose M here: the - module will be called sbc8360.ko. + module will be called sbc8360. Most people will say N. -- cgit v1.2.3 From 3ac49a1c9928b4a242b3cb1d83bc1d5c9b8fcb50 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Thu, 4 Jun 2009 16:20:28 +0200 Subject: trivial: fix ETIMEOUT -> ETIMEDOUT typos fix ETIMEOUT -> ETIMEDOUT typos Signed-off-by: Jean Delvare Signed-off-by: Jiri Kosina --- drivers/net/qlge/qlge_main.c | 2 +- drivers/net/usb/usbnet.c | 2 +- drivers/staging/wlan-ng/hfa384x_usb.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index c92ced247947..1fd5ecb24425 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -3174,7 +3174,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev) if (value & RST_FO_FR) { QPRINTK(qdev, IFDOWN, ERR, - "ETIMEOUT!!! errored out of resetting the chip!\n"); + "ETIMEDOUT!!! errored out of resetting the chip!\n"); status = -ETIMEDOUT; } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index f3a2fce6166c..47f68cfa7e21 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -398,7 +398,7 @@ static void rx_complete (struct urb *urb) /* stalls need manual reset. this is rare ... except that * when going through USB 2.0 TTs, unplug appears this way. 
- * we avoid the highspeed version of the ETIMEOUT/EILSEQ + * we avoid the highspeed version of the ETIMEDOUT/EILSEQ * storm, recovering as needed. */ case -EPIPE: diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index 888198c9a106..824e65bdc433 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -2424,7 +2424,7 @@ int hfa384x_drvr_ramdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len) * 0 success * >0 f/w reported error - f/w status code * <0 driver reported error -* -ETIMEOUT timout waiting for the cmd regs to become +* -ETIMEDOUT timout waiting for the cmd regs to become * available, or waiting for the control reg * to indicate the Aux port is enabled. * -ENODATA the buffer does NOT contain a valid PDA. -- cgit v1.2.3 From db5ed9beabc0a2084ab0e0cc46bf911b8bf16fa8 Mon Sep 17 00:00:00 2001 From: Peter Huewe Date: Sat, 6 Jun 2009 14:58:56 +0200 Subject: trivial: pci hotplug: adding __init/__exit macros to sgi_hotplug Trivial patch which adds the __init and __exit macros to the module_init / module_exit functions from drivers/pci/hotplug/sgi_hotplug.c, linux version 2.6.30-rc8 Signed-off-by: Peter Huewe Signed-off-by: Jiri Kosina --- drivers/pci/hotplug/sgi_hotplug.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index 3eee70928d45..2d6da78fddb6 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c @@ -679,7 +679,7 @@ alloc_err: return rc; } -static int sn_pci_hotplug_init(void) +static int __init sn_pci_hotplug_init(void) { struct pci_bus *pci_bus = NULL; int rc; @@ -716,7 +716,7 @@ static int sn_pci_hotplug_init(void) return registered == 1 ? 0 : -ENODEV; } -static void sn_pci_hotplug_exit(void) +static void __exit sn_pci_hotplug_exit(void) { struct hotplug_slot *bss_hotplug_slot; -- cgit v1.2.3 From 638772c7553f6893f7b346bfee4d46851af59afc Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Wed, 11 Feb 2009 17:25:24 +0800 Subject: fb: add support of LCD display controller on pxa168/910 (base layer) This driver was originally written by Lennert, modified by Green to be feature complete, and ported by Jun Nie and Kevin Liu for pxa168/910 processors. The patch adds support for the on-chip LCD display controller; it currently supports the base (graphics) layer only. Signed-off-by: Lennert Buytenhek Signed-off-by: Green Wan Cc: Peter Liao Signed-off-by: Jun Nie Signed-off-by: Kevin Liu Acked-by: Krzysztof Helt Signed-off-by: Eric Miao --- drivers/video/Kconfig | 10 + drivers/video/Makefile | 1 + drivers/video/pxa168fb.c | 803 +++++++++++++++++++++++++++++++++++++++++++++++ drivers/video/pxa168fb.h | 558 ++++++++++++++++++++++++++++++++ 4 files changed, 1372 insertions(+) create mode 100644 drivers/video/pxa168fb.c create mode 100644 drivers/video/pxa168fb.h (limited to 'drivers') diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 0048f1185a60..13fd66a1f102 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -1759,6 +1759,16 @@ config FB_68328 Say Y here if you want to support the built-in frame buffer of the Motorola 68328 CPU family. +config FB_PXA168 + tristate "PXA168/910 LCD framebuffer support" + depends on FB && (CPU_PXA168 || CPU_PXA910) + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + ---help--- + Frame buffer driver for the built-in LCD controller in the Marvell + MMP processor.
+ config FB_PXA tristate "PXA LCD framebuffer support" depends on FB && ARCH_PXA diff --git a/drivers/video/Makefile b/drivers/video/Makefile index d8d0be5151e3..01a819f47371 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -97,6 +97,7 @@ obj-$(CONFIG_FB_GBE) += gbefb.o obj-$(CONFIG_FB_CIRRUS) += cirrusfb.o obj-$(CONFIG_FB_ASILIANT) += asiliantfb.o obj-$(CONFIG_FB_PXA) += pxafb.o +obj-$(CONFIG_FB_PXA168) += pxa168fb.o obj-$(CONFIG_FB_W100) += w100fb.o obj-$(CONFIG_FB_TMIO) += tmiofb.o obj-$(CONFIG_FB_AU1100) += au1100fb.o diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c new file mode 100644 index 000000000000..84d8327e47db --- /dev/null +++ b/drivers/video/pxa168fb.c @@ -0,0 +1,803 @@ +/* + * linux/drivers/video/pxa168fb.c -- Marvell PXA168 LCD Controller + * + * Copyright (C) 2008 Marvell International Ltd. + * All rights reserved. + * + * 2009-02-16 adapted from original version for PXA168/910 + * Jun Nie + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include