Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/pci_root.c | 11
-rw-r--r--  drivers/base/Kconfig | 7
-rw-r--r--  drivers/base/sys.c | 3
-rw-r--r--  drivers/block/cciss.c | 86
-rw-r--r--  drivers/block/cciss.h | 1
-rw-r--r--  drivers/block/cciss_cmd.h | 1
-rw-r--r--  drivers/block/cciss_scsi.c | 13
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 335
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 752
-rw-r--r--  drivers/block/drbd/drbd_int.h | 270
-rw-r--r--  drivers/block/drbd/drbd_main.c | 673
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 183
-rw-r--r--  drivers/block/drbd/drbd_proc.c | 114
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 608
-rw-r--r--  drivers/block/drbd/drbd_req.c | 169
-rw-r--r--  drivers/block/drbd/drbd_req.h | 36
-rw-r--r--  drivers/block/drbd/drbd_strings.c | 6
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 360
-rw-r--r--  drivers/block/drbd/drbd_wrappers.h | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 66
-rw-r--r--  drivers/gpio/adp5588-gpio.c | 8
-rw-r--r--  drivers/gpio/gpiolib.c | 45
-rw-r--r--  drivers/gpio/max732x.c | 8
-rw-r--r--  drivers/gpio/pca953x.c | 6
-rw-r--r--  drivers/gpio/pl061.c | 14
-rw-r--r--  drivers/gpio/stmpe-gpio.c | 12
-rw-r--r--  drivers/gpio/sx150x.c | 9
-rw-r--r--  drivers/gpio/tc3589x-gpio.c | 12
-rw-r--r--  drivers/gpio/timbgpio.c | 18
-rw-r--r--  drivers/gpio/vr41xx_giu.c | 12
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 51
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 5
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 3
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 70
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 39
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 109
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 8
-rw-r--r--  drivers/hwmon/Kconfig | 17
-rw-r--r--  drivers/hwmon/f71882fg.c | 126
-rw-r--r--  drivers/hwmon/pmbus_core.c | 70
-rw-r--r--  drivers/hwspinlock/Kconfig | 1
-rw-r--r--  drivers/ide/ide-io.c | 12
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 11
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 2
-rw-r--r--  drivers/input/touchscreen/Kconfig | 2
-rw-r--r--  drivers/input/touchscreen/tsc2005.c | 18
-rw-r--r--  drivers/leds/led-triggers.c | 20
-rw-r--r--  drivers/md/Kconfig | 6
-rw-r--r--  drivers/md/Makefile | 1
-rw-r--r--  drivers/md/dm-crypt.c | 19
-rw-r--r--  drivers/md/dm-flakey.c | 212
-rw-r--r--  drivers/md/dm-ioctl.c | 46
-rw-r--r--  drivers/md/dm-log.c | 2
-rw-r--r--  drivers/md/dm-mpath.c | 17
-rw-r--r--  drivers/md/dm-snap.c | 2
-rw-r--r--  drivers/md/dm-stripe.c | 23
-rw-r--r--  drivers/media/rc/ite-cir.c | 6
-rw-r--r--  drivers/memstick/host/Kconfig | 12
-rw-r--r--  drivers/memstick/host/Makefile | 1
-rw-r--r--  drivers/memstick/host/r592.c | 908
-rw-r--r--  drivers/memstick/host/r592.h | 175
-rw-r--r--  drivers/mfd/88pm860x-core.c | 11
-rw-r--r--  drivers/mfd/Kconfig | 1
-rw-r--r--  drivers/mfd/Makefile | 2
-rw-r--r--  drivers/mfd/ab3550-core.c | 12
-rw-r--r--  drivers/mfd/ab8500-core.c | 12
-rw-r--r--  drivers/mfd/asic3.c | 38
-rw-r--r--  drivers/mfd/cs5535-mfd.c | 16
-rw-r--r--  drivers/mfd/ezx-pcap.c | 34
-rw-r--r--  drivers/mfd/htc-egpio.c | 23
-rw-r--r--  drivers/mfd/htc-i2cpld.c | 33
-rw-r--r--  drivers/mfd/jz4740-adc.c | 16
-rw-r--r--  drivers/mfd/max8925-core.c | 10
-rw-r--r--  drivers/mfd/max8997-irq.c | 377
-rw-r--r--  drivers/mfd/max8998-irq.c | 8
-rw-r--r--  drivers/mfd/max8998.c | 4
-rw-r--r--  drivers/mfd/mfd-core.c | 53
-rw-r--r--  drivers/mfd/pcf50633-core.c | 2
-rw-r--r--  drivers/mfd/rdc321x-southbridge.c | 1
-rw-r--r--  drivers/mfd/stmpe.c | 12
-rw-r--r--  drivers/mfd/t7l66xb.c | 21
-rw-r--r--  drivers/mfd/tc3589x.c | 12
-rw-r--r--  drivers/mfd/tc6393xb.c | 21
-rw-r--r--  drivers/mfd/tps6586x.c | 6
-rw-r--r--  drivers/mfd/twl4030-irq.c | 66
-rw-r--r--  drivers/mfd/twl6030-irq.c | 25
-rw-r--r--  drivers/mfd/wl1273-core.c | 2
-rw-r--r--  drivers/mfd/wm831x-irq.c | 8
-rw-r--r--  drivers/mfd/wm8350-irq.c | 8
-rw-r--r--  drivers/mfd/wm8994-irq.c | 8
-rw-r--r--  drivers/misc/kgdbts.c | 2
-rw-r--r--  drivers/mtd/Kconfig | 18
-rw-r--r--  drivers/mtd/Makefile | 4
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c | 2
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c | 3
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c | 2
-rw-r--r--  drivers/mtd/devices/m25p80.c | 5
-rw-r--r--  drivers/mtd/devices/mtdram.c | 1
-rw-r--r--  drivers/mtd/devices/phram.c | 3
-rw-r--r--  drivers/mtd/maps/Kconfig | 13
-rw-r--r--  drivers/mtd/maps/Makefile | 1
-rw-r--r--  drivers/mtd/maps/ceiva.c | 6
-rw-r--r--  drivers/mtd/maps/integrator-flash.c | 10
-rw-r--r--  drivers/mtd/maps/latch-addr-flash.c | 272
-rw-r--r--  drivers/mtd/maps/physmap.c | 8
-rw-r--r--  drivers/mtd/maps/physmap_of.c | 8
-rw-r--r--  drivers/mtd/maps/sa1100-flash.c | 8
-rw-r--r--  drivers/mtd/maps/ts5500_flash.c | 1
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 42
-rw-r--r--  drivers/mtd/mtdconcat.c | 8
-rw-r--r--  drivers/mtd/mtdcore.c | 6
-rw-r--r--  drivers/mtd/mtdswap.c | 1587
-rw-r--r--  drivers/mtd/nand/Kconfig | 15
-rw-r--r--  drivers/mtd/nand/Makefile | 1
-rw-r--r--  drivers/mtd/nand/atmel_nand.c | 166
-rw-r--r--  drivers/mtd/nand/davinci_nand.c | 3
-rw-r--r--  drivers/mtd/nand/mpc5121_nfc.c | 5
-rw-r--r--  drivers/mtd/nand/mxc_nand.c | 31
-rw-r--r--  drivers/mtd/nand/nand_base.c | 42
-rw-r--r--  drivers/mtd/nand/nand_bbt.c | 8
-rw-r--r--  drivers/mtd/nand/nand_bch.c | 243
-rw-r--r--  drivers/mtd/nand/nandsim.c | 43
-rw-r--r--  drivers/mtd/nand/omap2.c | 16
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 977
-rw-r--r--  drivers/mtd/onenand/omap2.c | 7
-rw-r--r--  drivers/mtd/onenand/onenand_base.c | 15
-rw-r--r--  drivers/mtd/sm_ftl.c | 18
-rw-r--r--  drivers/mtd/tests/mtd_speedtest.c | 80
-rw-r--r--  drivers/mtd/tests/mtd_subpagetest.c | 10
-rw-r--r--  drivers/net/a2065.c | 10
-rw-r--r--  drivers/net/ariadne.c | 10
-rw-r--r--  drivers/net/bonding/bond_main.c | 56
-rw-r--r--  drivers/net/bonding/bonding.h | 1
-rw-r--r--  drivers/net/davinci_cpdma.c | 11
-rw-r--r--  drivers/net/davinci_cpdma.h | 1
-rw-r--r--  drivers/net/davinci_emac.c | 5
-rw-r--r--  drivers/net/mlx4/alloc.c | 13
-rw-r--r--  drivers/net/mlx4/cq.c | 2
-rw-r--r--  drivers/net/mlx4/en_cq.c | 38
-rw-r--r--  drivers/net/mlx4/en_ethtool.c | 66
-rw-r--r--  drivers/net/mlx4/en_main.c | 22
-rw-r--r--  drivers/net/mlx4/en_netdev.c | 199
-rw-r--r--  drivers/net/mlx4/en_port.c | 13
-rw-r--r--  drivers/net/mlx4/en_port.h | 19
-rw-r--r--  drivers/net/mlx4/en_rx.c | 11
-rw-r--r--  drivers/net/mlx4/en_tx.c | 72
-rw-r--r--  drivers/net/mlx4/eq.c | 107
-rw-r--r--  drivers/net/mlx4/fw.c | 25
-rw-r--r--  drivers/net/mlx4/fw.h | 3
-rw-r--r--  drivers/net/mlx4/main.c | 119
-rw-r--r--  drivers/net/mlx4/mcg.c | 646
-rw-r--r--  drivers/net/mlx4/mlx4.h | 50
-rw-r--r--  drivers/net/mlx4/mlx4_en.h | 27
-rw-r--r--  drivers/net/mlx4/pd.c | 102
-rw-r--r--  drivers/net/mlx4/port.c | 165
-rw-r--r--  drivers/net/mlx4/profile.c | 4
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 1
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_main.c | 6
-rw-r--r--  drivers/net/sfc/efx.c | 18
-rw-r--r--  drivers/net/sfc/workarounds.h | 2
-rw-r--r--  drivers/net/usb/smsc95xx.c | 17
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-lib.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 7
-rw-r--r--  drivers/net/wireless/orinoco/cfg.c | 3
-rw-r--r--  drivers/net/wireless/orinoco/main.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 7
-rw-r--r--  drivers/net/wireless/rtlwifi/efuse.c | 31
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 1
-rw-r--r--  drivers/pci/intel-iommu.c | 38
-rw-r--r--  drivers/pci/pci.c | 6
-rw-r--r--  drivers/pci/pcie/aspm.c | 33
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 5
-rw-r--r--  drivers/power/Kconfig | 14
-rw-r--r--  drivers/power/bq20z75.c | 310
-rw-r--r--  drivers/power/bq27x00_battery.c | 725
-rw-r--r--  drivers/power/ds2782_battery.c | 1
-rw-r--r--  drivers/power/power_supply_core.c | 4
-rw-r--r--  drivers/power/power_supply_leds.c | 19
-rw-r--r--  drivers/power/power_supply_sysfs.c | 2
-rw-r--r--  drivers/power/s3c_adc_battery.c | 4
-rw-r--r--  drivers/power/twl4030_charger.c | 25
-rw-r--r--  drivers/power/z2_battery.c | 3
-rw-r--r--  drivers/regulator/Kconfig | 2
-rw-r--r--  drivers/regulator/ab3100.c | 51
-rw-r--r--  drivers/regulator/ab8500.c | 270
-rw-r--r--  drivers/regulator/core.c | 116
-rw-r--r--  drivers/regulator/max8997.c | 1
-rw-r--r--  drivers/regulator/max8998.c | 1
-rw-r--r--  drivers/regulator/tps6524x-regulator.c | 2
-rw-r--r--  drivers/regulator/wm831x-dcdc.c | 32
-rw-r--r--  drivers/regulator/wm831x-isink.c | 8
-rw-r--r--  drivers/regulator/wm831x-ldo.c | 17
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 6
-rw-r--r--  drivers/s390/block/dasd_fba.c | 6
-rw-r--r--  drivers/s390/char/con3215.c | 6
-rw-r--r--  drivers/s390/char/raw3270.c | 6
-rw-r--r--  drivers/s390/char/tape_34xx.c | 6
-rw-r--r--  drivers/s390/char/tape_3590.c | 6
-rw-r--r--  drivers/s390/char/vmur.c | 6
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 6
-rw-r--r--  drivers/s390/cio/device.c | 11
-rw-r--r--  drivers/s390/cio/device.h | 1
-rw-r--r--  drivers/s390/cio/qdio_main.c | 3
-rw-r--r--  drivers/s390/net/claw.c | 12
-rw-r--r--  drivers/s390/net/ctcm_main.c | 12
-rw-r--r--  drivers/s390/net/lcs.c | 12
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 10
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 6
-rw-r--r--  drivers/scsi/aacraid/Makefile | 2
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 7
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 106
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 3
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 58
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 41
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c | 85
-rw-r--r--  drivers/scsi/aacraid/linit.c | 15
-rw-r--r--  drivers/scsi/aacraid/nark.c | 3
-rw-r--r--  drivers/scsi/aacraid/rkt.c | 3
-rw-r--r--  drivers/scsi/aacraid/rx.c | 33
-rw-r--r--  drivers/scsi/aacraid/sa.c | 7
-rw-r--r--  drivers/scsi/aacraid/src.c | 594
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h | 22
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 98
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 15
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 73
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_tgt.c | 4
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 15
-rw-r--r--  drivers/scsi/lpfc/Makefile | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 27
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 36
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 113
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 41
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 43
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 513
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 49
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 20
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 7
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 12
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h | 1
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 1
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 207
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 31
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 34
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 146
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 2
-rw-r--r--  drivers/scsi/sd.c | 63
-rw-r--r--  drivers/scsi/ses.c | 48
-rw-r--r--  drivers/staging/westbridge/astoria/block/cyasblkdev_block.c | 2
-rw-r--r--  drivers/target/Kconfig | 2
-rw-r--r--  drivers/target/Makefile | 7
-rw-r--r--  drivers/target/loopback/Kconfig | 11
-rw-r--r--  drivers/target/loopback/Makefile | 1
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 1579
-rw-r--r--  drivers/target/loopback/tcm_loop.h | 77
-rw-r--r--  drivers/target/target_core_configfs.c | 117
-rw-r--r--  drivers/target/target_core_device.c | 40
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 209
-rw-r--r--  drivers/target/target_core_fabric_lib.c | 1
-rw-r--r--  drivers/target/target_core_file.c | 28
-rw-r--r--  drivers/target/target_core_hba.c | 15
-rw-r--r--  drivers/target/target_core_iblock.c | 28
-rw-r--r--  drivers/target/target_core_pscsi.c | 22
-rw-r--r--  drivers/target/target_core_rd.c | 15
-rw-r--r--  drivers/target/target_core_rd.h | 2
-rw-r--r--  drivers/target/target_core_stat.c | 1810
-rw-r--r--  drivers/target/target_core_stat.h | 8
-rw-r--r--  drivers/target/target_core_transport.c | 37
-rw-r--r--  drivers/tty/serial/kgdboc.c | 2
-rw-r--r--  drivers/tty/sysrq.c | 2
-rw-r--r--  drivers/tty/vt/keyboard.c | 2
289 files changed, 16581 insertions, 4241 deletions
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 85249395623b..f911a2f8cc34 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -32,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
+#include <linux/pci-aspm.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
@@ -564,7 +565,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
/* Indicate support for various _OSC capabilities. */
if (pci_ext_cfg_avail(root->bus->self))
flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
- if (pcie_aspm_enabled())
+ if (pcie_aspm_support_enabled())
flags |= OSC_ACTIVE_STATE_PWR_SUPPORT |
OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
if (pci_msi_enabled())
@@ -591,12 +592,16 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
status = acpi_pci_osc_control_set(device->handle, &flags,
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
- if (ACPI_SUCCESS(status))
+ if (ACPI_SUCCESS(status)) {
dev_info(root->bus->bridge,
"ACPI _OSC control (0x%02x) granted\n", flags);
- else
+ } else {
dev_dbg(root->bus->bridge,
"ACPI _OSC request failed (code %d)\n", status);
+ printk(KERN_INFO "Unable to assume _OSC PCIe control. "
+ "Disabling ASPM\n");
+ pcie_no_aspm();
+ }
}
pci_acpi_add_bus_pm_notifier(device, root->bus);
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index d57e8d0fb823..e9e5238f3106 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -168,4 +168,11 @@ config SYS_HYPERVISOR
bool
default n
+config ARCH_NO_SYSDEV_OPS
+ bool
+ ---help---
+ To be selected by architectures that don't use sysdev class or
+ sysdev driver power management (suspend/resume) and shutdown
+ operations.
+
endmenu
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index f6fb54741602..fbe72da6c414 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -329,7 +329,7 @@ void sysdev_unregister(struct sys_device *sysdev)
}
-
+#ifndef CONFIG_ARCH_NO_SYSDEV_OPS
/**
* sysdev_shutdown - Shut down all system devices.
*
@@ -524,6 +524,7 @@ int sysdev_resume(void)
return 0;
}
EXPORT_SYMBOL_GPL(sysdev_resume);
+#endif /* CONFIG_ARCH_NO_SYSDEV_OPS */
int __init system_bus_init(void)
{
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 35658f445fca..9bf13988f1a2 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -193,7 +193,7 @@ static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
u64 *cfg_offset);
static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
unsigned long *memory_bar);
-
+static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
@@ -231,7 +231,7 @@ static const struct block_device_operations cciss_fops = {
*/
static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
{
- if (likely(h->transMethod == CFGTBL_Trans_Performant))
+ if (likely(h->transMethod & CFGTBL_Trans_Performant))
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}
@@ -556,6 +556,44 @@ static void __devinit cciss_procinit(ctlr_info_t *h)
#define to_hba(n) container_of(n, struct ctlr_info, dev)
#define to_drv(n) container_of(n, drive_info_struct, dev)
+/* List of controllers which cannot be reset on kexec with reset_devices */
+static u32 unresettable_controller[] = {
+ 0x324a103C, /* Smart Array P712m */
+ 0x324b103C, /* SmartArray P711m */
+ 0x3223103C, /* Smart Array P800 */
+ 0x3234103C, /* Smart Array P400 */
+ 0x3235103C, /* Smart Array P400i */
+ 0x3211103C, /* Smart Array E200i */
+ 0x3212103C, /* Smart Array E200 */
+ 0x3213103C, /* Smart Array E200i */
+ 0x3214103C, /* Smart Array E200i */
+ 0x3215103C, /* Smart Array E200i */
+ 0x3237103C, /* Smart Array E500 */
+ 0x323D103C, /* Smart Array P700m */
+ 0x409C0E11, /* Smart Array 6400 */
+ 0x409D0E11, /* Smart Array 6400 EM */
+};
+
+static int ctlr_is_resettable(struct ctlr_info *h)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
+ if (unresettable_controller[i] == h->board_id)
+ return 0;
+ return 1;
+}
+
+static ssize_t host_show_resettable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ctlr_info *h = to_hba(dev);
+
+ return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h));
+}
+static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL);
+
static ssize_t host_store_rescan(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -741,6 +779,7 @@ static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
static struct attribute *cciss_host_attrs[] = {
&dev_attr_rescan.attr,
+ &dev_attr_resettable.attr,
NULL
};
@@ -973,8 +1012,8 @@ static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
temp64.val32.upper = c->ErrDesc.Addr.upper;
pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
c->err_info, (dma_addr_t) temp64.val);
- pci_free_consistent(h->pdev, sizeof(CommandList_struct),
- c, (dma_addr_t) c->busaddr);
+ pci_free_consistent(h->pdev, sizeof(CommandList_struct), c,
+ (dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr));
}
static inline ctlr_info_t *get_host(struct gendisk *disk)
@@ -1490,8 +1529,7 @@ static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- ioc = (BIG_IOCTL_Command_struct *)
- kmalloc(sizeof(*ioc), GFP_KERNEL);
+ ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
if (!ioc) {
status = -ENOMEM;
goto cleanup1;
@@ -2653,6 +2691,10 @@ static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
c->Request.CDB[0]);
return_status = IO_NEEDS_RETRY;
break;
+ case CMD_UNABORTABLE:
+ dev_warn(&h->pdev->dev, "cmd unabortable\n");
+ return_status = IO_ERROR;
+ break;
default:
dev_warn(&h->pdev->dev, "cmd 0x%02x returned "
"unknown status %x\n", c->Request.CDB[0],
@@ -3103,6 +3145,13 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
DID_PASSTHROUGH : DID_ERROR);
break;
+ case CMD_UNABORTABLE:
+ dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
+ rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ cmd->err_info->CommandStatus, DRIVER_OK,
+ cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ?
+ DID_PASSTHROUGH : DID_ERROR);
+ break;
default:
dev_warn(&h->pdev->dev, "cmd %p returned "
"unknown status %x\n", cmd,
@@ -3136,10 +3185,13 @@ static inline u32 cciss_tag_to_index(u32 tag)
return tag >> DIRECT_LOOKUP_SHIFT;
}
-static inline u32 cciss_tag_discard_error_bits(u32 tag)
+static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag)
{
-#define CCISS_ERROR_BITS 0x03
- return tag & ~CCISS_ERROR_BITS;
+#define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
+#define CCISS_SIMPLE_ERROR_BITS 0x03
+ if (likely(h->transMethod & CFGTBL_Trans_Performant))
+ return tag & ~CCISS_PERF_ERROR_BITS;
+ return tag & ~CCISS_SIMPLE_ERROR_BITS;
}
static inline void cciss_mark_tag_indexed(u32 *tag)
@@ -3359,7 +3411,7 @@ static inline u32 next_command(ctlr_info_t *h)
{
u32 a;
- if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
+ if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
return h->access.command_completed(h);
if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
@@ -3394,14 +3446,12 @@ static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag)
/* process completion of a non-indexed command */
static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
{
- u32 tag;
CommandList_struct *c = NULL;
__u32 busaddr_masked, tag_masked;
- tag = cciss_tag_discard_error_bits(raw_tag);
+ tag_masked = cciss_tag_discard_error_bits(h, raw_tag);
list_for_each_entry(c, &h->cmpQ, list) {
- busaddr_masked = cciss_tag_discard_error_bits(c->busaddr);
- tag_masked = cciss_tag_discard_error_bits(tag);
+ busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr);
if (busaddr_masked == tag_masked) {
finish_cmd(h, c, raw_tag);
return next_command(h);
@@ -3753,7 +3803,8 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
}
}
-static __devinit void cciss_enter_performant_mode(ctlr_info_t *h)
+static __devinit void cciss_enter_performant_mode(ctlr_info_t *h,
+ u32 use_short_tags)
{
/* This is a bit complicated. There are 8 registers on
* the controller which we write to to tell it 8 different
@@ -3808,7 +3859,7 @@ static __devinit void cciss_enter_performant_mode(ctlr_info_t *h)
writel(0, &h->transtable->RepQCtrAddrHigh32);
writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
writel(0, &h->transtable->RepQAddr0High32);
- writel(CFGTBL_Trans_Performant,
+ writel(CFGTBL_Trans_Performant | use_short_tags,
&(h->cfgtable->HostWrite.TransportRequest));
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
@@ -3855,7 +3906,8 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
goto clean_up;
- cciss_enter_performant_mode(h);
+ cciss_enter_performant_mode(h,
+ trans_support & CFGTBL_Trans_use_short_tags);
/* Change the access methods to the performant access methods */
h->access = SA5_performant_access;
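The cciss hunks above make the tag handling transport-mode aware: h->transMethod is now tested as a bit mask rather than compared for equality, and cciss_tag_discard_error_bits() gains the ctlr_info_t argument so it can strip five low status bits from a tag in performant mode but only two in simple mode (the masked value is also what cmd_special_free() now passes to pci_free_consistent()). A minimal user-space sketch of that masking follows; the DIRECT_LOOKUP_SHIFT value of 5 and the helper name are assumptions for illustration, not the driver's code.

/* sketch only: DIRECT_LOOKUP_SHIFT = 5 is assumed for illustration */
#include <stdio.h>

#define DIRECT_LOOKUP_SHIFT	5
#define CFGTBL_Trans_Performant	0x00000004u
#define CCISS_PERF_ERROR_BITS	((1u << DIRECT_LOOKUP_SHIFT) - 1)	/* 0x1f */
#define CCISS_SIMPLE_ERROR_BITS	0x03u

static unsigned int tag_discard_error_bits(unsigned int trans_method, unsigned int tag)
{
	if (trans_method & CFGTBL_Trans_Performant)
		return tag & ~CCISS_PERF_ERROR_BITS;
	return tag & ~CCISS_SIMPLE_ERROR_BITS;
}

int main(void)
{
	/* performant mode: the low 5 bits carry status, 0x123f -> 0x1220 */
	printf("%#x\n", tag_discard_error_bits(CFGTBL_Trans_Performant, 0x123f));
	/* simple mode: only the low 2 bits carry status, 0x123f -> 0x123c */
	printf("%#x\n", tag_discard_error_bits(0, 0x123f));
	return 0;
}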
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 579f74918493..554bbd907d14 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -222,6 +222,7 @@ static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c)
h->ctlr, c->busaddr);
#endif /* CCISS_DEBUG */
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+ readl(h->vaddr + SA5_REQUEST_PORT_OFFSET);
h->commands_outstanding++;
if ( h->commands_outstanding > h->max_outstanding)
h->max_outstanding = h->commands_outstanding;
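The single added line in SA5_submit_command() reads the request port back right after the doorbell writel(). The usual purpose of such a read-back (the commit itself does not state one) is to flush posted PCI writes, so the command is known to have reached the controller before the outstanding-command counters are updated. A compilable stand-in for that pattern, with hypothetical helpers in place of writel()/readl():

#include <stdint.h>

/* hypothetical stand-ins for writel()/readl(); the point is the read-back */
static inline void mmio_write32(volatile uint32_t *reg, uint32_t val) { *reg = val; }
static inline uint32_t mmio_read32(volatile uint32_t *reg) { return *reg; }

static void submit_command(volatile uint32_t *request_port, uint32_t busaddr)
{
	mmio_write32(request_port, busaddr);
	/* a read from the same device cannot complete until earlier posted
	 * writes have been pushed out, so the doorbell write has arrived */
	(void)mmio_read32(request_port);
}

int main(void)
{
	volatile uint32_t fake_request_port = 0;	/* stands in for an ioremap()ed register */
	submit_command(&fake_request_port, 0x20u);
	return 0;
}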
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index 35463d2f0ee7..cd441bef031f 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -56,6 +56,7 @@
#define CFGTBL_Trans_Simple 0x00000002l
#define CFGTBL_Trans_Performant 0x00000004l
+#define CFGTBL_Trans_use_short_tags 0x20000000l
#define CFGTBL_BusType_Ultra2 0x00000001l
#define CFGTBL_BusType_Ultra3 0x00000002l
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 727d0225b7d0..df793803f5ae 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -824,13 +824,18 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
break;
case CMD_UNSOLICITED_ABORT:
cmd->result = DID_ABORT << 16;
- dev_warn(&h->pdev->dev, "%p aborted do to an "
+ dev_warn(&h->pdev->dev, "%p aborted due to an "
"unsolicited abort\n", c);
break;
case CMD_TIMEOUT:
cmd->result = DID_TIME_OUT << 16;
dev_warn(&h->pdev->dev, "%p timedout\n", c);
break;
+ case CMD_UNABORTABLE:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "c %p command "
+ "unabortable\n", c);
+ break;
default:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev,
@@ -1007,11 +1012,15 @@ cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
break;
case CMD_UNSOLICITED_ABORT:
dev_warn(&h->pdev->dev,
- "%p aborted do to an unsolicited abort\n", c);
+ "%p aborted due to an unsolicited abort\n", c);
break;
case CMD_TIMEOUT:
dev_warn(&h->pdev->dev, "%p timedout\n", c);
break;
+ case CMD_UNABORTABLE:
+ dev_warn(&h->pdev->dev,
+ "%p unabortable\n", c);
+ break;
default:
dev_warn(&h->pdev->dev,
"%p returned unknown status %x\n",
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index aca302492ff2..2a1642bc451d 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -92,7 +92,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
bio->bi_end_io = drbd_md_io_complete;
bio->bi_rw = rw;
- if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
+ if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
bio_endio(bio, -EIO);
else
submit_bio(rw, bio);
@@ -176,13 +176,17 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
struct lc_element *al_ext;
struct lc_element *tmp;
unsigned long al_flags = 0;
+ int wake;
spin_lock_irq(&mdev->al_lock);
tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
if (unlikely(tmp != NULL)) {
struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
+ wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
spin_unlock_irq(&mdev->al_lock);
+ if (wake)
+ wake_up(&mdev->al_wait);
return NULL;
}
}
@@ -258,6 +262,33 @@ void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
spin_unlock_irqrestore(&mdev->al_lock, flags);
}
+#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
+/* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT
+ * are still coupled, or assume too much about their relation.
+ * Code below will not work if this is violated.
+ * Will be cleaned up with some followup patch.
+ */
+# error FIXME
+#endif
+
+static unsigned int al_extent_to_bm_page(unsigned int al_enr)
+{
+ return al_enr >>
+ /* bit to page */
+ ((PAGE_SHIFT + 3) -
+ /* al extent number to bit */
+ (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT));
+}
+
+static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
+{
+ return rs_enr >>
+ /* bit to page */
+ ((PAGE_SHIFT + 3) -
+ /* al extent number to bit */
+ (BM_EXT_SHIFT - BM_BLOCK_SHIFT));
+}
+
int
w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
@@ -285,7 +316,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
* For now, we must not write the transaction,
* if we cannot write out the bitmap of the evicted extent. */
if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
- drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
+ drbd_bm_write_page(mdev, al_extent_to_bm_page(evicted));
/* The bitmap write may have failed, causing a state change. */
if (mdev->state.disk < D_INCONSISTENT) {
@@ -334,7 +365,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+ mdev->ldev->md.al_offset + mdev->al_tr_pos;
if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
- drbd_chk_io_error(mdev, 1, TRUE);
+ drbd_chk_io_error(mdev, 1, true);
if (++mdev->al_tr_pos >
div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
@@ -511,225 +542,6 @@ cancel:
return 1;
}
-static void atodb_endio(struct bio *bio, int error)
-{
- struct drbd_atodb_wait *wc = bio->bi_private;
- struct drbd_conf *mdev = wc->mdev;
- struct page *page;
- int uptodate = bio_flagged(bio, BIO_UPTODATE);
-
- /* strange behavior of some lower level drivers...
- * fail the request by clearing the uptodate flag,
- * but do not return any error?! */
- if (!error && !uptodate)
- error = -EIO;
-
- drbd_chk_io_error(mdev, error, TRUE);
- if (error && wc->error == 0)
- wc->error = error;
-
- if (atomic_dec_and_test(&wc->count))
- complete(&wc->io_done);
-
- page = bio->bi_io_vec[0].bv_page;
- put_page(page);
- bio_put(bio);
- mdev->bm_writ_cnt++;
- put_ldev(mdev);
-}
-
-/* sector to word */
-#define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
-
-/* activity log to on disk bitmap -- prepare bio unless that sector
- * is already covered by previously prepared bios */
-static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
- struct bio **bios,
- unsigned int enr,
- struct drbd_atodb_wait *wc) __must_hold(local)
-{
- struct bio *bio;
- struct page *page;
- sector_t on_disk_sector;
- unsigned int page_offset = PAGE_SIZE;
- int offset;
- int i = 0;
- int err = -ENOMEM;
-
- /* We always write aligned, full 4k blocks,
- * so we can ignore the logical_block_size (for now) */
- enr &= ~7U;
- on_disk_sector = enr + mdev->ldev->md.md_offset
- + mdev->ldev->md.bm_offset;
-
- D_ASSERT(!(on_disk_sector & 7U));
-
- /* Check if that enr is already covered by an already created bio.
- * Caution, bios[] is not NULL terminated,
- * but only initialized to all NULL.
- * For completely scattered activity log,
- * the last invocation iterates over all bios,
- * and finds the last NULL entry.
- */
- while ((bio = bios[i])) {
- if (bio->bi_sector == on_disk_sector)
- return 0;
- i++;
- }
- /* bios[i] == NULL, the next not yet used slot */
-
- /* GFP_KERNEL, we are not in the write-out path */
- bio = bio_alloc(GFP_KERNEL, 1);
- if (bio == NULL)
- return -ENOMEM;
-
- if (i > 0) {
- const struct bio_vec *prev_bv = bios[i-1]->bi_io_vec;
- page_offset = prev_bv->bv_offset + prev_bv->bv_len;
- page = prev_bv->bv_page;
- }
- if (page_offset == PAGE_SIZE) {
- page = alloc_page(__GFP_HIGHMEM);
- if (page == NULL)
- goto out_bio_put;
- page_offset = 0;
- } else {
- get_page(page);
- }
-
- offset = S2W(enr);
- drbd_bm_get_lel(mdev, offset,
- min_t(size_t, S2W(8), drbd_bm_words(mdev) - offset),
- kmap(page) + page_offset);
- kunmap(page);
-
- bio->bi_private = wc;
- bio->bi_end_io = atodb_endio;
- bio->bi_bdev = mdev->ldev->md_bdev;
- bio->bi_sector = on_disk_sector;
-
- if (bio_add_page(bio, page, 4096, page_offset) != 4096)
- goto out_put_page;
-
- atomic_inc(&wc->count);
- /* we already know that we may do this...
- * get_ldev_if_state(mdev,D_ATTACHING);
- * just get the extra reference, so that the local_cnt reflects
- * the number of pending IO requests DRBD at its backing device.
- */
- atomic_inc(&mdev->local_cnt);
-
- bios[i] = bio;
-
- return 0;
-
-out_put_page:
- err = -EINVAL;
- put_page(page);
-out_bio_put:
- bio_put(bio);
- return err;
-}
-
-/**
- * drbd_al_to_on_disk_bm() - * Writes bitmap parts covered by active AL extents
- * @mdev: DRBD device.
- *
- * Called when we detach (unconfigure) local storage,
- * or when we go from R_PRIMARY to R_SECONDARY role.
- */
-void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
-{
- int i, nr_elements;
- unsigned int enr;
- struct bio **bios;
- struct drbd_atodb_wait wc;
-
- ERR_IF (!get_ldev_if_state(mdev, D_ATTACHING))
- return; /* sorry, I don't have any act_log etc... */
-
- wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
-
- nr_elements = mdev->act_log->nr_elements;
-
- /* GFP_KERNEL, we are not in anyone's write-out path */
- bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL);
- if (!bios)
- goto submit_one_by_one;
-
- atomic_set(&wc.count, 0);
- init_completion(&wc.io_done);
- wc.mdev = mdev;
- wc.error = 0;
-
- for (i = 0; i < nr_elements; i++) {
- enr = lc_element_by_index(mdev->act_log, i)->lc_number;
- if (enr == LC_FREE)
- continue;
- /* next statement also does atomic_inc wc.count and local_cnt */
- if (atodb_prepare_unless_covered(mdev, bios,
- enr/AL_EXT_PER_BM_SECT,
- &wc))
- goto free_bios_submit_one_by_one;
- }
-
- /* unnecessary optimization? */
- lc_unlock(mdev->act_log);
- wake_up(&mdev->al_wait);
-
- /* all prepared, submit them */
- for (i = 0; i < nr_elements; i++) {
- if (bios[i] == NULL)
- break;
- if (FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR)) {
- bios[i]->bi_rw = WRITE;
- bio_endio(bios[i], -EIO);
- } else {
- submit_bio(WRITE, bios[i]);
- }
- }
-
- /* always (try to) flush bitmap to stable storage */
- drbd_md_flush(mdev);
-
- /* In case we did not submit a single IO do not wait for
- * them to complete. ( Because we would wait forever here. )
- *
- * In case we had IOs and they are already complete, there
- * is not point in waiting anyways.
- * Therefore this if () ... */
- if (atomic_read(&wc.count))
- wait_for_completion(&wc.io_done);
-
- put_ldev(mdev);
-
- kfree(bios);
- return;
-
- free_bios_submit_one_by_one:
- /* free everything by calling the endio callback directly. */
- for (i = 0; i < nr_elements && bios[i]; i++)
- bio_endio(bios[i], 0);
-
- kfree(bios);
-
- submit_one_by_one:
- dev_warn(DEV, "Using the slow drbd_al_to_on_disk_bm()\n");
-
- for (i = 0; i < mdev->act_log->nr_elements; i++) {
- enr = lc_element_by_index(mdev->act_log, i)->lc_number;
- if (enr == LC_FREE)
- continue;
- /* Really slow: if we have al-extents 16..19 active,
- * sector 4 will be written four times! Synchronous! */
- drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT);
- }
-
- lc_unlock(mdev->act_log);
- wake_up(&mdev->al_wait);
- put_ldev(mdev);
-}
-
/**
* drbd_al_apply_to_bm() - Sets the bitmap to diry(1) where covered ba active AL extents
* @mdev: DRBD device.
@@ -809,7 +621,7 @@ static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused
return 1;
}
- drbd_bm_write_sect(mdev, udw->enr);
+ drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr));
put_ldev(mdev);
kfree(udw);
@@ -889,7 +701,6 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
dev_warn(DEV, "Kicking resync_lru element enr=%u "
"out with rs_failed=%d\n",
ext->lce.lc_number, ext->rs_failed);
- set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
}
ext->rs_left = rs_left;
ext->rs_failed = success ? 0 : count;
@@ -908,7 +719,6 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
drbd_queue_work_front(&mdev->data.work, &udw->w);
} else {
dev_warn(DEV, "Could not kmalloc an udw\n");
- set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
}
}
} else {
@@ -919,6 +729,22 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
}
}
+void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go)
+{
+ unsigned long now = jiffies;
+ unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
+ int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
+ if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
+ if (mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go &&
+ mdev->state.conn != C_PAUSED_SYNC_T &&
+ mdev->state.conn != C_PAUSED_SYNC_S) {
+ mdev->rs_mark_time[next] = now;
+ mdev->rs_mark_left[next] = still_to_go;
+ mdev->rs_last_mark = next;
+ }
+ }
+}
+
/* clear the bit corresponding to the piece of storage in question:
* size byte of data starting from sector. Only clear a bits of the affected
* one ore more _aligned_ BM_BLOCK_SIZE blocks.
@@ -936,7 +762,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
int wake_up = 0;
unsigned long flags;
- if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+ if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
@@ -969,21 +795,9 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
*/
count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
if (count && get_ldev(mdev)) {
- unsigned long now = jiffies;
- unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
- int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
- if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
- unsigned long tw = drbd_bm_total_weight(mdev);
- if (mdev->rs_mark_left[mdev->rs_last_mark] != tw &&
- mdev->state.conn != C_PAUSED_SYNC_T &&
- mdev->state.conn != C_PAUSED_SYNC_S) {
- mdev->rs_mark_time[next] = now;
- mdev->rs_mark_left[next] = tw;
- mdev->rs_last_mark = next;
- }
- }
+ drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
spin_lock_irqsave(&mdev->al_lock, flags);
- drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE);
+ drbd_try_clear_on_disk_bm(mdev, sector, count, true);
spin_unlock_irqrestore(&mdev->al_lock, flags);
/* just wake_up unconditional now, various lc_chaged(),
@@ -998,27 +812,27 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
/*
* this is intended to set one request worth of data out of sync.
* affects at least 1 bit,
- * and at most 1+DRBD_MAX_SEGMENT_SIZE/BM_BLOCK_SIZE bits.
+ * and at most 1+DRBD_MAX_BIO_SIZE/BM_BLOCK_SIZE bits.
*
* called by tl_clear and drbd_send_dblock (==drbd_make_request).
* so this can be _any_ process.
*/
-void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
+int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
const char *file, const unsigned int line)
{
unsigned long sbnr, ebnr, lbnr, flags;
sector_t esector, nr_sectors;
- unsigned int enr, count;
+ unsigned int enr, count = 0;
struct lc_element *e;
- if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+ if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "sector: %llus, size: %d\n",
(unsigned long long)sector, size);
- return;
+ return 0;
}
if (!get_ldev(mdev))
- return; /* no disk, no metadata, no bitmap to set bits in */
+ return 0; /* no disk, no metadata, no bitmap to set bits in */
nr_sectors = drbd_get_capacity(mdev->this_bdev);
esector = sector + (size >> 9) - 1;
@@ -1048,6 +862,8 @@ void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
out:
put_ldev(mdev);
+
+ return count;
}
static
@@ -1128,7 +944,10 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
unsigned int enr = BM_SECT_TO_EXT(sector);
struct bm_extent *bm_ext;
int i, sig;
+ int sa = 200; /* Step aside 200 times, then grab the extent and let app-IO wait.
+ 200 times -> 20 seconds. */
+retry:
sig = wait_event_interruptible(mdev->al_wait,
(bm_ext = _bme_get(mdev, enr)));
if (sig)
@@ -1139,16 +958,25 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
sig = wait_event_interruptible(mdev->al_wait,
- !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i));
- if (sig) {
+ !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i) ||
+ test_bit(BME_PRIORITY, &bm_ext->flags));
+
+ if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) {
spin_lock_irq(&mdev->al_lock);
if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
- clear_bit(BME_NO_WRITES, &bm_ext->flags);
+ bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
mdev->resync_locked--;
wake_up(&mdev->al_wait);
}
spin_unlock_irq(&mdev->al_lock);
- return -EINTR;
+ if (sig)
+ return -EINTR;
+ if (schedule_timeout_interruptible(HZ/10))
+ return -EINTR;
+ if (sa && --sa == 0)
+ dev_warn(DEV,"drbd_rs_begin_io() stepped aside for 20sec."
+ "Resync stalled?\n");
+ goto retry;
}
}
set_bit(BME_LOCKED, &bm_ext->flags);
@@ -1291,8 +1119,7 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
}
if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
- clear_bit(BME_LOCKED, &bm_ext->flags);
- clear_bit(BME_NO_WRITES, &bm_ext->flags);
+ bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
mdev->resync_locked--;
wake_up(&mdev->al_wait);
}
@@ -1383,7 +1210,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
sector_t esector, nr_sectors;
int wake_up = 0;
- if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+ if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
@@ -1420,7 +1247,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
mdev->rs_failed += count;
if (get_ldev(mdev)) {
- drbd_try_clear_on_disk_bm(mdev, sector, count, FALSE);
+ drbd_try_clear_on_disk_bm(mdev, sector, count, false);
put_ldev(mdev);
}
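The drbd_actlog.c changes above drop the sector-granular bitmap writeout (drbd_bm_write_sect() and the whole atodb_* path) in favour of page-granular writeout through drbd_bm_write_page(); the new helpers al_extent_to_bm_page() and rs_extent_to_bm_page() simply translate an extent number into the index of the bitmap page holding its bits. A worked version of that shift arithmetic is below, using typical constants of that era (PAGE_SHIFT 12, BM_BLOCK_SHIFT 12, AL_EXTENT_SHIFT 22, BM_EXT_SHIFT 24); these values are assumptions for illustration.

/* assumed values, for illustration only */
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB pages */
#define BM_BLOCK_SHIFT	12	/* one bitmap bit per 4 KiB of storage */
#define AL_EXTENT_SHIFT	22	/* 4 MiB activity-log extent */
#define BM_EXT_SHIFT	24	/* 16 MiB resync extent */

int main(void)
{
	/* a bitmap page holds 1 << (PAGE_SHIFT + 3) = 32768 bits;
	 * an AL extent covers 1 << (22 - 12) = 1024 bits,
	 * a resync extent covers 1 << (24 - 12) = 4096 bits */
	unsigned al_shift = (PAGE_SHIFT + 3) - (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT);	/* 5 */
	unsigned rs_shift = (PAGE_SHIFT + 3) - (BM_EXT_SHIFT - BM_BLOCK_SHIFT);		/* 3 */

	printf("AL extent 100 -> bitmap page %u\n", 100u >> al_shift);	/* page 3 */
	printf("RS extent 100 -> bitmap page %u\n", 100u >> rs_shift);	/* page 12 */
	return 0;
}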
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 0645ca829a94..f0ae63d2df65 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -28,18 +28,58 @@
#include <linux/drbd.h>
#include <linux/slab.h>
#include <asm/kmap_types.h>
+
+#include <asm-generic/bitops/le.h>
+
#include "drbd_int.h"
+
/* OPAQUE outside this file!
* interface defined in drbd_int.h
* convention:
* function name drbd_bm_... => used elsewhere, "public".
* function name bm_... => internal to implementation, "private".
+ */
+
+
+/*
+ * LIMITATIONS:
+ * We want to support >= peta byte of backend storage, while for now still using
+ * a granularity of one bit per 4KiB of storage.
+ * 1 << 50 bytes backend storage (1 PiB)
+ * 1 << (50 - 12) bits needed
+ * 38 --> we need u64 to index and count bits
+ * 1 << (38 - 3) bitmap bytes needed
+ * 35 --> we still need u64 to index and count bytes
+ * (that's 32 GiB of bitmap for 1 PiB storage)
+ * 1 << (35 - 2) 32bit longs needed
+ * 33 --> we'd even need u64 to index and count 32bit long words.
+ * 1 << (35 - 3) 64bit longs needed
+ * 32 --> we could get away with a 32bit unsigned int to index and count
+ * 64bit long words, but I rather stay with unsigned long for now.
+ * We probably should neither count nor point to bytes or long words
+ * directly, but either by bitnumber, or by page index and offset.
+ * 1 << (35 - 12)
+ * 22 --> we need that much 4KiB pages of bitmap.
+ * 1 << (22 + 3) --> on a 64bit arch,
+ * we need 32 MiB to store the array of page pointers.
+ *
+ * Because I'm lazy, and because the resulting patch was too large, too ugly
+ * and still incomplete, on 32bit we still "only" support 16 TiB (minus some),
+ * (1 << 32) bits * 4k storage.
+ *
- * Note that since find_first_bit returns int, at the current granularity of
- * the bitmap (4KB per byte), this implementation "only" supports up to
- * 1<<(32+12) == 16 TB...
+ * bitmap storage and IO:
+ * Bitmap is stored little endian on disk, and is kept little endian in
+ * core memory. Currently we still hold the full bitmap in core as long
+ * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
+ * seems excessive.
+ *
+ * We plan to reduce the amount of in-core bitmap pages by pageing them in
+ * and out against their on-disk location as necessary, but need to make
+ * sure we don't cause too much meta data IO, and must not deadlock in
+ * tight memory situations. This needs some more work.
*/
/*
@@ -55,13 +95,9 @@
struct drbd_bitmap {
struct page **bm_pages;
spinlock_t bm_lock;
- /* WARNING unsigned long bm_*:
- * 32bit number of bit offset is just enough for 512 MB bitmap.
- * it will blow up if we make the bitmap bigger...
- * not that it makes much sense to have a bitmap that large,
- * rather change the granularity to 16k or 64k or something.
- * (that implies other problems, however...)
- */
+
+ /* see LIMITATIONS: above */
+
unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */
unsigned long bm_bits;
size_t bm_words;
@@ -69,29 +105,18 @@ struct drbd_bitmap {
sector_t bm_dev_capacity;
struct mutex bm_change; /* serializes resize operations */
- atomic_t bm_async_io;
- wait_queue_head_t bm_io_wait;
+ wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */
- unsigned long bm_flags;
+ enum bm_flag bm_flags;
/* debugging aid, in case we are still racy somewhere */
char *bm_why;
struct task_struct *bm_task;
};
-/* definition of bits in bm_flags */
-#define BM_LOCKED 0
-#define BM_MD_IO_ERROR 1
-#define BM_P_VMALLOCED 2
-
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
unsigned long e, int val, const enum km_type km);
-static int bm_is_locked(struct drbd_bitmap *b)
-{
- return test_bit(BM_LOCKED, &b->bm_flags);
-}
-
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
@@ -108,7 +133,7 @@ static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
b->bm_task == mdev->worker.task ? "worker" : "?");
}
-void drbd_bm_lock(struct drbd_conf *mdev, char *why)
+void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
{
struct drbd_bitmap *b = mdev->bitmap;
int trylock_failed;
@@ -131,8 +156,9 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why)
b->bm_task == mdev->worker.task ? "worker" : "?");
mutex_lock(&b->bm_change);
}
- if (__test_and_set_bit(BM_LOCKED, &b->bm_flags))
+ if (BM_LOCKED_MASK & b->bm_flags)
dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
+ b->bm_flags |= flags & BM_LOCKED_MASK;
b->bm_why = why;
b->bm_task = current;
@@ -146,31 +172,137 @@ void drbd_bm_unlock(struct drbd_conf *mdev)
return;
}
- if (!__test_and_clear_bit(BM_LOCKED, &mdev->bitmap->bm_flags))
+ if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags))
dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");
+ b->bm_flags &= ~BM_LOCKED_MASK;
b->bm_why = NULL;
b->bm_task = NULL;
mutex_unlock(&b->bm_change);
}
-/* word offset to long pointer */
-static unsigned long *__bm_map_paddr(struct drbd_bitmap *b, unsigned long offset, const enum km_type km)
+/* we store some "meta" info about our pages in page->private */
+/* at a granularity of 4k storage per bitmap bit:
+ * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks
+ * 1<<38 bits,
+ * 1<<23 4k bitmap pages.
+ * Use 24 bits as page index, covers 2 peta byte storage
+ * at a granularity of 4k per bit.
+ * Used to report the failed page idx on io error from the endio handlers.
+ */
+#define BM_PAGE_IDX_MASK ((1UL<<24)-1)
+/* this page is currently read in, or written back */
+#define BM_PAGE_IO_LOCK 31
+/* if there has been an IO error for this page */
+#define BM_PAGE_IO_ERROR 30
+/* this is to be able to intelligently skip disk IO,
+ * set if bits have been set since last IO. */
+#define BM_PAGE_NEED_WRITEOUT 29
+/* to mark for lazy writeout once syncer cleared all clearable bits,
+ * we if bits have been cleared since last IO. */
+#define BM_PAGE_LAZY_WRITEOUT 28
+
+/* store_page_idx uses non-atomic assingment. It is only used directly after
+ * allocating the page. All other bm_set_page_* and bm_clear_page_* need to
+ * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
+ * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
+ * requires it all to be atomic as well. */
+static void bm_store_page_idx(struct page *page, unsigned long idx)
{
- struct page *page;
- unsigned long page_nr;
+ BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
+ page_private(page) |= idx;
+}
+
+static unsigned long bm_page_to_idx(struct page *page)
+{
+ return page_private(page) & BM_PAGE_IDX_MASK;
+}
+
+/* As is very unlikely that the same page is under IO from more than one
+ * context, we can get away with a bit per page and one wait queue per bitmap.
+ */
+static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr)
+{
+ struct drbd_bitmap *b = mdev->bitmap;
+ void *addr = &page_private(b->bm_pages[page_nr]);
+ wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
+}
+
+static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
+{
+ struct drbd_bitmap *b = mdev->bitmap;
+ void *addr = &page_private(b->bm_pages[page_nr]);
+ clear_bit(BM_PAGE_IO_LOCK, addr);
+ smp_mb__after_clear_bit();
+ wake_up(&mdev->bitmap->bm_io_wait);
+}
+
+/* set _before_ submit_io, so it may be reset due to being changed
+ * while this page is in flight... will get submitted later again */
+static void bm_set_page_unchanged(struct page *page)
+{
+ /* use cmpxchg? */
+ clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
+ clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
+}
+static void bm_set_page_need_writeout(struct page *page)
+{
+ set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
+}
+
+static int bm_test_page_unchanged(struct page *page)
+{
+ volatile const unsigned long *addr = &page_private(page);
+ return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
+}
+
+static void bm_set_page_io_err(struct page *page)
+{
+ set_bit(BM_PAGE_IO_ERROR, &page_private(page));
+}
+
+static void bm_clear_page_io_err(struct page *page)
+{
+ clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
+}
+
+static void bm_set_page_lazy_writeout(struct page *page)
+{
+ set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
+}
+
+static int bm_test_page_lazy_writeout(struct page *page)
+{
+ return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
+}
+
+/* on a 32bit box, this would allow for exactly (2<<38) bits. */
+static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
+{
/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
- page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3);
+ unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
BUG_ON(page_nr >= b->bm_number_of_pages);
- page = b->bm_pages[page_nr];
+ return page_nr;
+}
+static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
+{
+ /* page_nr = (bitnr/8) >> PAGE_SHIFT; */
+ unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
+ BUG_ON(page_nr >= b->bm_number_of_pages);
+ return page_nr;
+}
+
+static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km)
+{
+ struct page *page = b->bm_pages[idx];
return (unsigned long *) kmap_atomic(page, km);
}
-static unsigned long * bm_map_paddr(struct drbd_bitmap *b, unsigned long offset)
+static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
- return __bm_map_paddr(b, offset, KM_IRQ1);
+ return __bm_map_pidx(b, idx, KM_IRQ1);
}
static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
@@ -202,6 +334,7 @@ static void bm_unmap(unsigned long *p_addr)
* to be able to report device specific.
*/
+
static void bm_free_pages(struct page **pages, unsigned long number)
{
unsigned long i;
@@ -269,6 +402,9 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
bm_vk_free(new_pages, vmalloced);
return NULL;
}
+ /* we want to know which page it is
+ * from the endio handlers */
+ bm_store_page_idx(page, i);
new_pages[i] = page;
}
} else {
@@ -280,9 +416,9 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
}
if (vmalloced)
- set_bit(BM_P_VMALLOCED, &b->bm_flags);
+ b->bm_flags |= BM_P_VMALLOCED;
else
- clear_bit(BM_P_VMALLOCED, &b->bm_flags);
+ b->bm_flags &= ~BM_P_VMALLOCED;
return new_pages;
}
@@ -319,7 +455,7 @@ void drbd_bm_cleanup(struct drbd_conf *mdev)
{
ERR_IF (!mdev->bitmap) return;
bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
- bm_vk_free(mdev->bitmap->bm_pages, test_bit(BM_P_VMALLOCED, &mdev->bitmap->bm_flags));
+ bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
kfree(mdev->bitmap);
mdev->bitmap = NULL;
}
@@ -329,22 +465,39 @@ void drbd_bm_cleanup(struct drbd_conf *mdev)
* this masks out the remaining bits.
* Returns the number of bits cleared.
*/
+#define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3))
+#define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1)
+#define BITS_PER_LONG_MASK (BITS_PER_LONG - 1)
static int bm_clear_surplus(struct drbd_bitmap *b)
{
- const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1;
- size_t w = b->bm_bits >> LN2_BPL;
- int cleared = 0;
+ unsigned long mask;
unsigned long *p_addr, *bm;
+ int tmp;
+ int cleared = 0;
- p_addr = bm_map_paddr(b, w);
- bm = p_addr + MLPP(w);
- if (w < b->bm_words) {
+ /* number of bits modulo bits per page */
+ tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
+ /* mask the used bits of the word containing the last bit */
+ mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
+ /* bitmap is always stored little endian,
+ * on disk and in core memory alike */
+ mask = cpu_to_lel(mask);
+
+ p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
+ bm = p_addr + (tmp/BITS_PER_LONG);
+ if (mask) {
+ /* If mask != 0, we are not exactly aligned, so bm now points
+ * to the long containing the last bit.
+ * If mask == 0, bm already points to the word immediately
+ * after the last (long word aligned) bit. */
cleared = hweight_long(*bm & ~mask);
*bm &= mask;
- w++; bm++;
+ bm++;
}
- if (w < b->bm_words) {
+ if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
+ /* on a 32bit arch, we may need to zero out
+ * a padding long to align with a 64bit remote */
cleared += hweight_long(*bm);
*bm = 0;
}
@@ -354,66 +507,75 @@ static int bm_clear_surplus(struct drbd_bitmap *b)
static void bm_set_surplus(struct drbd_bitmap *b)
{
- const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1;
- size_t w = b->bm_bits >> LN2_BPL;
+ unsigned long mask;
unsigned long *p_addr, *bm;
-
- p_addr = bm_map_paddr(b, w);
- bm = p_addr + MLPP(w);
- if (w < b->bm_words) {
+ int tmp;
+
+ /* number of bits modulo bits per page */
+ tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
+ /* mask the used bits of the word containing the last bit */
+ mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
+ /* bitmap is always stored little endian,
+ * on disk and in core memory alike */
+ mask = cpu_to_lel(mask);
+
+ p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
+ bm = p_addr + (tmp/BITS_PER_LONG);
+ if (mask) {
+ /* If mask != 0, we are not exactly aligned, so bm now points
+ * to the long containing the last bit.
+ * If mask == 0, bm already points to the word immediately
+ * after the last (long word aligned) bit. */
*bm |= ~mask;
- bm++; w++;
+ bm++;
}
- if (w < b->bm_words) {
- *bm = ~(0UL);
+ if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
+ /* on a 32bit arch, we may need to zero out
+ * a padding long to align with a 64bit remote */
+ *bm = ~0UL;
}
bm_unmap(p_addr);
}
-static unsigned long __bm_count_bits(struct drbd_bitmap *b, const int swap_endian)
+/* you better not modify the bitmap while this is running,
+ * or its results will be stale */
+static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
- unsigned long *p_addr, *bm, offset = 0;
+ unsigned long *p_addr;
unsigned long bits = 0;
- unsigned long i, do_now;
-
- while (offset < b->bm_words) {
- i = do_now = min_t(size_t, b->bm_words-offset, LWPP);
- p_addr = __bm_map_paddr(b, offset, KM_USER0);
- bm = p_addr + MLPP(offset);
- while (i--) {
-#ifndef __LITTLE_ENDIAN
- if (swap_endian)
- *bm = lel_to_cpu(*bm);
-#endif
- bits += hweight_long(*bm++);
- }
+ unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
+ int idx, i, last_word;
+
+ /* all but last page */
+ for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
+ p_addr = __bm_map_pidx(b, idx, KM_USER0);
+ for (i = 0; i < LWPP; i++)
+ bits += hweight_long(p_addr[i]);
__bm_unmap(p_addr, KM_USER0);
- offset += do_now;
cond_resched();
}
-
+ /* last (or only) page */
+ last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
+ p_addr = __bm_map_pidx(b, idx, KM_USER0);
+ for (i = 0; i < last_word; i++)
+ bits += hweight_long(p_addr[i]);
+ p_addr[last_word] &= cpu_to_lel(mask);
+ bits += hweight_long(p_addr[last_word]);
+ /* 32bit arch, may have an unused padding long */
+ if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
+ p_addr[last_word+1] = 0;
+ __bm_unmap(p_addr, KM_USER0);
return bits;
}
-static unsigned long bm_count_bits(struct drbd_bitmap *b)
-{
- return __bm_count_bits(b, 0);
-}
-
-static unsigned long bm_count_bits_swap_endian(struct drbd_bitmap *b)
-{
- return __bm_count_bits(b, 1);
-}
-
/* offset and len in long words.*/
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
unsigned long *p_addr, *bm;
+ unsigned int idx;
size_t do_now, end;
-#define BM_SECTORS_PER_BIT (BM_BLOCK_SIZE/512)
-
end = offset + len;
if (end > b->bm_words) {
@@ -423,15 +585,16 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
while (offset < end) {
do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
- p_addr = bm_map_paddr(b, offset);
+ idx = bm_word_to_page_idx(b, offset);
+ p_addr = bm_map_pidx(b, idx);
bm = p_addr + MLPP(offset);
if (bm+do_now > p_addr + LWPP) {
printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
p_addr, bm, (int)do_now);
- break; /* breaks to after catch_oob_access_end() only! */
- }
- memset(bm, c, do_now * sizeof(long));
+ } else
+ memset(bm, c, do_now * sizeof(long));
bm_unmap(p_addr);
+ bm_set_page_need_writeout(b->bm_pages[idx]);
offset += do_now;
}
}
@@ -447,7 +610,7 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
struct drbd_bitmap *b = mdev->bitmap;
- unsigned long bits, words, owords, obits, *p_addr, *bm;
+ unsigned long bits, words, owords, obits;
unsigned long want, have, onpages; /* number of pages */
struct page **npages, **opages = NULL;
int err = 0, growing;
@@ -455,7 +618,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
ERR_IF(!b) return -ENOMEM;
- drbd_bm_lock(mdev, "resize");
+ drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);
dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
(unsigned long long)capacity);
@@ -463,7 +626,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
if (capacity == b->bm_dev_capacity)
goto out;
- opages_vmalloced = test_bit(BM_P_VMALLOCED, &b->bm_flags);
+ opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);
if (capacity == 0) {
spin_lock_irq(&b->bm_lock);
@@ -491,18 +654,23 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
words = ALIGN(bits, 64) >> LN2_BPL;
if (get_ldev(mdev)) {
- D_ASSERT((u64)bits <= (((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12));
+ u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12;
put_ldev(mdev);
+ if (bits > bits_on_disk) {
+ dev_info(DEV, "bits = %lu\n", bits);
+ dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
+ err = -ENOSPC;
+ goto out;
+ }
}
- /* one extra long to catch off by one errors */
- want = ALIGN((words+1)*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
+ want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
have = b->bm_number_of_pages;
if (want == have) {
D_ASSERT(b->bm_pages != NULL);
npages = b->bm_pages;
} else {
- if (FAULT_ACTIVE(mdev, DRBD_FAULT_BM_ALLOC))
+ if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
npages = NULL;
else
npages = bm_realloc_pages(b, want);
@@ -542,11 +710,6 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
bm_free_pages(opages + want, have - want);
}
- p_addr = bm_map_paddr(b, words);
- bm = p_addr + MLPP(words);
- *bm = DRBD_MAGIC;
- bm_unmap(p_addr);
-
(void)bm_clear_surplus(b);
spin_unlock_irq(&b->bm_lock);
@@ -554,7 +717,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
bm_vk_free(opages, opages_vmalloced);
if (!growing)
b->bm_set = bm_count_bits(b);
- dev_info(DEV, "resync bitmap: bits=%lu words=%lu\n", bits, words);
+ dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
out:
drbd_bm_unlock(mdev);
@@ -624,6 +787,7 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
struct drbd_bitmap *b = mdev->bitmap;
unsigned long *p_addr, *bm;
unsigned long word, bits;
+ unsigned int idx;
size_t end, do_now;
end = offset + number;
@@ -638,16 +802,18 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
spin_lock_irq(&b->bm_lock);
while (offset < end) {
do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
- p_addr = bm_map_paddr(b, offset);
+ idx = bm_word_to_page_idx(b, offset);
+ p_addr = bm_map_pidx(b, idx);
bm = p_addr + MLPP(offset);
offset += do_now;
while (do_now--) {
bits = hweight_long(*bm);
- word = *bm | lel_to_cpu(*buffer++);
+ word = *bm | *buffer++;
*bm++ = word;
b->bm_set += hweight_long(word) - bits;
}
bm_unmap(p_addr);
+ bm_set_page_need_writeout(b->bm_pages[idx]);
}
/* with 32bit <-> 64bit cross-platform connect
* this is only correct for current usage,
@@ -656,7 +822,6 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
*/
if (end == b->bm_words)
b->bm_set -= bm_clear_surplus(b);
-
spin_unlock_irq(&b->bm_lock);
}
@@ -686,11 +851,11 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
else {
while (offset < end) {
do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
- p_addr = bm_map_paddr(b, offset);
+ p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
bm = p_addr + MLPP(offset);
offset += do_now;
while (do_now--)
- *buffer++ = cpu_to_lel(*bm++);
+ *buffer++ = *bm++;
bm_unmap(p_addr);
}
}
@@ -724,9 +889,22 @@ void drbd_bm_clear_all(struct drbd_conf *mdev)
spin_unlock_irq(&b->bm_lock);
}
+struct bm_aio_ctx {
+ struct drbd_conf *mdev;
+ atomic_t in_flight;
+ struct completion done;
+ unsigned flags;
+#define BM_AIO_COPY_PAGES 1
+ int error;
+};
+
+/* bv_page may be a copy, or may be the original */
static void bm_async_io_complete(struct bio *bio, int error)
{
- struct drbd_bitmap *b = bio->bi_private;
+ struct bm_aio_ctx *ctx = bio->bi_private;
+ struct drbd_conf *mdev = ctx->mdev;
+ struct drbd_bitmap *b = mdev->bitmap;
+ unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
int uptodate = bio_flagged(bio, BIO_UPTODATE);
@@ -737,38 +915,83 @@ static void bm_async_io_complete(struct bio *bio, int error)
if (!error && !uptodate)
error = -EIO;
+ if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
+ !bm_test_page_unchanged(b->bm_pages[idx]))
+ dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx);
+
if (error) {
- /* doh. what now?
- * for now, set all bits, and flag MD_IO_ERROR */
- __set_bit(BM_MD_IO_ERROR, &b->bm_flags);
+ /* ctx->error will hold the error code of whichever request completed
+ * last with a non-zero error, in case the error codes differ. */
+ ctx->error = error;
+ bm_set_page_io_err(b->bm_pages[idx]);
+ /* The in-memory page is no longer identical to its on-disk version.
+ * Is BM_PAGE_IO_ERROR enough? */
+ if (__ratelimit(&drbd_ratelimit_state))
+ dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n",
+ error, idx);
+ } else {
+ bm_clear_page_io_err(b->bm_pages[idx]);
+ dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
}
- if (atomic_dec_and_test(&b->bm_async_io))
- wake_up(&b->bm_io_wait);
+
+ bm_page_unlock_io(mdev, idx);
+
+ /* FIXME give back to page pool */
+ if (ctx->flags & BM_AIO_COPY_PAGES)
+ put_page(bio->bi_io_vec[0].bv_page);
bio_put(bio);
+
+ if (atomic_dec_and_test(&ctx->in_flight))
+ complete(&ctx->done);
}
-static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int page_nr, int rw) __must_hold(local)
+static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
 /* we are in process context, so we always get a bio */
struct bio *bio = bio_alloc(GFP_KERNEL, 1);
+ struct drbd_conf *mdev = ctx->mdev;
+ struct drbd_bitmap *b = mdev->bitmap;
+ struct page *page;
unsigned int len;
+
sector_t on_disk_sector =
mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);
/* this might happen with very small
- * flexible external meta data device */
+ * flexible external meta data device,
+ * or with PAGE_SIZE > 4k */
len = min_t(unsigned int, PAGE_SIZE,
(drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);
+ /* serialize IO on this page */
+ bm_page_lock_io(mdev, page_nr);
+ /* before memcpy and submit,
+ * so it can be redirtied any time */
+ bm_set_page_unchanged(b->bm_pages[page_nr]);
+
+ if (ctx->flags & BM_AIO_COPY_PAGES) {
+ /* FIXME alloc_page is good enough for now, but actually needs
+ * to use pre-allocated page pool */
+ void *src, *dest;
+ page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
+ dest = kmap_atomic(page, KM_USER0);
+ src = kmap_atomic(b->bm_pages[page_nr], KM_USER1);
+ memcpy(dest, src, PAGE_SIZE);
+ kunmap_atomic(src, KM_USER1);
+ kunmap_atomic(dest, KM_USER0);
+ bm_store_page_idx(page, page_nr);
+ } else
+ page = b->bm_pages[page_nr];
+
bio->bi_bdev = mdev->ldev->md_bdev;
bio->bi_sector = on_disk_sector;
- bio_add_page(bio, b->bm_pages[page_nr], len, 0);
- bio->bi_private = b;
+ bio_add_page(bio, page, len, 0);
+ bio->bi_private = ctx;
bio->bi_end_io = bm_async_io_complete;
- if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
+ if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
bio->bi_rw |= rw;
bio_endio(bio, -EIO);
} else {
@@ -776,87 +999,84 @@ static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int
}
}
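The sector arithmetic above places bitmap page N at md_offset + bm_offset + N * (PAGE_SIZE/512) sectors; for lazy writeout the page is copied first so the live bitmap may keep changing while the copy is in flight. A tiny sketch of just the sector calculation, with made-up meta-data offsets:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12   /* assumed 4k pages */

int main(void)
{
    uint64_t md_offset = 1000000;  /* hypothetical meta-data start sector */
    uint64_t bm_offset = 8;        /* hypothetical bitmap offset within the meta data, in sectors */
    unsigned int page_nr;

    for (page_nr = 0; page_nr < 3; page_nr++) {
        uint64_t on_disk_sector = md_offset + bm_offset
                                + ((uint64_t)page_nr << (PAGE_SHIFT - 9));
        printf("bitmap page %u -> sector %llu\n",
               page_nr, (unsigned long long)on_disk_sector);
    }
    return 0;
}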
-# if defined(__LITTLE_ENDIAN)
- /* nothing to do, on disk == in memory */
-# define bm_cpu_to_lel(x) ((void)0)
-# else
-static void bm_cpu_to_lel(struct drbd_bitmap *b)
-{
- /* need to cpu_to_lel all the pages ...
- * this may be optimized by using
- * cpu_to_lel(-1) == -1 and cpu_to_lel(0) == 0;
- * the following is still not optimal, but better than nothing */
- unsigned int i;
- unsigned long *p_addr, *bm;
- if (b->bm_set == 0) {
- /* no page at all; avoid swap if all is 0 */
- i = b->bm_number_of_pages;
- } else if (b->bm_set == b->bm_bits) {
- /* only the last page */
- i = b->bm_number_of_pages - 1;
- } else {
- /* all pages */
- i = 0;
- }
- for (; i < b->bm_number_of_pages; i++) {
- p_addr = kmap_atomic(b->bm_pages[i], KM_USER0);
- for (bm = p_addr; bm < p_addr + PAGE_SIZE/sizeof(long); bm++)
- *bm = cpu_to_lel(*bm);
- kunmap_atomic(p_addr, KM_USER0);
- }
-}
-# endif
-/* lel_to_cpu == cpu_to_lel */
-# define bm_lel_to_cpu(x) bm_cpu_to_lel(x)
-
/*
* bm_rw: read/write the whole bitmap from/to its on disk location.
*/
-static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
+static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
+ struct bm_aio_ctx ctx = {
+ .mdev = mdev,
+ .in_flight = ATOMIC_INIT(1),
+ .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
+ .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0,
+ };
struct drbd_bitmap *b = mdev->bitmap;
- /* sector_t sector; */
- int bm_words, num_pages, i;
+ int num_pages, i, count = 0;
unsigned long now;
char ppb[10];
int err = 0;
- WARN_ON(!bm_is_locked(b));
-
- /* no spinlock here, the drbd_bm_lock should be enough! */
-
- bm_words = drbd_bm_words(mdev);
- num_pages = (bm_words*sizeof(long) + PAGE_SIZE-1) >> PAGE_SHIFT;
+ /*
+ * We are protected against bitmap disappearing/resizing by holding an
+ * ldev reference (caller must have called get_ldev()).
+ * For read/write, we are protected against changes to the bitmap by
+ * the bitmap lock (see drbd_bitmap_io).
+ * For lazy writeout, we don't care for ongoing changes to the bitmap,
+ * as we submit copies of pages anyways.
+ */
+ if (!ctx.flags)
+ WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));
- /* on disk bitmap is little endian */
- if (rw == WRITE)
- bm_cpu_to_lel(b);
+ num_pages = b->bm_number_of_pages;
now = jiffies;
- atomic_set(&b->bm_async_io, num_pages);
- __clear_bit(BM_MD_IO_ERROR, &b->bm_flags);
/* let the layers below us try to merge these bios... */
- for (i = 0; i < num_pages; i++)
- bm_page_io_async(mdev, b, i, rw);
+ for (i = 0; i < num_pages; i++) {
+ /* stop at the lazy writeout upper bound; unchanged pages are skipped below */
+ if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
+ break;
+ if (rw & WRITE) {
+ if (bm_test_page_unchanged(b->bm_pages[i])) {
+ dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
+ continue;
+ }
+ /* during lazy writeout,
+ * ignore those pages not marked for lazy writeout. */
+ if (lazy_writeout_upper_idx &&
+ !bm_test_page_lazy_writeout(b->bm_pages[i])) {
+ dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i);
+ continue;
+ }
+ }
+ atomic_inc(&ctx.in_flight);
+ bm_page_io_async(&ctx, i, rw);
+ ++count;
+ cond_resched();
+ }
- wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);
+ /*
+ * We initialize ctx.in_flight to one to make sure bm_async_io_complete
+ * will not complete() early, and decrement / test it here. If there
+ * are still some bios in flight, we need to wait for them here.
+ */
+ if (!atomic_dec_and_test(&ctx.in_flight))
+ wait_for_completion(&ctx.done);
+ dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
+ rw == WRITE ? "WRITE" : "READ",
+ count, jiffies - now);
- if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
+ if (ctx.error) {
dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
- drbd_chk_io_error(mdev, 1, TRUE);
- err = -EIO;
+ drbd_chk_io_error(mdev, 1, true);
+ err = -EIO; /* ctx.error ? */
}
now = jiffies;
if (rw == WRITE) {
- /* swap back endianness */
- bm_lel_to_cpu(b);
- /* flush bitmap to stable storage */
drbd_md_flush(mdev);
} else /* rw == READ */ {
- /* just read, if necessary adjust endianness */
- b->bm_set = bm_count_bits_swap_endian(b);
+ b->bm_set = bm_count_bits(b);
dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
jiffies - now);
}
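The submit path above keeps one reference of its own on ctx.in_flight, so a bio that completes immediately cannot fire the completion before all pages have been submitted; the final dec-and-test decides whether waiting is needed at all. A hedged standalone sketch of that bias-by-one idiom (C11 atomics standing in for the kernel primitives):

#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Sketch of the "bias the in-flight counter by one" idiom used by bm_rw():
 * the submitter keeps one reference of its own while issuing requests, so a
 * request that completes immediately can never signal "all done" too early.
 * Single-threaded stand-in; the kernel's struct completion is just a flag here. */

static atomic_int in_flight = 1;        /* the submitter's own reference */
static bool completed;

static void request_done(void)
{
    if (atomic_fetch_sub(&in_flight, 1) == 1)
        completed = true;               /* last reference gone: signal completion */
}

int main(void)
{
    int i;

    for (i = 0; i < 4; i++) {
        atomic_fetch_add(&in_flight, 1);   /* one reference per submitted request */
        request_done();                    /* pretend it completed instantly */
        /* 'completed' stays false: the submitter still holds its bias reference */
    }

    /* submitter drops its reference; if that was the last one, no wait is needed,
     * otherwise it would wait_for_completion() until request_done() signals */
    if (atomic_fetch_sub(&in_flight, 1) > 1)
        while (!completed) { /* wait */ }

    printf("in_flight = %d\n", atomic_load(&in_flight));  /* prints 0 */
    return 0;
}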
@@ -874,112 +1094,128 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
*/
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
- return bm_rw(mdev, READ);
+ return bm_rw(mdev, READ, 0);
}
/**
* drbd_bm_write() - Write the whole bitmap to its on disk location.
* @mdev: DRBD device.
+ *
+ * Will only write pages that have changed since last IO.
*/
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
- return bm_rw(mdev, WRITE);
+ return bm_rw(mdev, WRITE, 0);
}
/**
- * drbd_bm_write_sect: Writes a 512 (MD_SECTOR_SIZE) byte piece of the bitmap
+ * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
* @mdev: DRBD device.
- * @enr: Extent number in the resync lru (happens to be sector offset)
- *
- * The BM_EXT_SIZE is on purpose exactly the amount of the bitmap covered
- * by a single sector write. Therefore enr == sector offset from the
- * start of the bitmap.
+ * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages
*/
-int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local)
+int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
{
- sector_t on_disk_sector = enr + mdev->ldev->md.md_offset
- + mdev->ldev->md.bm_offset;
- int bm_words, num_words, offset;
- int err = 0;
+ return bm_rw(mdev, WRITE, upper_idx);
+}
+
- mutex_lock(&mdev->md_io_mutex);
- bm_words = drbd_bm_words(mdev);
- offset = S2W(enr); /* word offset into bitmap */
- num_words = min(S2W(1), bm_words - offset);
- if (num_words < S2W(1))
- memset(page_address(mdev->md_io_page), 0, MD_SECTOR_SIZE);
- drbd_bm_get_lel(mdev, offset, num_words,
- page_address(mdev->md_io_page));
- if (!drbd_md_sync_page_io(mdev, mdev->ldev, on_disk_sector, WRITE)) {
- int i;
- err = -EIO;
- dev_err(DEV, "IO ERROR writing bitmap sector %lu "
- "(meta-disk sector %llus)\n",
- enr, (unsigned long long)on_disk_sector);
- drbd_chk_io_error(mdev, 1, TRUE);
- for (i = 0; i < AL_EXT_PER_BM_SECT; i++)
- drbd_bm_ALe_set_all(mdev, enr*AL_EXT_PER_BM_SECT+i);
+/**
+ * drbd_bm_write_page: Writes a PAGE_SIZE aligned piece of bitmap
+ * @mdev: DRBD device.
+ * @idx: bitmap page index
+ *
+ * We don't want to special case on logical_block_size of the backend device,
+ * so we submit PAGE_SIZE aligned pieces.
+ * Note that on "most" systems, PAGE_SIZE is 4k.
+ *
+ * In case this becomes an issue on systems with larger PAGE_SIZE,
+ * we may want to change this again to write 4k aligned 4k pieces.
+ */
+int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
+{
+ struct bm_aio_ctx ctx = {
+ .mdev = mdev,
+ .in_flight = ATOMIC_INIT(1),
+ .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
+ .flags = BM_AIO_COPY_PAGES,
+ };
+
+ if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
+ dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
+ return 0;
}
+
+ bm_page_io_async(&ctx, idx, WRITE_SYNC);
+ wait_for_completion(&ctx.done);
+
+ if (ctx.error)
+ drbd_chk_io_error(mdev, 1, true);
+ /* that should force detach, so the in memory bitmap will be
+ * gone in a moment as well. */
+
mdev->bm_writ_cnt++;
- mutex_unlock(&mdev->md_io_mutex);
- return err;
+ return ctx.error;
}
/* NOTE
* find_first_bit returns int, we return unsigned long.
- * should not make much difference anyways, but ...
+ * For this to work on 32bit arch with bitnumbers > (1<<32),
+ * we'd need to return u64, and get a whole lot of other places
+ * fixed where we still use unsigned long.
*
* this returns a bit number, NOT a sector!
*/
-#define BPP_MASK ((1UL << (PAGE_SHIFT+3)) - 1)
static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
const int find_zero_bit, const enum km_type km)
{
struct drbd_bitmap *b = mdev->bitmap;
- unsigned long i = -1UL;
unsigned long *p_addr;
- unsigned long bit_offset; /* bit offset of the mapped page. */
+ unsigned long bit_offset;
+ unsigned i;
+
if (bm_fo > b->bm_bits) {
dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
+ bm_fo = DRBD_END_OF_BITMAP;
} else {
while (bm_fo < b->bm_bits) {
- unsigned long offset;
- bit_offset = bm_fo & ~BPP_MASK; /* bit offset of the page */
- offset = bit_offset >> LN2_BPL; /* word offset of the page */
- p_addr = __bm_map_paddr(b, offset, km);
+ /* bit offset of the first bit in the page */
+ bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
+ p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km);
if (find_zero_bit)
- i = find_next_zero_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK);
+ i = generic_find_next_zero_le_bit(p_addr,
+ PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
else
- i = find_next_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK);
+ i = generic_find_next_le_bit(p_addr,
+ PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
__bm_unmap(p_addr, km);
if (i < PAGE_SIZE*8) {
- i = bit_offset + i;
- if (i >= b->bm_bits)
+ bm_fo = bit_offset + i;
+ if (bm_fo >= b->bm_bits)
break;
goto found;
}
bm_fo = bit_offset + PAGE_SIZE*8;
}
- i = -1UL;
+ bm_fo = DRBD_END_OF_BITMAP;
}
found:
- return i;
+ return bm_fo;
}
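__bm_find_next() splits the starting bit number into the page's base offset (bm_fo & ~BITS_PER_PAGE_MASK) and the offset within that page, searches one page, and adds the base back to any hit. A rough userspace equivalent of that paged search (byte-wise scan instead of the little-endian word helpers, sizes assumed):

#include <stdio.h>

#define PAGE_BITS (4096UL * 8)                 /* bits per 4k page (assumed) */
#define BITS_PER_PAGE_MASK (PAGE_BITS - 1)

/* find the next set bit at or after 'from' in a flat bit array of 'nbits' bits,
 * scanning one "page" at a time as __bm_find_next() does */
static unsigned long find_next_bit_paged(const unsigned char *bits,
                                         unsigned long nbits, unsigned long from)
{
    while (from < nbits) {
        unsigned long page_base = from & ~BITS_PER_PAGE_MASK;  /* first bit of this page */
        unsigned long in_page   = from & BITS_PER_PAGE_MASK;

        for (; in_page < PAGE_BITS && page_base + in_page < nbits; in_page++)
            if (bits[(page_base + in_page) / 8] & (1u << ((page_base + in_page) % 8)))
                return page_base + in_page;

        from = page_base + PAGE_BITS;          /* continue with the next page */
    }
    return ~0UL;                               /* like DRBD_END_OF_BITMAP */
}

int main(void)
{
    unsigned char bm[8192] = { 0 };            /* two "pages" worth of bits */
    bm[5000] = 0x10;                           /* sets bit 5000*8 + 4 = 40004 */
    printf("%lu\n", find_next_bit_paged(bm, 8192UL * 8, 0));   /* prints 40004 */
    return 0;
}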
static unsigned long bm_find_next(struct drbd_conf *mdev,
unsigned long bm_fo, const int find_zero_bit)
{
struct drbd_bitmap *b = mdev->bitmap;
- unsigned long i = -1UL;
+ unsigned long i = DRBD_END_OF_BITMAP;
ERR_IF(!b) return i;
ERR_IF(!b->bm_pages) return i;
spin_lock_irq(&b->bm_lock);
- if (bm_is_locked(b))
+ if (BM_DONT_TEST & b->bm_flags)
bm_print_lock_info(mdev);
i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);
@@ -1005,13 +1241,13 @@ unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo
* you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
- /* WARN_ON(!bm_is_locked(mdev)); */
+ /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
}
unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
- /* WARN_ON(!bm_is_locked(mdev)); */
+ /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
}
@@ -1027,8 +1263,9 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
struct drbd_bitmap *b = mdev->bitmap;
unsigned long *p_addr = NULL;
unsigned long bitnr;
- unsigned long last_page_nr = -1UL;
+ unsigned int last_page_nr = -1U;
int c = 0;
+ int changed_total = 0;
if (e >= b->bm_bits) {
dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
@@ -1036,23 +1273,33 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
e = b->bm_bits ? b->bm_bits -1 : 0;
}
for (bitnr = s; bitnr <= e; bitnr++) {
- unsigned long offset = bitnr>>LN2_BPL;
- unsigned long page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3);
+ unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
if (page_nr != last_page_nr) {
if (p_addr)
__bm_unmap(p_addr, km);
- p_addr = __bm_map_paddr(b, offset, km);
+ if (c < 0)
+ bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
+ else if (c > 0)
+ bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
+ changed_total += c;
+ c = 0;
+ p_addr = __bm_map_pidx(b, page_nr, km);
last_page_nr = page_nr;
}
if (val)
- c += (0 == __test_and_set_bit(bitnr & BPP_MASK, p_addr));
+ c += (0 == generic___test_and_set_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr));
else
- c -= (0 != __test_and_clear_bit(bitnr & BPP_MASK, p_addr));
+ c -= (0 != generic___test_and_clear_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr));
}
if (p_addr)
__bm_unmap(p_addr, km);
- b->bm_set += c;
- return c;
+ if (c < 0)
+ bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
+ else if (c > 0)
+ bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
+ changed_total += c;
+ b->bm_set += changed_total;
+ return changed_total;
}
/* returns number of bits actually changed.
@@ -1070,7 +1317,7 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
ERR_IF(!b->bm_pages) return 0;
spin_lock_irqsave(&b->bm_lock, flags);
- if (bm_is_locked(b))
+ if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
bm_print_lock_info(mdev);
c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);
@@ -1187,12 +1434,11 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
ERR_IF(!b->bm_pages) return 0;
spin_lock_irqsave(&b->bm_lock, flags);
- if (bm_is_locked(b))
+ if (BM_DONT_TEST & b->bm_flags)
bm_print_lock_info(mdev);
if (bitnr < b->bm_bits) {
- unsigned long offset = bitnr>>LN2_BPL;
- p_addr = bm_map_paddr(b, offset);
- i = test_bit(bitnr & BPP_MASK, p_addr) ? 1 : 0;
+ p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
+ i = generic_test_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
bm_unmap(p_addr);
} else if (bitnr == b->bm_bits) {
i = -1;
@@ -1210,10 +1456,10 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
{
unsigned long flags;
struct drbd_bitmap *b = mdev->bitmap;
- unsigned long *p_addr = NULL, page_nr = -1;
+ unsigned long *p_addr = NULL;
unsigned long bitnr;
+ unsigned int page_nr = -1U;
int c = 0;
- size_t w;
/* If this is called without a bitmap, that is a bug. But just to be
* robust in case we screwed up elsewhere, in that case pretend there
@@ -1223,20 +1469,20 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
ERR_IF(!b->bm_pages) return 1;
spin_lock_irqsave(&b->bm_lock, flags);
- if (bm_is_locked(b))
+ if (BM_DONT_TEST & b->bm_flags)
bm_print_lock_info(mdev);
for (bitnr = s; bitnr <= e; bitnr++) {
- w = bitnr >> LN2_BPL;
- if (page_nr != w >> (PAGE_SHIFT - LN2_BPL + 3)) {
- page_nr = w >> (PAGE_SHIFT - LN2_BPL + 3);
+ unsigned int idx = bm_bit_to_page_idx(b, bitnr);
+ if (page_nr != idx) {
+ page_nr = idx;
if (p_addr)
bm_unmap(p_addr);
- p_addr = bm_map_paddr(b, w);
+ p_addr = bm_map_pidx(b, idx);
}
ERR_IF (bitnr >= b->bm_bits) {
dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
} else {
- c += (0 != test_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
+ c += (0 != generic_test_le_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
}
}
if (p_addr)
@@ -1271,7 +1517,7 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
ERR_IF(!b->bm_pages) return 0;
spin_lock_irqsave(&b->bm_lock, flags);
- if (bm_is_locked(b))
+ if (BM_DONT_TEST & b->bm_flags)
bm_print_lock_info(mdev);
s = S2W(enr);
@@ -1279,7 +1525,7 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
count = 0;
if (s < b->bm_words) {
int n = e-s;
- p_addr = bm_map_paddr(b, s);
+ p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
bm = p_addr + MLPP(s);
while (n--)
count += hweight_long(*bm++);
@@ -1291,18 +1537,20 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
return count;
}
-/* set all bits covered by the AL-extent al_enr */
+/* Set all bits covered by the AL-extent al_enr.
+ * Returns number of bits changed. */
unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
{
struct drbd_bitmap *b = mdev->bitmap;
unsigned long *p_addr, *bm;
unsigned long weight;
- int count, s, e, i, do_now;
+ unsigned long s, e;
+ int count, i, do_now;
ERR_IF(!b) return 0;
ERR_IF(!b->bm_pages) return 0;
spin_lock_irq(&b->bm_lock);
- if (bm_is_locked(b))
+ if (BM_DONT_SET & b->bm_flags)
bm_print_lock_info(mdev);
weight = b->bm_set;
@@ -1314,7 +1562,7 @@ unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
count = 0;
if (s < b->bm_words) {
i = do_now = e-s;
- p_addr = bm_map_paddr(b, s);
+ p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
bm = p_addr + MLPP(s);
while (i--) {
count += hweight_long(*bm);
@@ -1326,7 +1574,7 @@ unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
if (e == b->bm_words)
b->bm_set -= bm_clear_surplus(b);
} else {
- dev_err(DEV, "start offset (%d) too large in drbd_bm_ALe_set_all\n", s);
+ dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s);
}
weight = b->bm_set - weight;
spin_unlock_irq(&b->bm_lock);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index b0bd27dfc1e8..81030d8d654b 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -72,13 +72,6 @@ extern int fault_devs;
extern char usermode_helper[];
-#ifndef TRUE
-#define TRUE 1
-#endif
-#ifndef FALSE
-#define FALSE 0
-#endif
-
/* I don't remember why XCPU ...
* This is used to wake the asender,
* and to interrupt sending the sending task
@@ -104,6 +97,7 @@ extern char usermode_helper[];
#define ID_SYNCER (-1ULL)
#define ID_VACANT 0
#define is_syncer_block_id(id) ((id) == ID_SYNCER)
+#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
struct drbd_conf;
@@ -137,20 +131,19 @@ enum {
DRBD_FAULT_MAX,
};
-#ifdef CONFIG_DRBD_FAULT_INJECTION
extern unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
+
static inline int
drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
+#ifdef CONFIG_DRBD_FAULT_INJECTION
return fault_rate &&
(enable_faults & (1<<type)) &&
_drbd_insert_fault(mdev, type);
-}
-#define FAULT_ACTIVE(_m, _t) (drbd_insert_fault((_m), (_t)))
-
#else
-#define FAULT_ACTIVE(_m, _t) (0)
+ return 0;
#endif
+}
/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
@@ -212,8 +205,10 @@ enum drbd_packets {
/* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */
/* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */
P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */
+ P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */
+ P_RS_CANCEL = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */
- P_MAX_CMD = 0x28,
+ P_MAX_CMD = 0x2A,
P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
P_MAX_OPT_CMD = 0x101,
@@ -269,6 +264,7 @@ static inline const char *cmdname(enum drbd_packets cmd)
[P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
[P_COMPRESSED_BITMAP] = "CBitmap",
[P_DELAY_PROBE] = "DelayProbe",
+ [P_OUT_OF_SYNC] = "OutOfSync",
[P_MAX_CMD] = NULL,
};
@@ -512,7 +508,7 @@ struct p_sizes {
u64 d_size; /* size of disk */
u64 u_size; /* user requested size */
u64 c_size; /* current exported size */
- u32 max_segment_size; /* Maximal size of a BIO */
+ u32 max_bio_size; /* Maximal size of a BIO */
u16 queue_order_type; /* not yet implemented in DRBD*/
u16 dds_flags; /* use enum dds_flags here. */
} __packed;
@@ -550,6 +546,13 @@ struct p_discard {
u32 pad;
} __packed;
+struct p_block_desc {
+ struct p_header80 head;
+ u64 sector;
+ u32 blksize;
+ u32 pad; /* pad to a multiple of 8 bytes */
+} __packed;
+
/* Valid values for the encoding field.
* Bump proto version when changing this. */
enum drbd_bitmap_code {
@@ -647,6 +650,7 @@ union p_polymorph {
struct p_block_req block_req;
struct p_delay_probe93 delay_probe93;
struct p_rs_uuid rs_uuid;
+ struct p_block_desc block_desc;
} __packed;
/**********************************************************************/
@@ -677,13 +681,6 @@ static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
return thi->t_state;
}
-
-/*
- * Having this as the first member of a struct provides sort of "inheritance".
- * "derived" structs can be "drbd_queue_work()"ed.
- * The callback should know and cast back to the descendant struct.
- * drbd_request and drbd_epoch_entry are descendants of drbd_work.
- */
struct drbd_work;
typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel);
struct drbd_work {
@@ -712,9 +709,6 @@ struct drbd_request {
* starting a new epoch...
*/
- /* up to here, the struct layout is identical to drbd_epoch_entry;
- * we might be able to use that to our advantage... */
-
struct list_head tl_requests; /* ring list in the transfer log */
struct bio *master_bio; /* master bio pointer */
unsigned long rq_state; /* see comments above _req_mod() */
@@ -831,7 +825,7 @@ enum {
CRASHED_PRIMARY, /* This node was a crashed primary.
* Gets cleared when the state.conn
* goes into C_CONNECTED state. */
- WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */
+ NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */
CONSIDER_RESYNC,
MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */
@@ -856,10 +850,37 @@ enum {
GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */
NEW_CUR_UUID, /* Create new current UUID when thawing IO */
AL_SUSPENDED, /* Activity logging is currently suspended. */
+ AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */
};
struct drbd_bitmap; /* opaque for drbd_conf */
+/* definition of bits in bm_flags to be used in drbd_bm_lock
+ * and drbd_bitmap_io and friends. */
+enum bm_flag {
+ /* do we need to kfree, or vfree bm_pages? */
+ BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */
+
+ /* currently locked for bulk operation */
+ BM_LOCKED_MASK = 0x7,
+
+ /* in detail, that is: */
+ BM_DONT_CLEAR = 0x1,
+ BM_DONT_SET = 0x2,
+ BM_DONT_TEST = 0x4,
+
+ /* (test bit, count bit) allowed (common case) */
+ BM_LOCKED_TEST_ALLOWED = 0x3,
+
+ /* testing bits, as well as setting new bits allowed, but clearing bits
+ * would be unexpected. Used during bitmap receive. Setting new bits
+ * requires sending of "out-of-sync" information, though. */
+ BM_LOCKED_SET_ALLOWED = 0x1,
+
+ /* clear is not expected while bitmap is locked for bulk operation */
+};
+
+
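The lock flags compose by OR: BM_LOCKED_MASK forbids everything, BM_LOCKED_TEST_ALLOWED (DONT_CLEAR|DONT_SET) leaves only testing, and BM_LOCKED_SET_ALLOWED (DONT_CLEAR) additionally permits setting bits. A tiny standalone check of those combinations (plain C, not the in-tree helpers):

#include <stdio.h>

enum bm_flag {
    BM_DONT_CLEAR = 0x1,
    BM_DONT_SET   = 0x2,
    BM_DONT_TEST  = 0x4,
    BM_LOCKED_MASK         = 0x7,
    BM_LOCKED_TEST_ALLOWED = 0x3,   /* = DONT_CLEAR | DONT_SET */
    BM_LOCKED_SET_ALLOWED  = 0x1,   /* = DONT_CLEAR only */
};

static void describe(const char *name, unsigned flags)
{
    printf("%-22s test:%s set:%s clear:%s\n", name,
           (flags & BM_DONT_TEST)  ? "no" : "yes",
           (flags & BM_DONT_SET)   ? "no" : "yes",
           (flags & BM_DONT_CLEAR) ? "no" : "yes");
}

int main(void)
{
    describe("BM_LOCKED_MASK", BM_LOCKED_MASK);                  /* everything forbidden */
    describe("BM_LOCKED_TEST_ALLOWED", BM_LOCKED_TEST_ALLOWED);  /* only test allowed */
    describe("BM_LOCKED_SET_ALLOWED", BM_LOCKED_SET_ALLOWED);    /* test and set allowed */
    return 0;
}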
/* TODO sort members for performance
* MAYBE group them further */
@@ -925,6 +946,7 @@ struct drbd_md_io {
struct bm_io_work {
struct drbd_work w;
char *why;
+ enum bm_flag flags;
int (*io_fn)(struct drbd_conf *mdev);
void (*done)(struct drbd_conf *mdev, int rv);
};
@@ -963,9 +985,12 @@ struct drbd_conf {
struct drbd_work resync_work,
unplug_work,
go_diskless,
- md_sync_work;
+ md_sync_work,
+ start_resync_work;
struct timer_list resync_timer;
struct timer_list md_sync_timer;
+ struct timer_list start_resync_timer;
+ struct timer_list request_timer;
#ifdef DRBD_DEBUG_MD_SYNC
struct {
unsigned int line;
@@ -1000,9 +1025,9 @@ struct drbd_conf {
struct hlist_head *tl_hash;
unsigned int tl_hash_s;
- /* blocks to sync in this run [unit BM_BLOCK_SIZE] */
+ /* blocks to resync in this run [unit BM_BLOCK_SIZE] */
unsigned long rs_total;
- /* number of sync IOs that failed in this run */
+ /* number of resync blocks that failed in this run */
unsigned long rs_failed;
/* Syncer's start time [unit jiffies] */
unsigned long rs_start;
@@ -1102,6 +1127,7 @@ struct drbd_conf {
struct fifo_buffer rs_plan_s; /* correction values of resync planer */
int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
int rs_planed; /* resync sectors already planed */
+ atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
};
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1163,14 +1189,19 @@ enum dds_flags {
};
extern void drbd_init_set_defaults(struct drbd_conf *mdev);
-extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
- union drbd_state mask, union drbd_state val);
+extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev,
+ enum chg_state_flags f,
+ union drbd_state mask,
+ union drbd_state val);
extern void drbd_force_state(struct drbd_conf *, union drbd_state,
union drbd_state);
-extern int _drbd_request_state(struct drbd_conf *, union drbd_state,
- union drbd_state, enum chg_state_flags);
-extern int __drbd_set_state(struct drbd_conf *, union drbd_state,
- enum chg_state_flags, struct completion *done);
+extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *,
+ union drbd_state,
+ union drbd_state,
+ enum chg_state_flags);
+extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state,
+ enum chg_state_flags,
+ struct completion *done);
extern void print_st_err(struct drbd_conf *, union drbd_state,
union drbd_state, int);
extern int drbd_thread_start(struct drbd_thread *thi);
@@ -1195,7 +1226,7 @@ extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
extern int drbd_send_protocol(struct drbd_conf *mdev);
extern int drbd_send_uuids(struct drbd_conf *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
-extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val);
+extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
extern int _drbd_send_state(struct drbd_conf *mdev);
extern int drbd_send_state(struct drbd_conf *mdev);
@@ -1220,11 +1251,10 @@ extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
sector_t sector, int blksize, u64 block_id);
+extern int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
struct drbd_epoch_entry *e);
extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
-extern int _drbd_send_barrier(struct drbd_conf *mdev,
- struct drbd_tl_epoch *barrier);
extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_conf *mdev,
@@ -1235,14 +1265,13 @@ extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size)
extern int drbd_send_bitmap(struct drbd_conf *mdev);
extern int _drbd_send_bitmap(struct drbd_conf *mdev);
-extern int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode);
+extern int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode);
extern void drbd_free_bc(struct drbd_backing_dev *ldev);
extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
+void drbd_print_uuids(struct drbd_conf *mdev, const char *text);
-/* drbd_meta-data.c (still in drbd_main.c) */
extern void drbd_md_sync(struct drbd_conf *mdev);
extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
-/* maybe define them below as inline? */
extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
@@ -1261,10 +1290,12 @@ extern void drbd_md_mark_dirty_(struct drbd_conf *mdev,
extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
int (*io_fn)(struct drbd_conf *),
void (*done)(struct drbd_conf *, int),
- char *why);
+ char *why, enum bm_flag flags);
+extern int drbd_bitmap_io(struct drbd_conf *mdev,
+ int (*io_fn)(struct drbd_conf *),
+ char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
-extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
extern void drbd_go_diskless(struct drbd_conf *mdev);
extern void drbd_ldev_destroy(struct drbd_conf *mdev);
@@ -1313,6 +1344,7 @@ struct bm_extent {
#define BME_NO_WRITES 0 /* bm_extent.flags: no more requests on this one! */
#define BME_LOCKED 1 /* bm_extent.flags: syncer active on this one. */
+#define BME_PRIORITY 2 /* finish resync IO on this extent ASAP! App IO waiting! */
/* drbd_bitmap.c */
/*
@@ -1390,7 +1422,9 @@ struct bm_extent {
* you should use 64bit OS for that much storage, anyways. */
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else
-#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0x1LU << 32)
+/* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
+#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
+/* corresponds to (1UL << 38) bits right now. */
#endif
#endif
@@ -1398,7 +1432,7 @@ struct bm_extent {
* With a value of 8 all IO in one 128K block make it to the same slot of the
* hash table. */
#define HT_SHIFT 8
-#define DRBD_MAX_SEGMENT_SIZE (1U<<(9+HT_SHIFT))
+#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */
@@ -1410,16 +1444,20 @@ extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new
extern void drbd_bm_cleanup(struct drbd_conf *mdev);
extern void drbd_bm_set_all(struct drbd_conf *mdev);
extern void drbd_bm_clear_all(struct drbd_conf *mdev);
+/* set/clear/test only a few bits at a time */
extern int drbd_bm_set_bits(
struct drbd_conf *mdev, unsigned long s, unsigned long e);
extern int drbd_bm_clear_bits(
struct drbd_conf *mdev, unsigned long s, unsigned long e);
-/* bm_set_bits variant for use while holding drbd_bm_lock */
+extern int drbd_bm_count_bits(
+ struct drbd_conf *mdev, const unsigned long s, const unsigned long e);
+/* bm_set_bits variant for use while holding drbd_bm_lock,
+ * may process the whole bitmap in one go */
extern void _drbd_bm_set_bits(struct drbd_conf *mdev,
const unsigned long s, const unsigned long e);
extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
-extern int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local);
+extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
@@ -1427,6 +1465,8 @@ extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
extern size_t drbd_bm_words(struct drbd_conf *mdev);
extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
extern sector_t drbd_bm_capacity(struct drbd_conf *mdev);
+
+#define DRBD_END_OF_BITMAP (~(unsigned long)0)
extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
/* bm_find_next variants for use while you hold drbd_bm_lock() */
extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
@@ -1437,14 +1477,12 @@ extern int drbd_bm_rs_done(struct drbd_conf *mdev);
/* for receive_bitmap */
extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset,
size_t number, unsigned long *buffer);
-/* for _drbd_send_bitmap and drbd_bm_write_sect */
+/* for _drbd_send_bitmap */
extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset,
size_t number, unsigned long *buffer);
-extern void drbd_bm_lock(struct drbd_conf *mdev, char *why);
+extern void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags);
extern void drbd_bm_unlock(struct drbd_conf *mdev);
-
-extern int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e);
/* drbd_main.c */
extern struct kmem_cache *drbd_request_cache;
@@ -1467,7 +1505,7 @@ extern void drbd_free_mdev(struct drbd_conf *mdev);
extern int proc_details;
/* drbd_req */
-extern int drbd_make_request_26(struct request_queue *q, struct bio *bio);
+extern int drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);
@@ -1482,8 +1520,9 @@ enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew =
extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
-extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
- int force);
+extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
+ enum drbd_role new_role,
+ int force);
extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev);
extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev);
extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);
@@ -1499,6 +1538,7 @@ extern int drbd_resync_finished(struct drbd_conf *mdev);
extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev, sector_t sector, int rw);
extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
+extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
static inline void ov_oos_print(struct drbd_conf *mdev)
{
@@ -1522,21 +1562,23 @@ extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int);
-extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int);
+extern int w_resync_timer(struct drbd_conf *, struct drbd_work *, int);
extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int);
-extern int w_make_resync_request(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int);
+extern int w_send_oos(struct drbd_conf *, struct drbd_work *, int);
+extern int w_start_resync(struct drbd_conf *, struct drbd_work *, int);
extern void resync_timer_fn(unsigned long data);
+extern void start_resync_timer_fn(unsigned long data);
/* drbd_receiver.c */
-extern int drbd_rs_should_slow_down(struct drbd_conf *mdev);
+extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector);
extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
const unsigned rw, const int fault_type);
extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
@@ -1619,16 +1661,16 @@ extern int drbd_rs_del_all(struct drbd_conf *mdev);
extern void drbd_rs_failed_io(struct drbd_conf *mdev,
sector_t sector, int size);
extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *);
+extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go);
extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
int size, const char *file, const unsigned int line);
#define drbd_set_in_sync(mdev, sector, size) \
__drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__)
-extern void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
+extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
int size, const char *file, const unsigned int line);
#define drbd_set_out_of_sync(mdev, sector, size) \
__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
extern void drbd_al_apply_to_bm(struct drbd_conf *mdev);
-extern void drbd_al_to_on_disk_bm(struct drbd_conf *mdev);
extern void drbd_al_shrink(struct drbd_conf *mdev);
@@ -1747,11 +1789,11 @@ static inline void drbd_state_unlock(struct drbd_conf *mdev)
wake_up(&mdev->misc_wait);
}
-static inline int _drbd_set_state(struct drbd_conf *mdev,
- union drbd_state ns, enum chg_state_flags flags,
- struct completion *done)
+static inline enum drbd_state_rv
+_drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
+ enum chg_state_flags flags, struct completion *done)
{
- int rv;
+ enum drbd_state_rv rv;
read_lock(&global_state_lock);
rv = __drbd_set_state(mdev, ns, flags, done);
@@ -1982,17 +2024,17 @@ static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
static inline void drbd_thread_stop(struct drbd_thread *thi)
{
- _drbd_thread_stop(thi, FALSE, TRUE);
+ _drbd_thread_stop(thi, false, true);
}
static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
- _drbd_thread_stop(thi, FALSE, FALSE);
+ _drbd_thread_stop(thi, false, false);
}
static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
- _drbd_thread_stop(thi, TRUE, FALSE);
+ _drbd_thread_stop(thi, true, false);
}
 /* counts how many answer packets we expect from our peer,
@@ -2146,17 +2188,18 @@ extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
unsigned long *bits_left, unsigned int *per_mil_done)
{
- /*
- * this is to break it at compile time when we change that
- * (we may feel 4TB maximum storage per drbd is not enough)
- */
+ /* this is to break it at compile time when we change that, in case we
+ * want to support more than (1<<32) bits on a 32bit arch. */
typecheck(unsigned long, mdev->rs_total);
/* note: both rs_total and rs_left are in bits, i.e. in
* units of BM_BLOCK_SIZE.
* for the percentage, we don't care. */
- *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
+ if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
+ *bits_left = mdev->ov_left;
+ else
+ *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
/* >> 10 to prevent overflow,
* +1 to prevent division by zero */
if (*bits_left > mdev->rs_total) {
@@ -2171,10 +2214,19 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
*bits_left, mdev->rs_total, mdev->rs_failed);
*per_mil_done = 0;
} else {
- /* make sure the calculation happens in long context */
- unsigned long tmp = 1000UL -
- (*bits_left >> 10)*1000UL
- / ((mdev->rs_total >> 10) + 1UL);
+ /* Make sure the division happens in long context.
+ * We allow up to one petabyte storage right now,
+ * at a granularity of 4k per bit that is 2**38 bits.
+ * After shift right and multiplication by 1000,
+ * this should still fit easily into a 32bit long,
+ * so we don't need a 64bit division on 32bit arch.
+ * Note: currently we don't support such large bitmaps on 32bit
+ * arch anyways, but no harm done to be prepared for it here.
+ */
+ unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10;
+ unsigned long left = *bits_left >> shift;
+ unsigned long total = 1UL + (mdev->rs_total >> shift);
+ unsigned long tmp = 1000UL - left * 1000UL/total;
*per_mil_done = tmp;
}
}
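The shift chosen above keeps left * 1000 below 2^32 even for the largest supported bitmap, so the division stays within an unsigned long on 32bit. A worked standalone check of that arithmetic with assumed values (not driver state):

#include <stdio.h>

int main(void)
{
    /* 1 PiB at 4k per bit: 2^38 bits total, assume 25% still to go */
    unsigned long long rs_total  = 1ULL << 38;
    unsigned long long bits_left = rs_total / 4;

    /* pick the larger shift for huge bitmaps, as drbd_get_syncer_progress() does */
    unsigned int shift = rs_total >= (1ULL << 32) ? 16 : 10;

    unsigned long left  = (unsigned long)(bits_left >> shift);
    unsigned long total = 1UL + (unsigned long)(rs_total >> shift);
    unsigned long per_mil_done = 1000UL - left * 1000UL / total;

    /* left*1000 is at most about 2^22 * 1000 < 2^32, so this fits 32bit longs too */
    printf("per mil done: %lu\n", per_mil_done);   /* prints 751: the +1 div-by-zero
                                                    * guard nudges it off exactly 750 */
    return 0;
}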
@@ -2193,8 +2245,9 @@ static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
return mxb;
}
-static inline int drbd_state_is_stable(union drbd_state s)
+static inline int drbd_state_is_stable(struct drbd_conf *mdev)
{
+ union drbd_state s = mdev->state;
/* DO NOT add a default clause, we want the compiler to warn us
* for any newly introduced state we may have forgotten to add here */
@@ -2211,11 +2264,9 @@ static inline int drbd_state_is_stable(union drbd_state s)
case C_VERIFY_T:
case C_PAUSED_SYNC_S:
case C_PAUSED_SYNC_T:
- /* maybe stable, look at the disk state */
- break;
-
- /* no new io accepted during tansitional states
- * like handshake or teardown */
+ case C_AHEAD:
+ case C_BEHIND:
+ /* transitional states, IO allowed */
case C_DISCONNECTING:
case C_UNCONNECTED:
case C_TIMEOUT:
@@ -2226,7 +2277,15 @@ static inline int drbd_state_is_stable(union drbd_state s)
case C_WF_REPORT_PARAMS:
case C_STARTING_SYNC_S:
case C_STARTING_SYNC_T:
+ break;
+
+ /* Allow IO in BM exchange states with new protocols */
case C_WF_BITMAP_S:
+ if (mdev->agreed_pro_version < 96)
+ return 0;
+ break;
+
+ /* no new io accepted in these states */
case C_WF_BITMAP_T:
case C_WF_SYNC_UUID:
case C_MASK:
@@ -2261,41 +2320,47 @@ static inline int is_susp(union drbd_state s)
return s.susp || s.susp_nod || s.susp_fen;
}
-static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
+static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
{
int mxb = drbd_get_max_buffers(mdev);
if (is_susp(mdev->state))
- return 0;
+ return false;
if (test_bit(SUSPEND_IO, &mdev->flags))
- return 0;
+ return false;
/* to avoid potential deadlock or bitmap corruption,
* in various places, we only allow new application io
* to start during "stable" states. */
/* no new io accepted when attaching or detaching the disk */
- if (!drbd_state_is_stable(mdev->state))
- return 0;
+ if (!drbd_state_is_stable(mdev))
+ return false;
/* since some older kernels don't have atomic_add_unless,
* and we are within the spinlock anyways, we have this workaround. */
if (atomic_read(&mdev->ap_bio_cnt) > mxb)
- return 0;
+ return false;
if (test_bit(BITMAP_IO, &mdev->flags))
- return 0;
- return 1;
+ return false;
+ return true;
}
-/* I'd like to use wait_event_lock_irq,
- * but I'm not sure when it got introduced,
- * and not sure when it has 3 or 4 arguments */
-static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
+static inline bool inc_ap_bio_cond(struct drbd_conf *mdev, int count)
{
- /* compare with after_state_ch,
- * os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */
- DEFINE_WAIT(wait);
+ bool rv = false;
+
+ spin_lock_irq(&mdev->req_lock);
+ rv = may_inc_ap_bio(mdev);
+ if (rv)
+ atomic_add(count, &mdev->ap_bio_cnt);
+ spin_unlock_irq(&mdev->req_lock);
+
+ return rv;
+}
+static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
+{
/* we wait here
* as long as the device is suspended
* until the bitmap is no longer on the fly during connection
@@ -2304,16 +2369,7 @@ static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
* to avoid races with the reconnect code,
* we need to atomic_inc within the spinlock. */
- spin_lock_irq(&mdev->req_lock);
- while (!__inc_ap_bio_cond(mdev)) {
- prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&mdev->req_lock);
- schedule();
- finish_wait(&mdev->misc_wait, &wait);
- spin_lock_irq(&mdev->req_lock);
- }
- atomic_add(count, &mdev->ap_bio_cnt);
- spin_unlock_irq(&mdev->req_lock);
+ wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev, count));
}
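inc_ap_bio() now sleeps in wait_event() on a helper that re-checks the condition and bumps the counter under the request lock in a single step, so the increment cannot race with a state change. A pthread sketch of that check-and-commit-under-the-lock shape (all names invented for illustration):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static int ap_bio_cnt;
static bool suspended = true;

static void inc_ap_bio(int count)
{
    pthread_mutex_lock(&lock);
    while (suspended)                  /* stand-in for !may_inc_ap_bio() */
        pthread_cond_wait(&wake, &lock);
    ap_bio_cnt += count;               /* commit under the same lock: no race window */
    pthread_mutex_unlock(&lock);
}

static void *resume_io(void *arg)
{
    (void)arg;
    sleep(1);
    pthread_mutex_lock(&lock);
    suspended = false;
    pthread_mutex_unlock(&lock);
    pthread_cond_broadcast(&wake);     /* like wake_up(&mdev->misc_wait) */
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, resume_io, NULL);
    inc_ap_bio(1);                     /* blocks until resume_io() flips the flag */
    pthread_join(t, NULL);
    printf("ap_bio_cnt = %d\n", ap_bio_cnt);   /* prints 1 */
    return 0;
}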
static inline void dec_ap_bio(struct drbd_conf *mdev)
@@ -2333,9 +2389,11 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
}
}
-static inline void drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
+static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
{
+ int changed = mdev->ed_uuid != val;
mdev->ed_uuid = val;
+ return changed;
}
static inline int seq_cmp(u32 a, u32 b)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 8a43ce0edeed..dfc85f32d317 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -85,7 +85,8 @@ MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
-MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)");
+MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
+ __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
#include <linux/moduleparam.h>
@@ -115,7 +116,7 @@ module_param(fault_devs, int, 0644);
#endif
/* module parameter, defined */
-unsigned int minor_count = 32;
+unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
unsigned int cn_idx = CN_IDX_DRBD;
@@ -335,6 +336,7 @@ bail:
drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
}
+
/**
* _tl_restart() - Walks the transfer log, and applies an action to all requests
* @mdev: DRBD device.
@@ -456,7 +458,7 @@ void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
}
/**
- * cl_wide_st_chg() - TRUE if the state change is a cluster wide one
+ * cl_wide_st_chg() - true if the state change is a cluster wide one
* @mdev: DRBD device.
* @os: old (current) state.
* @ns: new (wanted) state.
@@ -473,12 +475,13 @@ static int cl_wide_st_chg(struct drbd_conf *mdev,
(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}
-int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
- union drbd_state mask, union drbd_state val)
+enum drbd_state_rv
+drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
+ union drbd_state mask, union drbd_state val)
{
unsigned long flags;
union drbd_state os, ns;
- int rv;
+ enum drbd_state_rv rv;
spin_lock_irqsave(&mdev->req_lock, flags);
os = mdev->state;
@@ -502,20 +505,22 @@ void drbd_force_state(struct drbd_conf *mdev,
drbd_change_state(mdev, CS_HARD, mask, val);
}
-static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns);
-static int is_valid_state_transition(struct drbd_conf *,
- union drbd_state, union drbd_state);
+static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
+static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
+ union drbd_state,
+ union drbd_state);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
union drbd_state ns, const char **warn_sync_abort);
int drbd_send_state_req(struct drbd_conf *,
union drbd_state, union drbd_state);
-static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
- union drbd_state mask, union drbd_state val)
+static enum drbd_state_rv
+_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
+ union drbd_state val)
{
union drbd_state os, ns;
unsigned long flags;
- int rv;
+ enum drbd_state_rv rv;
if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
return SS_CW_SUCCESS;
@@ -536,7 +541,7 @@ static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
if (rv == SS_SUCCESS) {
rv = is_valid_state_transition(mdev, ns, os);
if (rv == SS_SUCCESS)
- rv = 0; /* cont waiting, otherwise fail. */
+ rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
}
}
spin_unlock_irqrestore(&mdev->req_lock, flags);
@@ -554,14 +559,14 @@ static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
* Should not be called directly, use drbd_request_state() or
* _drbd_request_state().
*/
-static int drbd_req_state(struct drbd_conf *mdev,
- union drbd_state mask, union drbd_state val,
- enum chg_state_flags f)
+static enum drbd_state_rv
+drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
+ union drbd_state val, enum chg_state_flags f)
{
struct completion done;
unsigned long flags;
union drbd_state os, ns;
- int rv;
+ enum drbd_state_rv rv;
init_completion(&done);
@@ -636,10 +641,11 @@ abort:
* Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
* flag, or when logging of failed state change requests is not desired.
*/
-int _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
- union drbd_state val, enum chg_state_flags f)
+enum drbd_state_rv
+_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
+ union drbd_state val, enum chg_state_flags f)
{
- int rv;
+ enum drbd_state_rv rv;
wait_event(mdev->state_wait,
(rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
@@ -663,8 +669,8 @@ static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
);
}
-void print_st_err(struct drbd_conf *mdev,
- union drbd_state os, union drbd_state ns, int err)
+void print_st_err(struct drbd_conf *mdev, union drbd_state os,
+ union drbd_state ns, enum drbd_state_rv err)
{
if (err == SS_IN_TRANSIENT_STATE)
return;
@@ -674,32 +680,18 @@ void print_st_err(struct drbd_conf *mdev,
}
-#define drbd_peer_str drbd_role_str
-#define drbd_pdsk_str drbd_disk_str
-
-#define drbd_susp_str(A) ((A) ? "1" : "0")
-#define drbd_aftr_isp_str(A) ((A) ? "1" : "0")
-#define drbd_peer_isp_str(A) ((A) ? "1" : "0")
-#define drbd_user_isp_str(A) ((A) ? "1" : "0")
-
-#define PSC(A) \
- ({ if (ns.A != os.A) { \
- pbp += sprintf(pbp, #A "( %s -> %s ) ", \
- drbd_##A##_str(os.A), \
- drbd_##A##_str(ns.A)); \
- } })
-
/**
* is_valid_state() - Returns an SS_ error code if ns is not valid
* @mdev: DRBD device.
* @ns: State to consider.
*/
-static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
+static enum drbd_state_rv
+is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
/* See drbd_state_sw_errors in drbd_strings.c */
enum drbd_fencing_p fp;
- int rv = SS_SUCCESS;
+ enum drbd_state_rv rv = SS_SUCCESS;
fp = FP_DONT_CARE;
if (get_ldev(mdev)) {
@@ -762,10 +754,11 @@ static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
* @ns: new state.
* @os: old state.
*/
-static int is_valid_state_transition(struct drbd_conf *mdev,
- union drbd_state ns, union drbd_state os)
+static enum drbd_state_rv
+is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
+ union drbd_state os)
{
- int rv = SS_SUCCESS;
+ enum drbd_state_rv rv = SS_SUCCESS;
if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
os.conn > C_CONNECTED)
@@ -800,6 +793,10 @@ static int is_valid_state_transition(struct drbd_conf *mdev,
os.conn < C_CONNECTED)
rv = SS_NEED_CONNECTION;
+ if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
+ && os.conn < C_WF_REPORT_PARAMS)
+ rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
+
return rv;
}
@@ -817,6 +814,7 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
union drbd_state ns, const char **warn_sync_abort)
{
enum drbd_fencing_p fp;
+ enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
fp = FP_DONT_CARE;
if (get_ldev(mdev)) {
@@ -869,56 +867,6 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
ns.conn = C_CONNECTED;
}
- if (ns.conn >= C_CONNECTED &&
- ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) ||
- (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T))) {
- switch (ns.conn) {
- case C_WF_BITMAP_T:
- case C_PAUSED_SYNC_T:
- ns.disk = D_OUTDATED;
- break;
- case C_CONNECTED:
- case C_WF_BITMAP_S:
- case C_SYNC_SOURCE:
- case C_PAUSED_SYNC_S:
- ns.disk = D_UP_TO_DATE;
- break;
- case C_SYNC_TARGET:
- ns.disk = D_INCONSISTENT;
- dev_warn(DEV, "Implicitly set disk state Inconsistent!\n");
- break;
- }
- if (os.disk == D_OUTDATED && ns.disk == D_UP_TO_DATE)
- dev_warn(DEV, "Implicitly set disk from Outdated to UpToDate\n");
- }
-
- if (ns.conn >= C_CONNECTED &&
- (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)) {
- switch (ns.conn) {
- case C_CONNECTED:
- case C_WF_BITMAP_T:
- case C_PAUSED_SYNC_T:
- case C_SYNC_TARGET:
- ns.pdsk = D_UP_TO_DATE;
- break;
- case C_WF_BITMAP_S:
- case C_PAUSED_SYNC_S:
- /* remap any consistent state to D_OUTDATED,
- * but disallow "upgrade" of not even consistent states.
- */
- ns.pdsk =
- (D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED)
- ? os.pdsk : D_OUTDATED;
- break;
- case C_SYNC_SOURCE:
- ns.pdsk = D_INCONSISTENT;
- dev_warn(DEV, "Implicitly set pdsk Inconsistent!\n");
- break;
- }
- if (os.pdsk == D_OUTDATED && ns.pdsk == D_UP_TO_DATE)
- dev_warn(DEV, "Implicitly set pdsk from Outdated to UpToDate\n");
- }
-
/* Connection breaks down before we finished "Negotiating" */
if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
get_ldev_if_state(mdev, D_NEGOTIATING)) {
@@ -933,6 +881,94 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
put_ldev(mdev);
}
+ /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
+ if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
+ if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
+ ns.disk = D_UP_TO_DATE;
+ if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
+ ns.pdsk = D_UP_TO_DATE;
+ }
+
+ /* Implications of the connection state on the disk states */
+ disk_min = D_DISKLESS;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_INCONSISTENT;
+ pdsk_max = D_UNKNOWN;
+ switch ((enum drbd_conns)ns.conn) {
+ case C_WF_BITMAP_T:
+ case C_PAUSED_SYNC_T:
+ case C_STARTING_SYNC_T:
+ case C_WF_SYNC_UUID:
+ case C_BEHIND:
+ disk_min = D_INCONSISTENT;
+ disk_max = D_OUTDATED;
+ pdsk_min = D_UP_TO_DATE;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_VERIFY_S:
+ case C_VERIFY_T:
+ disk_min = D_UP_TO_DATE;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_UP_TO_DATE;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_CONNECTED:
+ disk_min = D_DISKLESS;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_DISKLESS;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_WF_BITMAP_S:
+ case C_PAUSED_SYNC_S:
+ case C_STARTING_SYNC_S:
+ case C_AHEAD:
+ disk_min = D_UP_TO_DATE;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_INCONSISTENT;
+ pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary */
+ break;
+ case C_SYNC_TARGET:
+ disk_min = D_INCONSISTENT;
+ disk_max = D_INCONSISTENT;
+ pdsk_min = D_UP_TO_DATE;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_SYNC_SOURCE:
+ disk_min = D_UP_TO_DATE;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_INCONSISTENT;
+ pdsk_max = D_INCONSISTENT;
+ break;
+ case C_STANDALONE:
+ case C_DISCONNECTING:
+ case C_UNCONNECTED:
+ case C_TIMEOUT:
+ case C_BROKEN_PIPE:
+ case C_NETWORK_FAILURE:
+ case C_PROTOCOL_ERROR:
+ case C_TEAR_DOWN:
+ case C_WF_CONNECTION:
+ case C_WF_REPORT_PARAMS:
+ case C_MASK:
+ break;
+ }
+ if (ns.disk > disk_max)
+ ns.disk = disk_max;
+
+ if (ns.disk < disk_min) {
+ dev_warn(DEV, "Implicitly set disk from %s to %s\n",
+ drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
+ ns.disk = disk_min;
+ }
+ if (ns.pdsk > pdsk_max)
+ ns.pdsk = pdsk_max;
+
+ if (ns.pdsk < pdsk_min) {
+ dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
+ drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
+ ns.pdsk = pdsk_min;
+ }
+
if (fp == FP_STONITH &&
(ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
!(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
@@ -961,6 +997,10 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
+ if (mdev->agreed_pro_version < 90)
+ mdev->ov_start_sector = 0;
+ mdev->rs_total = drbd_bm_bits(mdev);
+ mdev->ov_position = 0;
if (cs == C_VERIFY_T) {
/* starting online verify from an arbitrary position
* does not fit well into the existing protocol.
@@ -970,11 +1010,15 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
mdev->ov_start_sector = ~(sector_t)0;
} else {
unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
- if (bit >= mdev->rs_total)
+ if (bit >= mdev->rs_total) {
mdev->ov_start_sector =
BM_BIT_TO_SECT(mdev->rs_total - 1);
+ mdev->rs_total = 1;
+ } else
+ mdev->rs_total -= bit;
mdev->ov_position = mdev->ov_start_sector;
}
+ mdev->ov_left = mdev->rs_total;
}
static void drbd_resume_al(struct drbd_conf *mdev)
@@ -992,12 +1036,12 @@ static void drbd_resume_al(struct drbd_conf *mdev)
*
* Caller needs to hold req_lock, and global_state_lock. Do not call directly.
*/
-int __drbd_set_state(struct drbd_conf *mdev,
- union drbd_state ns, enum chg_state_flags flags,
- struct completion *done)
+enum drbd_state_rv
+__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
+ enum chg_state_flags flags, struct completion *done)
{
union drbd_state os;
- int rv = SS_SUCCESS;
+ enum drbd_state_rv rv = SS_SUCCESS;
const char *warn_sync_abort = NULL;
struct after_state_chg_work *ascw;
@@ -1033,22 +1077,46 @@ int __drbd_set_state(struct drbd_conf *mdev,
dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
{
- char *pbp, pb[300];
- pbp = pb;
- *pbp = 0;
- PSC(role);
- PSC(peer);
- PSC(conn);
- PSC(disk);
- PSC(pdsk);
- if (is_susp(ns) != is_susp(os))
- pbp += sprintf(pbp, "susp( %s -> %s ) ",
- drbd_susp_str(is_susp(os)),
- drbd_susp_str(is_susp(ns)));
- PSC(aftr_isp);
- PSC(peer_isp);
- PSC(user_isp);
- dev_info(DEV, "%s\n", pb);
+ char *pbp, pb[300];
+ pbp = pb;
+ *pbp = 0;
+ if (ns.role != os.role)
+ pbp += sprintf(pbp, "role( %s -> %s ) ",
+ drbd_role_str(os.role),
+ drbd_role_str(ns.role));
+ if (ns.peer != os.peer)
+ pbp += sprintf(pbp, "peer( %s -> %s ) ",
+ drbd_role_str(os.peer),
+ drbd_role_str(ns.peer));
+ if (ns.conn != os.conn)
+ pbp += sprintf(pbp, "conn( %s -> %s ) ",
+ drbd_conn_str(os.conn),
+ drbd_conn_str(ns.conn));
+ if (ns.disk != os.disk)
+ pbp += sprintf(pbp, "disk( %s -> %s ) ",
+ drbd_disk_str(os.disk),
+ drbd_disk_str(ns.disk));
+ if (ns.pdsk != os.pdsk)
+ pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
+ drbd_disk_str(os.pdsk),
+ drbd_disk_str(ns.pdsk));
+ if (is_susp(ns) != is_susp(os))
+ pbp += sprintf(pbp, "susp( %d -> %d ) ",
+ is_susp(os),
+ is_susp(ns));
+ if (ns.aftr_isp != os.aftr_isp)
+ pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
+ os.aftr_isp,
+ ns.aftr_isp);
+ if (ns.peer_isp != os.peer_isp)
+ pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
+ os.peer_isp,
+ ns.peer_isp);
+ if (ns.user_isp != os.user_isp)
+ pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
+ os.user_isp,
+ ns.user_isp);
+ dev_info(DEV, "%s\n", pb);
}
/* solve the race between becoming unconfigured,
@@ -1074,6 +1142,10 @@ int __drbd_set_state(struct drbd_conf *mdev,
atomic_inc(&mdev->local_cnt);
mdev->state = ns;
+
+ if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
+ drbd_print_uuids(mdev, "attached to UUIDs");
+
wake_up(&mdev->misc_wait);
wake_up(&mdev->state_wait);
@@ -1081,7 +1153,7 @@ int __drbd_set_state(struct drbd_conf *mdev,
if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
ns.conn < C_CONNECTED) {
mdev->ov_start_sector =
- BM_BIT_TO_SECT(mdev->rs_total - mdev->ov_left);
+ BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
dev_info(DEV, "Online Verify reached sector %llu\n",
(unsigned long long)mdev->ov_start_sector);
}
@@ -1106,14 +1178,7 @@ int __drbd_set_state(struct drbd_conf *mdev,
unsigned long now = jiffies;
int i;
- mdev->ov_position = 0;
- mdev->rs_total = drbd_bm_bits(mdev);
- if (mdev->agreed_pro_version >= 90)
- set_ov_position(mdev, ns.conn);
- else
- mdev->ov_start_sector = 0;
- mdev->ov_left = mdev->rs_total
- - BM_SECT_TO_BIT(mdev->ov_position);
+ set_ov_position(mdev, ns.conn);
mdev->rs_start = now;
mdev->rs_last_events = 0;
mdev->rs_last_sect_ev = 0;
@@ -1121,10 +1186,12 @@ int __drbd_set_state(struct drbd_conf *mdev,
mdev->ov_last_oos_start = 0;
for (i = 0; i < DRBD_SYNC_MARKS; i++) {
- mdev->rs_mark_left[i] = mdev->rs_total;
+ mdev->rs_mark_left[i] = mdev->ov_left;
mdev->rs_mark_time[i] = now;
}
+ drbd_rs_controller_reset(mdev);
+
if (ns.conn == C_VERIFY_S) {
dev_info(DEV, "Starting Online Verify from sector %llu\n",
(unsigned long long)mdev->ov_position);
@@ -1228,6 +1295,26 @@ static void abw_start_sync(struct drbd_conf *mdev, int rv)
}
}
+int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
+ int (*io_fn)(struct drbd_conf *),
+ char *why, enum bm_flag flags)
+{
+ int rv;
+
+ D_ASSERT(current == mdev->worker.task);
+
+ /* open coded non-blocking drbd_suspend_io(mdev); */
+ set_bit(SUSPEND_IO, &mdev->flags);
+
+ drbd_bm_lock(mdev, why, flags);
+ rv = io_fn(mdev);
+ drbd_bm_unlock(mdev);
+
+ drbd_resume_io(mdev);
+
+ return rv;
+}
+
/**
* after_state_ch() - Perform after state change actions that may sleep
* @mdev: DRBD device.
@@ -1266,16 +1353,14 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
nsm.i = -1;
if (ns.susp_nod) {
- if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
- if (ns.conn == C_CONNECTED)
- what = resend, nsm.susp_nod = 0;
- else /* ns.conn > C_CONNECTED */
- dev_err(DEV, "Unexpected Resynd going on!\n");
- }
+ if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
+ what = resend;
if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
- what = restart_frozen_disk_io, nsm.susp_nod = 0;
+ what = restart_frozen_disk_io;
+ if (what != nothing)
+ nsm.susp_nod = 0;
}
if (ns.susp_fen) {
@@ -1306,13 +1391,30 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
spin_unlock_irq(&mdev->req_lock);
}
+ /* Became sync source. With protocol >= 96, we still need to send out
+ * the sync uuid now. Need to do that before any drbd_send_state, or
+ * the other side may go "paused sync" before receiving the sync uuids,
+ * which is unexpected. */
+ if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
+ (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
+ mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
+ drbd_gen_and_send_sync_uuid(mdev);
+ put_ldev(mdev);
+ }
+
/* Do not change the order of the if above and the two below... */
if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
drbd_send_uuids(mdev);
drbd_send_state(mdev);
}
- if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S)
- drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)");
+ /* No point in queuing send_bitmap if we don't have a connection
+ * anymore, so check also the _current_ state, not only the new state
+ * at the time this work was queued. */
+ if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
+ mdev->state.conn == C_WF_BITMAP_S)
+ drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
+ "send_bitmap (WFBitMapS)",
+ BM_LOCKED_TEST_ALLOWED);
/* Lost contact to peer's copy of the data */
if ((os.pdsk >= D_INCONSISTENT &&
@@ -1343,7 +1445,23 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* D_DISKLESS Peer becomes secondary */
if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
- drbd_al_to_on_disk_bm(mdev);
+ /* We may still be Primary ourselves.
+ * No harm done if the bitmap still changes,
+ * redirtied pages will follow later. */
+ drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
+ "demote diskless peer", BM_LOCKED_SET_ALLOWED);
+ put_ldev(mdev);
+ }
+
+ /* Write out all changed bits on demote.
+ * Though, no need to do that just yet
+ * if there is a resync still going on */
+ if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
+ mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
+ /* No changes to the bitmap expected this time, so assert that,
+ * even though no harm was done if it did change. */
+ drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
+ "demote", BM_LOCKED_TEST_ALLOWED);
put_ldev(mdev);
}
@@ -1371,15 +1489,23 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
drbd_send_state(mdev);
+ if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
+ drbd_send_state(mdev);
+
/* We are in the progress to start a full sync... */
if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
(os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
- drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");
+ /* no other bitmap changes expected during this phase */
+ drbd_queue_bitmap_io(mdev,
+ &drbd_bmio_set_n_write, &abw_start_sync,
+ "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
/* We are invalidating our self... */
if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
- drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
+ /* other bitmap operations are expected during this phase */
+ drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
+ "set_n_write from invalidate", BM_LOCKED_MASK);
/* first half of local IO error, failure to attach,
* or administrative detach */
@@ -1434,8 +1560,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
if (drbd_send_state(mdev))
dev_warn(DEV, "Notified peer that I'm now diskless.\n");
- else
- dev_err(DEV, "Sending state for being diskless failed\n");
/* corresponding get_ldev in __drbd_set_state
* this may finaly trigger drbd_ldev_destroy. */
put_ldev(mdev);
@@ -1459,6 +1583,19 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
drbd_send_state(mdev);
+ /* This triggers bitmap writeout of potentially still unwritten pages
+ * if the resync finished cleanly, or aborted because of peer disk
+ * failure, or because of connection loss.
+ * For resync aborted because of local disk failure, we cannot do
+ * any bitmap writeout anymore.
+ * No harm done if some bits change during this phase.
+ */
+ if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
+ drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
+ "write from resync_finished", BM_LOCKED_SET_ALLOWED);
+ put_ldev(mdev);
+ }
+
/* free tl_hash if we Got thawed and are C_STANDALONE */
if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
drbd_free_tl_hash(mdev);
@@ -1559,7 +1696,7 @@ int drbd_thread_start(struct drbd_thread *thi)
if (!try_module_get(THIS_MODULE)) {
dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
spin_unlock_irqrestore(&thi->t_lock, flags);
- return FALSE;
+ return false;
}
init_completion(&thi->stop);
@@ -1576,7 +1713,7 @@ int drbd_thread_start(struct drbd_thread *thi)
dev_err(DEV, "Couldn't start thread\n");
module_put(THIS_MODULE);
- return FALSE;
+ return false;
}
spin_lock_irqsave(&thi->t_lock, flags);
thi->task = nt;
@@ -1596,7 +1733,7 @@ int drbd_thread_start(struct drbd_thread *thi)
break;
}
- return TRUE;
+ return true;
}
@@ -1694,8 +1831,8 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
{
int sent, ok;
- ERR_IF(!h) return FALSE;
- ERR_IF(!size) return FALSE;
+ ERR_IF(!h) return false;
+ ERR_IF(!size) return false;
h->magic = BE_DRBD_MAGIC;
h->command = cpu_to_be16(cmd);
@@ -1704,8 +1841,8 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
sent = drbd_send(mdev, sock, h, size, msg_flags);
ok = (sent == size);
- if (!ok)
- dev_err(DEV, "short sent %s size=%d sent=%d\n",
+ if (!ok && !signal_pending(current))
+ dev_warn(DEV, "short sent %s size=%d sent=%d\n",
cmdname(cmd), (int)size, sent);
return ok;
}
@@ -1840,7 +1977,7 @@ int drbd_send_protocol(struct drbd_conf *mdev)
else {
dev_err(DEV, "--dry-run is not supported by peer");
kfree(p);
- return 0;
+ return -1;
}
}
p->conn_flags = cpu_to_be32(cf);
@@ -1888,12 +2025,36 @@ int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
return _drbd_send_uuids(mdev, 8);
}
+void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
+{
+ if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
+ u64 *uuid = mdev->ldev->md.uuid;
+ dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
+ text,
+ (unsigned long long)uuid[UI_CURRENT],
+ (unsigned long long)uuid[UI_BITMAP],
+ (unsigned long long)uuid[UI_HISTORY_START],
+ (unsigned long long)uuid[UI_HISTORY_END]);
+ put_ldev(mdev);
+ } else {
+ dev_info(DEV, "%s effective data uuid: %016llX\n",
+ text,
+ (unsigned long long)mdev->ed_uuid);
+ }
+}
-int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
+int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
struct p_rs_uuid p;
+ u64 uuid;
- p.uuid = cpu_to_be64(val);
+ D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
+
+ uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
+ drbd_uuid_set(mdev, UI_BITMAP, uuid);
+ drbd_print_uuids(mdev, "updated sync UUID");
+ drbd_md_sync(mdev);
+ p.uuid = cpu_to_be64(uuid);
return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
(struct p_header80 *)&p, sizeof(p));
@@ -1921,7 +2082,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
p.d_size = cpu_to_be64(d_size);
p.u_size = cpu_to_be64(u_size);
p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
- p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue));
+ p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9);
p.queue_order_type = cpu_to_be16(q_order_type);
p.dds_flags = cpu_to_be16(flags);
@@ -1972,7 +2133,7 @@ int drbd_send_state_req(struct drbd_conf *mdev,
(struct p_header80 *)&p, sizeof(p));
}
-int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode)
+int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
struct p_req_state_reply p;
@@ -2076,9 +2237,15 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
return len;
}
-enum { OK, FAILED, DONE }
+/**
+ * send_bitmap_rle_or_plain
+ *
+ * Return 0 when done, 1 when another iteration is needed, and a negative error
+ * code upon failure.
+ */
+static int
send_bitmap_rle_or_plain(struct drbd_conf *mdev,
- struct p_header80 *h, struct bm_xfer_ctx *c)
+ struct p_header80 *h, struct bm_xfer_ctx *c)
{
struct p_compressed_bm *p = (void*)h;
unsigned long num_words;
@@ -2088,7 +2255,7 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev,
len = fill_bitmap_rle_bits(mdev, p, c);
if (len < 0)
- return FAILED;
+ return -EIO;
if (len) {
DCBP_set_code(p, RLE_VLI_Bits);
@@ -2118,11 +2285,14 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev,
if (c->bit_offset > c->bm_bits)
c->bit_offset = c->bm_bits;
}
- ok = ok ? ((len == 0) ? DONE : OK) : FAILED;
-
- if (ok == DONE)
- INFO_bm_xfer_stats(mdev, "send", c);
- return ok;
+ if (ok) {
+ if (len == 0) {
+ INFO_bm_xfer_stats(mdev, "send", c);
+ return 0;
+ } else
+ return 1;
+ }
+ return -EIO;
}
/* See the comment at receive_bitmap() */
@@ -2130,16 +2300,16 @@ int _drbd_send_bitmap(struct drbd_conf *mdev)
{
struct bm_xfer_ctx c;
struct p_header80 *p;
- int ret;
+ int err;
- ERR_IF(!mdev->bitmap) return FALSE;
+ ERR_IF(!mdev->bitmap) return false;
/* maybe we should use some per thread scratch page,
* and allocate that during initial device creation? */
p = (struct p_header80 *) __get_free_page(GFP_NOIO);
if (!p) {
dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
- return FALSE;
+ return false;
}
if (get_ldev(mdev)) {
@@ -2165,11 +2335,11 @@ int _drbd_send_bitmap(struct drbd_conf *mdev)
};
do {
- ret = send_bitmap_rle_or_plain(mdev, p, &c);
- } while (ret == OK);
+ err = send_bitmap_rle_or_plain(mdev, p, &c);
+ } while (err > 0);
free_page((unsigned long) p);
- return (ret == DONE);
+ return err == 0;
}
int drbd_send_bitmap(struct drbd_conf *mdev)
@@ -2192,7 +2362,7 @@ int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
p.set_size = cpu_to_be32(set_size);
if (mdev->state.conn < C_CONNECTED)
- return FALSE;
+ return false;
ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
(struct p_header80 *)&p, sizeof(p));
return ok;
@@ -2220,7 +2390,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
- return FALSE;
+ return false;
ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
(struct p_header80 *)&p, sizeof(p));
return ok;
@@ -2326,8 +2496,8 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
}
/* called on sndtimeo
- * returns FALSE if we should retry,
- * TRUE if we think connection is dead
+ * returns false if we should retry,
+ * true if we think connection is dead
*/
static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
{
@@ -2340,7 +2510,7 @@ static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *
|| mdev->state.conn < C_CONNECTED;
if (drop_it)
- return TRUE;
+ return true;
drop_it = !--mdev->ko_count;
if (!drop_it) {
@@ -2531,13 +2701,39 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
if (ok && dgs) {
dgb = mdev->int_dig_out;
drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
- ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
+ ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
}
if (ok) {
- if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
+ /* For protocol A, we have to memcpy the payload into
+ * socket buffers, as we may complete right away
+ * as soon as we handed it over to tcp, at which point the data
+ * pages may become invalid.
+ *
+ * For data-integrity enabled, we copy it as well, so we can be
+ * sure that even if the bio pages may still be modified, it
+ * won't change the data on the wire, thus if the digest checks
+ * out ok after sending on this side, but does not fit on the
+ * receiving side, we sure have detected corruption elsewhere.
+ */
+ if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
ok = _drbd_send_bio(mdev, req->master_bio);
else
ok = _drbd_send_zc_bio(mdev, req->master_bio);
+
+ /* double check digest, sometimes buffers have been modified in flight. */
+ if (dgs > 0 && dgs <= 64) {
+ /* 64 byte, 512 bit, is the largest digest size
+ * currently supported in kernel crypto. */
+ unsigned char digest[64];
+ drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
+ if (memcmp(mdev->int_dig_out, digest, dgs)) {
+ dev_warn(DEV,
+ "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
+ (unsigned long long)req->sector, req->size);
+ }
+ } /* else if (dgs > 64) {
+ ... Be noisy about digest too large ...
+ } */
}
drbd_put_data_sock(mdev);
@@ -2587,7 +2783,7 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
if (ok && dgs) {
dgb = mdev->int_dig_out;
drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
- ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
+ ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
}
if (ok)
ok = _drbd_send_zc_ee(mdev, e);
@@ -2597,6 +2793,16 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
return ok;
}
+int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
+{
+ struct p_block_desc p;
+
+ p.sector = cpu_to_be64(req->sector);
+ p.blksize = cpu_to_be32(req->size);
+
+ return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
+}
+
/*
drbd_send distinguishes two cases:
@@ -2770,6 +2976,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
atomic_set(&mdev->pp_in_use_by_net, 0);
atomic_set(&mdev->rs_sect_in, 0);
atomic_set(&mdev->rs_sect_ev, 0);
+ atomic_set(&mdev->ap_in_flight, 0);
mutex_init(&mdev->md_io_mutex);
mutex_init(&mdev->data.mutex);
@@ -2798,19 +3005,27 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
INIT_LIST_HEAD(&mdev->unplug_work.list);
INIT_LIST_HEAD(&mdev->go_diskless.list);
INIT_LIST_HEAD(&mdev->md_sync_work.list);
+ INIT_LIST_HEAD(&mdev->start_resync_work.list);
INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
- mdev->resync_work.cb = w_resync_inactive;
+ mdev->resync_work.cb = w_resync_timer;
mdev->unplug_work.cb = w_send_write_hint;
mdev->go_diskless.cb = w_go_diskless;
mdev->md_sync_work.cb = w_md_sync;
mdev->bm_io_work.w.cb = w_bitmap_io;
+ mdev->start_resync_work.cb = w_start_resync;
init_timer(&mdev->resync_timer);
init_timer(&mdev->md_sync_timer);
+ init_timer(&mdev->start_resync_timer);
+ init_timer(&mdev->request_timer);
mdev->resync_timer.function = resync_timer_fn;
mdev->resync_timer.data = (unsigned long) mdev;
mdev->md_sync_timer.function = md_sync_timer_fn;
mdev->md_sync_timer.data = (unsigned long) mdev;
+ mdev->start_resync_timer.function = start_resync_timer_fn;
+ mdev->start_resync_timer.data = (unsigned long) mdev;
+ mdev->request_timer.function = request_timer_fn;
+ mdev->request_timer.data = (unsigned long) mdev;
init_waitqueue_head(&mdev->misc_wait);
init_waitqueue_head(&mdev->state_wait);
@@ -2881,6 +3096,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
D_ASSERT(list_empty(&mdev->resync_work.list));
D_ASSERT(list_empty(&mdev->unplug_work.list));
D_ASSERT(list_empty(&mdev->go_diskless.list));
+
+ drbd_set_defaults(mdev);
}
@@ -2923,7 +3140,7 @@ static void drbd_destroy_mempools(void)
static int drbd_create_mempools(void)
{
struct page *page;
- const int number = (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE) * minor_count;
+ const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
int i;
/* prepare our caches and mempools */
@@ -3087,11 +3304,20 @@ static void drbd_cleanup(void)
unregister_reboot_notifier(&drbd_notifier);
+ /* first remove proc,
+ * drbdsetup uses its presence to detect
+ * whether DRBD is loaded.
+ * If we got stuck in proc removal,
+ * but have netlink already deregistered,
+ * some drbdsetup commands may wait forever
+ * for an answer.
+ */
+ if (drbd_proc)
+ remove_proc_entry("drbd", NULL);
+
drbd_nl_cleanup();
if (minor_table) {
- if (drbd_proc)
- remove_proc_entry("drbd", NULL);
i = minor_count;
while (i--)
drbd_delete_device(i);
@@ -3119,7 +3345,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
char reason = '-';
int r = 0;
- if (!__inc_ap_bio_cond(mdev)) {
+ if (!may_inc_ap_bio(mdev)) {
/* DRBD has frozen IO */
r = bdi_bits;
reason = 'd';
@@ -3172,7 +3398,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
goto out_no_disk;
mdev->vdisk = disk;
- set_disk_ro(disk, TRUE);
+ set_disk_ro(disk, true);
disk->queue = q;
disk->major = DRBD_MAJOR;
@@ -3188,8 +3414,8 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
q->backing_dev_info.congested_fn = drbd_congested;
q->backing_dev_info.congested_data = mdev;
- blk_queue_make_request(q, drbd_make_request_26);
- blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
+ blk_queue_make_request(q, drbd_make_request);
+ blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(q, drbd_merge_bvec);
q->queue_lock = &mdev->req_lock;
@@ -3251,6 +3477,7 @@ void drbd_free_mdev(struct drbd_conf *mdev)
put_disk(mdev->vdisk);
blk_cleanup_queue(mdev->rq_queue);
free_cpumask_var(mdev->cpu_mask);
+ drbd_free_tl_hash(mdev);
kfree(mdev);
}
@@ -3266,7 +3493,7 @@ int __init drbd_init(void)
return -EINVAL;
}
- if (1 > minor_count || minor_count > 255) {
+ if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
printk(KERN_ERR
"drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
@@ -3448,7 +3675,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
/* this was a try anyways ... */
dev_err(DEV, "meta data update failed!\n");
- drbd_chk_io_error(mdev, 1, TRUE);
+ drbd_chk_io_error(mdev, 1, true);
}
/* Update mdev->ldev->md.la_size_sect,
@@ -3464,7 +3691,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
* @mdev: DRBD device.
* @bdev: Device from which the meta data should be read in.
*
- * Return 0 (NO_ERROR) on success, and an enum drbd_ret_codes in case
+ * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
* something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
*/
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
@@ -3534,28 +3761,6 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
return rv;
}
-static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index)
-{
- static char *uuid_str[UI_EXTENDED_SIZE] = {
- [UI_CURRENT] = "CURRENT",
- [UI_BITMAP] = "BITMAP",
- [UI_HISTORY_START] = "HISTORY_START",
- [UI_HISTORY_END] = "HISTORY_END",
- [UI_SIZE] = "SIZE",
- [UI_FLAGS] = "FLAGS",
- };
-
- if (index >= UI_EXTENDED_SIZE) {
- dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n");
- return;
- }
-
- dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n",
- uuid_str[index],
- (unsigned long long)mdev->ldev->md.uuid[index]);
-}
-
-
/**
* drbd_md_mark_dirty() - Mark meta data super block as dirty
* @mdev: DRBD device.
@@ -3585,10 +3790,8 @@ static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
int i;
- for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) {
+ for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
- debug_drbd_uuid(mdev, i+1);
- }
}
void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
@@ -3603,7 +3806,6 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
}
mdev->ldev->md.uuid[idx] = val;
- debug_drbd_uuid(mdev, idx);
drbd_md_mark_dirty(mdev);
}
@@ -3613,7 +3815,6 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
if (mdev->ldev->md.uuid[idx]) {
drbd_uuid_move_history(mdev);
mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
- debug_drbd_uuid(mdev, UI_HISTORY_START);
}
_drbd_uuid_set(mdev, idx, val);
}
@@ -3628,14 +3829,16 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
u64 val;
+ unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+
+ if (bm_uuid)
+ dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
- dev_info(DEV, "Creating new current UUID\n");
- D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
- debug_drbd_uuid(mdev, UI_BITMAP);
get_random_bytes(&val, sizeof(u64));
_drbd_uuid_set(mdev, UI_CURRENT, val);
+ drbd_print_uuids(mdev, "new current UUID");
/* get it to stable storage _now_ */
drbd_md_sync(mdev);
}
@@ -3649,16 +3852,12 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
drbd_uuid_move_history(mdev);
mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
mdev->ldev->md.uuid[UI_BITMAP] = 0;
- debug_drbd_uuid(mdev, UI_HISTORY_START);
- debug_drbd_uuid(mdev, UI_BITMAP);
} else {
- if (mdev->ldev->md.uuid[UI_BITMAP])
- dev_warn(DEV, "bm UUID already set");
-
- mdev->ldev->md.uuid[UI_BITMAP] = val;
- mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
+ unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ if (bm_uuid)
+ dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
- debug_drbd_uuid(mdev, UI_BITMAP);
+ mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
}
drbd_md_mark_dirty(mdev);
}
@@ -3714,15 +3913,19 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
struct bm_io_work *work = container_of(w, struct bm_io_work, w);
- int rv;
+ int rv = -EIO;
D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
- drbd_bm_lock(mdev, work->why);
- rv = work->io_fn(mdev);
- drbd_bm_unlock(mdev);
+ if (get_ldev(mdev)) {
+ drbd_bm_lock(mdev, work->why, work->flags);
+ rv = work->io_fn(mdev);
+ drbd_bm_unlock(mdev);
+ put_ldev(mdev);
+ }
clear_bit(BITMAP_IO, &mdev->flags);
+ smp_mb__after_clear_bit();
wake_up(&mdev->misc_wait);
if (work->done)
@@ -3730,6 +3933,7 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
work->why = NULL;
+ work->flags = 0;
return 1;
}
@@ -3784,7 +3988,7 @@ void drbd_go_diskless(struct drbd_conf *mdev)
void drbd_queue_bitmap_io(struct drbd_conf *mdev,
int (*io_fn)(struct drbd_conf *),
void (*done)(struct drbd_conf *, int),
- char *why)
+ char *why, enum bm_flag flags)
{
D_ASSERT(current == mdev->worker.task);
@@ -3798,15 +4002,15 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
mdev->bm_io_work.io_fn = io_fn;
mdev->bm_io_work.done = done;
mdev->bm_io_work.why = why;
+ mdev->bm_io_work.flags = flags;
+ spin_lock_irq(&mdev->req_lock);
set_bit(BITMAP_IO, &mdev->flags);
if (atomic_read(&mdev->ap_bio_cnt) == 0) {
- if (list_empty(&mdev->bm_io_work.w.list)) {
- set_bit(BITMAP_IO_QUEUED, &mdev->flags);
+ if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
- } else
- dev_err(DEV, "FIXME avoided double queuing bm_io_work\n");
}
+ spin_unlock_irq(&mdev->req_lock);
}
/**
@@ -3818,19 +4022,22 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
 * freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
*/
-int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
+int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
+ char *why, enum bm_flag flags)
{
int rv;
D_ASSERT(current != mdev->worker.task);
- drbd_suspend_io(mdev);
+ if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
+ drbd_suspend_io(mdev);
- drbd_bm_lock(mdev, why);
+ drbd_bm_lock(mdev, why, flags);
rv = io_fn(mdev);
drbd_bm_unlock(mdev);
- drbd_resume_io(mdev);
+ if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
+ drbd_resume_io(mdev);
return rv;
}
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index fe81c851ca88..03b29f78a37d 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -288,10 +288,11 @@ void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
}
-int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
+enum drbd_state_rv
+drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
const int max_tries = 4;
- int r = 0;
+ enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
int try = 0;
int forced = 0;
union drbd_state mask, val;
@@ -306,17 +307,17 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
val.i = 0; val.role = new_role;
while (try++ < max_tries) {
- r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
+ rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
/* in case we first succeeded to outdate,
* but now suddenly could establish a connection */
- if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
+ if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
val.pdsk = 0;
mask.pdsk = 0;
continue;
}
- if (r == SS_NO_UP_TO_DATE_DISK && force &&
+ if (rv == SS_NO_UP_TO_DATE_DISK && force &&
(mdev->state.disk < D_UP_TO_DATE &&
mdev->state.disk >= D_INCONSISTENT)) {
mask.disk = D_MASK;
@@ -325,7 +326,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
continue;
}
- if (r == SS_NO_UP_TO_DATE_DISK &&
+ if (rv == SS_NO_UP_TO_DATE_DISK &&
mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
nps = drbd_try_outdate_peer(mdev);
@@ -341,9 +342,9 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
continue;
}
- if (r == SS_NOTHING_TO_DO)
+ if (rv == SS_NOTHING_TO_DO)
goto fail;
- if (r == SS_PRIMARY_NOP && mask.pdsk == 0) {
+ if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
nps = drbd_try_outdate_peer(mdev);
if (force && nps > D_OUTDATED) {
@@ -356,25 +357,24 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
continue;
}
- if (r == SS_TWO_PRIMARIES) {
+ if (rv == SS_TWO_PRIMARIES) {
/* Maybe the peer is detected as dead very soon...
retry at most once more in this case. */
- __set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
+ schedule_timeout_interruptible((mdev->net_conf->ping_timeo+1)*HZ/10);
if (try < max_tries)
try = max_tries - 1;
continue;
}
- if (r < SS_SUCCESS) {
- r = _drbd_request_state(mdev, mask, val,
+ if (rv < SS_SUCCESS) {
+ rv = _drbd_request_state(mdev, mask, val,
CS_VERBOSE + CS_WAIT_COMPLETE);
- if (r < SS_SUCCESS)
+ if (rv < SS_SUCCESS)
goto fail;
}
break;
}
- if (r < SS_SUCCESS)
+ if (rv < SS_SUCCESS)
goto fail;
if (forced)
@@ -384,7 +384,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
if (new_role == R_SECONDARY) {
- set_disk_ro(mdev->vdisk, TRUE);
+ set_disk_ro(mdev->vdisk, true);
if (get_ldev(mdev)) {
mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
put_ldev(mdev);
@@ -394,7 +394,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
mdev->net_conf->want_lose = 0;
put_net_conf(mdev);
}
- set_disk_ro(mdev->vdisk, FALSE);
+ set_disk_ro(mdev->vdisk, false);
if (get_ldev(mdev)) {
if (((mdev->state.conn < C_CONNECTED ||
mdev->state.pdsk <= D_FAILED)
@@ -406,10 +406,8 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
}
}
- if ((new_role == R_SECONDARY) && get_ldev(mdev)) {
- drbd_al_to_on_disk_bm(mdev);
- put_ldev(mdev);
- }
+ /* writeout of activity log covered areas of the bitmap
+ * to stable storage is already done in the after-state-change handler */
if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
/* if this was forced, we should consider sync */
@@ -423,7 +421,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
mutex_unlock(&mdev->state_mutex);
- return r;
+ return rv;
}
static struct drbd_conf *ensure_mdev(int minor, int create)
@@ -528,17 +526,19 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
}
}
+/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
- /* Needs 9 bytes at max. */
+ /* Needs 9 bytes at max including trailing NUL:
+ * -1ULL ==> "16384 EB" */
static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
int base = 0;
- while (size >= 10000) {
+ while (size >= 10000 && base < sizeof(units)-1) {
/* shift + round */
size = (size >> 10) + !!(size & (1<<9));
base++;
}
- sprintf(buf, "%lu %cB", (long)size, units[base]);
+ sprintf(buf, "%u %cB", (unsigned)size, units[base]);
return buf;
}
@@ -642,11 +642,19 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_
|| prev_size != mdev->ldev->md.md_size_sect;
if (la_size_changed || md_moved) {
+ int err;
+
drbd_al_shrink(mdev); /* All extents inactive. */
dev_info(DEV, "Writing the whole bitmap, %s\n",
la_size_changed && md_moved ? "size changed and md moved" :
la_size_changed ? "size changed" : "md moved");
- rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */
+ /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
+ err = drbd_bitmap_io(mdev, &drbd_bm_write,
+ "size changed", BM_LOCKED_MASK);
+ if (err) {
+ rv = dev_size_error;
+ goto out;
+ }
drbd_md_mark_dirty(mdev);
}
@@ -765,22 +773,21 @@ static int drbd_check_al_size(struct drbd_conf *mdev)
return 0;
}
-void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local)
+void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) __must_hold(local)
{
struct request_queue * const q = mdev->rq_queue;
struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
int max_segments = mdev->ldev->dc.max_bio_bvecs;
+ int max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
- max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
-
- blk_queue_max_hw_sectors(q, max_seg_s >> 9);
- blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
- blk_queue_max_segment_size(q, max_seg_s);
blk_queue_logical_block_size(q, 512);
- blk_queue_segment_boundary(q, PAGE_SIZE-1);
- blk_stack_limits(&q->limits, &b->limits, 0);
+ blk_queue_max_hw_sectors(q, max_hw_sectors);
+ /* This is the workaround for "bio would need to, but cannot, be split" */
+ blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
+ blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
+ blk_queue_stack_limits(q, b);
- dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q));
+ dev_info(DEV, "max BIO size = %u\n", queue_max_hw_sectors(q) << 9);
if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
@@ -850,7 +857,7 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
- enum drbd_ret_codes retcode;
+ enum drbd_ret_code retcode;
enum determine_dev_size dd;
sector_t max_possible_sectors;
sector_t min_md_device_sectors;
@@ -858,8 +865,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
struct block_device *bdev;
struct lru_cache *resync_lru = NULL;
union drbd_state ns, os;
- unsigned int max_seg_s;
- int rv;
+ unsigned int max_bio_size;
+ enum drbd_state_rv rv;
int cp_discovered = 0;
int logical_block_size;
@@ -1005,9 +1012,10 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
/* and for any other previously queued work */
drbd_flush_workqueue(mdev);
- retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
+ rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
+ retcode = rv; /* FIXME: Type mismatch. */
drbd_resume_io(mdev);
- if (retcode < SS_SUCCESS)
+ if (rv < SS_SUCCESS)
goto fail;
if (!get_ldev_if_state(mdev, D_ATTACHING))
@@ -1109,20 +1117,20 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
mdev->read_cnt = 0;
mdev->writ_cnt = 0;
- max_seg_s = DRBD_MAX_SEGMENT_SIZE;
+ max_bio_size = DRBD_MAX_BIO_SIZE;
if (mdev->state.conn == C_CONNECTED) {
/* We are Primary, Connected, and now attach a new local
* backing store. We must not increase the user visible maximum
* bio size on this device to something the peer may not be
* able to handle. */
if (mdev->agreed_pro_version < 94)
- max_seg_s = queue_max_segment_size(mdev->rq_queue);
+ max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
else if (mdev->agreed_pro_version == 94)
- max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
+ max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
/* else: drbd 8.3.9 and later, stay with default */
}
- drbd_setup_queue_param(mdev, max_seg_s);
+ drbd_setup_queue_param(mdev, max_bio_size);
/* If I am currently not R_PRIMARY,
* but meta data primary indicator is set,
@@ -1154,12 +1162,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
dev_info(DEV, "Assuming that all blocks are out of sync "
"(aka FullSync)\n");
- if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
+ if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
+ "set_n_write from attaching", BM_LOCKED_MASK)) {
retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
} else {
- if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
+ if (drbd_bitmap_io(mdev, &drbd_bm_read,
+ "read from attaching", BM_LOCKED_MASK) < 0) {
retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
@@ -1167,7 +1177,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (cp_discovered) {
drbd_al_apply_to_bm(mdev);
- drbd_al_to_on_disk_bm(mdev);
+ if (drbd_bitmap_io(mdev, &drbd_bm_write,
+ "crashed primary apply AL", BM_LOCKED_MASK)) {
+ retcode = ERR_IO_MD_DISK;
+ goto force_diskless_dec;
+ }
}
if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
@@ -1279,7 +1293,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
int i, ns;
- enum drbd_ret_codes retcode;
+ enum drbd_ret_code retcode;
struct net_conf *new_conf = NULL;
struct crypto_hash *tfm = NULL;
struct crypto_hash *integrity_w_tfm = NULL;
@@ -1324,6 +1338,8 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
new_conf->wire_protocol = DRBD_PROT_C;
new_conf->ping_timeo = DRBD_PING_TIMEO_DEF;
new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF;
+ new_conf->on_congestion = DRBD_ON_CONGESTION_DEF;
+ new_conf->cong_extents = DRBD_CONG_EXTENTS_DEF;
if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
retcode = ERR_MANDATORY_TAG;
@@ -1345,6 +1361,11 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
}
}
+ if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
+ retcode = ERR_CONG_NOT_PROTO_A;
+ goto fail;
+ }
+
if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
retcode = ERR_DISCARD;
goto fail;
@@ -1525,6 +1546,21 @@ static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
struct drbd_nl_cfg_reply *reply)
{
int retcode;
+ struct disconnect dc;
+
+ memset(&dc, 0, sizeof(struct disconnect));
+ if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) {
+ retcode = ERR_MANDATORY_TAG;
+ goto fail;
+ }
+
+ if (dc.force) {
+ spin_lock_irq(&mdev->req_lock);
+ if (mdev->state.conn >= C_WF_CONNECTION)
+ _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL);
+ spin_unlock_irq(&mdev->req_lock);
+ goto done;
+ }
retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);
@@ -1842,6 +1878,10 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
{
int retcode;
+ /* If there is still bitmap IO pending, probably because of a previous
+ * resync just being finished, wait for it before requesting a new resync. */
+ wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
@@ -1877,6 +1917,10 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
{
int retcode;
+ /* If there is still bitmap IO pending, probably because of a previous
+ * resync just being finished, wait for it before requesting a new resync. */
+ wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
if (retcode < SS_SUCCESS) {
@@ -1885,9 +1929,9 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
into a full resync. */
retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
if (retcode >= SS_SUCCESS) {
- /* open coded drbd_bitmap_io() */
if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
- "set_n_write from invalidate_peer"))
+ "set_n_write from invalidate_peer",
+ BM_LOCKED_SET_ALLOWED))
retcode = ERR_IO_MD_DISK;
}
} else
@@ -1914,9 +1958,17 @@ static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
struct drbd_nl_cfg_reply *reply)
{
int retcode = NO_ERROR;
+ union drbd_state s;
- if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO)
- retcode = ERR_PAUSE_IS_CLEAR;
+ if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
+ s = mdev->state;
+ if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
+ retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
+ s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
+ } else {
+ retcode = ERR_PAUSE_IS_CLEAR;
+ }
+ }
reply->ret_code = retcode;
return 0;
@@ -2054,6 +2106,11 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
reply->ret_code = ERR_MANDATORY_TAG;
return 0;
}
+
+ /* If there is still bitmap IO pending, e.g. previous resync or verify
+ * just being finished, wait for it before requesting a new resync. */
+ wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+
/* w_make_ov_request expects position to be aligned */
mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
@@ -2097,7 +2154,8 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
if (args.clear_bm) {
- err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
+ err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
+ "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
if (err) {
dev_err(DEV, "Writing bitmap failed with %d\n",err);
retcode = ERR_IO_MD_DISK;
@@ -2105,6 +2163,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
if (skip_initial_sync) {
drbd_send_uuids_skip_initial_sync(mdev);
_drbd_uuid_set(mdev, UI_BITMAP, 0);
+ drbd_print_uuids(mdev, "cleared bitmap UUID");
spin_lock_irq(&mdev->req_lock);
_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
CS_VERBOSE, NULL);
@@ -2189,7 +2248,8 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
goto fail;
}
- if (nlp->packet_type >= P_nl_after_last_packet) {
+ if (nlp->packet_type >= P_nl_after_last_packet ||
+ nlp->packet_type == P_return_code_only) {
retcode = ERR_PACKET_NR;
goto fail;
}
@@ -2205,7 +2265,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
reply_size += cm->reply_body_size;
/* allocation not in the IO path, cqueue thread context */
- cn_reply = kmalloc(reply_size, GFP_KERNEL);
+ cn_reply = kzalloc(reply_size, GFP_KERNEL);
if (!cn_reply) {
retcode = ERR_NOMEM;
goto fail;
@@ -2213,7 +2273,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
reply = (struct drbd_nl_cfg_reply *) cn_reply->data;
reply->packet_type =
- cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
+ cm->reply_body_size ? nlp->packet_type : P_return_code_only;
reply->minor = nlp->drbd_minor;
reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */
/* reply->tag_list; might be modified by cm->function. */
@@ -2376,7 +2436,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
/* receiver thread context, which is not in the writeout path (of this node),
* but may be in the writeout path of the _other_ node.
* GFP_NOIO to avoid potential "distributed deadlock". */
- cn_reply = kmalloc(
+ cn_reply = kzalloc(
sizeof(struct cn_msg)+
sizeof(struct drbd_nl_cfg_reply)+
sizeof(struct dump_ee_tag_len_struct)+
@@ -2398,10 +2458,11 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
tl = tl_add_int(tl, T_ee_sector, &e->sector);
tl = tl_add_int(tl, T_ee_block_id, &e->block_id);
+ /* dump the first 32k */
+ len = min_t(unsigned, e->size, 32 << 10);
put_unaligned(T_ee_data, tl++);
- put_unaligned(e->size, tl++);
+ put_unaligned(len, tl++);
- len = e->size;
page = e->pages;
page_chain_for_each(page) {
void *d = kmap_atomic(page, KM_USER0);
@@ -2410,6 +2471,8 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
kunmap_atomic(d, KM_USER0);
tl = (unsigned short*)((char*)tl + l);
len -= l;
+ if (len == 0)
+ break;
}
put_unaligned(TT_END, tl++); /* Close the tag list */
@@ -2508,6 +2571,7 @@ void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
(struct drbd_nl_cfg_reply *)cn_reply->data;
int rr;
+ memset(buffer, 0, sizeof(buffer));
cn_reply->id = req->id;
cn_reply->seq = req->seq;
@@ -2515,6 +2579,7 @@ void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
cn_reply->flags = 0;
+ reply->packet_type = P_return_code_only;
reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
reply->ret_code = ret_code;
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 7e6ac307e2de..2959cdfb77f5 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -34,6 +34,7 @@
#include "drbd_int.h"
static int drbd_proc_open(struct inode *inode, struct file *file);
+static int drbd_proc_release(struct inode *inode, struct file *file);
struct proc_dir_entry *drbd_proc;
@@ -42,9 +43,22 @@ const struct file_operations drbd_proc_fops = {
.open = drbd_proc_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .release = drbd_proc_release,
};
+void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
+{
+ /* v is in kB/sec. We don't expect TiByte/sec yet. */
+ if (unlikely(v >= 1000000)) {
+ /* cool: > GiByte/s */
+ seq_printf(seq, "%ld,", v / 1000000);
+ v /= 1000000;
+ seq_printf(seq, "%03ld,%03ld", v/1000, v % 1000);
+ } else if (likely(v >= 1000))
+ seq_printf(seq, "%ld,%03ld", v/1000, v % 1000);
+ else
+ seq_printf(seq, "%ld", v);
+}
/*lge
 * progress bars shamelessly adapted from drivers/md/md.c
@@ -71,10 +85,15 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
seq_printf(seq, ".");
seq_printf(seq, "] ");
- seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10);
- /* if more than 1 GB display in MB */
- if (mdev->rs_total > 0x100000L)
- seq_printf(seq, "(%lu/%lu)M\n\t",
+ if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
+ seq_printf(seq, "verified:");
+ else
+ seq_printf(seq, "sync'ed:");
+ seq_printf(seq, "%3u.%u%% ", res / 10, res % 10);
+
+ /* if more than a few GB, display in MB */
+ if (mdev->rs_total > (4UL << (30 - BM_BLOCK_SHIFT)))
+ seq_printf(seq, "(%lu/%lu)M",
(unsigned long) Bit2KB(rs_left >> 10),
(unsigned long) Bit2KB(mdev->rs_total >> 10));
else
@@ -94,6 +113,7 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
/* Rolling marks. last_mark+1 may just now be modified. last_mark+2 is
* at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at
* least DRBD_SYNC_MARK_STEP time before it will be modified. */
+ /* ------------------------ ~18s average ------------------------ */
i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS;
dt = (jiffies - mdev->rs_mark_time[i]) / HZ;
if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS))
@@ -107,14 +127,24 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
seq_printf(seq, "finish: %lu:%02lu:%02lu",
rt / 3600, (rt % 3600) / 60, rt % 60);
- /* current speed average over (SYNC_MARKS * SYNC_MARK_STEP) jiffies */
dbdt = Bit2KB(db/dt);
- if (dbdt > 1000)
- seq_printf(seq, " speed: %ld,%03ld",
- dbdt/1000, dbdt % 1000);
- else
- seq_printf(seq, " speed: %ld", dbdt);
+ seq_printf(seq, " speed: ");
+ seq_printf_with_thousands_grouping(seq, dbdt);
+ seq_printf(seq, " (");
+ /* ------------------------- ~3s average ------------------------ */
+ if (proc_details >= 1) {
+ /* this is what drbd_rs_should_slow_down() uses */
+ i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
+ dt = (jiffies - mdev->rs_mark_time[i]) / HZ;
+ if (!dt)
+ dt++;
+ db = mdev->rs_mark_left[i] - rs_left;
+ dbdt = Bit2KB(db/dt);
+ seq_printf_with_thousands_grouping(seq, dbdt);
+ seq_printf(seq, " -- ");
+ }
+ /* --------------------- long term average ---------------------- */
/* mean speed since syncer started
* we do account for PausedSync periods */
dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
@@ -122,20 +152,34 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
dt = 1;
db = mdev->rs_total - rs_left;
dbdt = Bit2KB(db/dt);
- if (dbdt > 1000)
- seq_printf(seq, " (%ld,%03ld)",
- dbdt/1000, dbdt % 1000);
- else
- seq_printf(seq, " (%ld)", dbdt);
+ seq_printf_with_thousands_grouping(seq, dbdt);
+ seq_printf(seq, ")");
- if (mdev->state.conn == C_SYNC_TARGET) {
- if (mdev->c_sync_rate > 1000)
- seq_printf(seq, " want: %d,%03d",
- mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000);
- else
- seq_printf(seq, " want: %d", mdev->c_sync_rate);
+ if (mdev->state.conn == C_SYNC_TARGET ||
+ mdev->state.conn == C_VERIFY_S) {
+ seq_printf(seq, " want: ");
+ seq_printf_with_thousands_grouping(seq, mdev->c_sync_rate);
}
seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : "");
+
+ if (proc_details >= 1) {
+ /* 64 bit:
+ * we convert to sectors in the display below. */
+ unsigned long bm_bits = drbd_bm_bits(mdev);
+ unsigned long bit_pos;
+ if (mdev->state.conn == C_VERIFY_S ||
+ mdev->state.conn == C_VERIFY_T)
+ bit_pos = bm_bits - mdev->ov_left;
+ else
+ bit_pos = mdev->bm_resync_fo;
+ /* Total sectors may be slightly off for oddly
+ * sized devices. So what. */
+ seq_printf(seq,
+ "\t%3d%% sector pos: %llu/%llu\n",
+ (int)(bit_pos / (bm_bits/100+1)),
+ (unsigned long long)bit_pos * BM_SECT_PER_BIT,
+ (unsigned long long)bm_bits * BM_SECT_PER_BIT);
+ }
}
static void resync_dump_detail(struct seq_file *seq, struct lc_element *e)
@@ -232,20 +276,16 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
mdev->epochs,
write_ordering_chars[mdev->write_ordering]
);
- seq_printf(seq, " oos:%lu\n",
- Bit2KB(drbd_bm_total_weight(mdev)));
+ seq_printf(seq, " oos:%llu\n",
+ Bit2KB((unsigned long long)
+ drbd_bm_total_weight(mdev)));
}
if (mdev->state.conn == C_SYNC_SOURCE ||
- mdev->state.conn == C_SYNC_TARGET)
+ mdev->state.conn == C_SYNC_TARGET ||
+ mdev->state.conn == C_VERIFY_S ||
+ mdev->state.conn == C_VERIFY_T)
drbd_syncer_progress(mdev, seq);
- if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
- seq_printf(seq, "\t%3d%% %lu/%lu\n",
- (int)((mdev->rs_total-mdev->ov_left) /
- (mdev->rs_total/100+1)),
- mdev->rs_total - mdev->ov_left,
- mdev->rs_total);
-
if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) {
lc_seq_printf_stats(seq, mdev->resync);
lc_seq_printf_stats(seq, mdev->act_log);
@@ -265,7 +305,15 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
static int drbd_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, drbd_seq_show, PDE(inode)->data);
+ if (try_module_get(THIS_MODULE))
+ return single_open(file, drbd_seq_show, PDE(inode)->data);
+ return -ENODEV;
+}
+
+static int drbd_proc_release(struct inode *inode, struct file *file)
+{
+ module_put(THIS_MODULE);
+ return single_release(inode, file);
}
/* PROC FS stuff end */
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 8e68be939deb..fe1564c7d8b6 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -277,7 +277,7 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
int i;
- if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
+ if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
i = page_chain_free(page);
else {
struct page *tmp;
@@ -319,7 +319,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
struct page *page;
unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
- if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
+ if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
return NULL;
e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
@@ -725,16 +725,16 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
char tb[4];
if (!*sock)
- return FALSE;
+ return false;
rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
if (rr > 0 || rr == -EAGAIN) {
- return TRUE;
+ return true;
} else {
sock_release(*sock);
*sock = NULL;
- return FALSE;
+ return false;
}
}
@@ -768,8 +768,7 @@ static int drbd_connect(struct drbd_conf *mdev)
if (s || ++try >= 3)
break;
/* give the other side time to call bind() & listen() */
- __set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ / 10);
+ schedule_timeout_interruptible(HZ / 10);
}
if (s) {
@@ -788,8 +787,7 @@ static int drbd_connect(struct drbd_conf *mdev)
}
if (sock && msock) {
- __set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ / 10);
+ schedule_timeout_interruptible(HZ / 10);
ok = drbd_socket_okay(mdev, &sock);
ok = drbd_socket_okay(mdev, &msock) && ok;
if (ok)
@@ -906,7 +904,7 @@ retry:
put_ldev(mdev);
}
- if (!drbd_send_protocol(mdev))
+ if (drbd_send_protocol(mdev) == -1)
return -1;
drbd_send_sync_param(mdev, &mdev->sync_conf);
drbd_send_sizes(mdev, 0, 0);
@@ -914,6 +912,7 @@ retry:
drbd_send_state(mdev);
clear_bit(USE_DEGR_WFC_T, &mdev->flags);
clear_bit(RESIZE_PENDING, &mdev->flags);
+ mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
return 1;
@@ -932,8 +931,9 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
r = drbd_recv(mdev, h, sizeof(*h));
if (unlikely(r != sizeof(*h))) {
- dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
- return FALSE;
+ if (!signal_pending(current))
+ dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
+ return false;
}
if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
@@ -947,11 +947,11 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
be32_to_cpu(h->h80.magic),
be16_to_cpu(h->h80.command),
be16_to_cpu(h->h80.length));
- return FALSE;
+ return false;
}
mdev->last_received = jiffies;
- return TRUE;
+ return true;
}
static void drbd_flush(struct drbd_conf *mdev)
@@ -1074,6 +1074,16 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
* @mdev: DRBD device.
* @e: epoch entry
* @rw: flag field, see bio->bi_rw
+ *
+ * May spread the pages to multiple bios,
+ * depending on bio_add_page restrictions.
+ *
+ * Returns 0 if all bios have been submitted,
+ * -ENOMEM if we could not allocate enough bios,
+ * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
+ * single page to an empty bio (which should never happen and likely indicates
+ * that the lower level IO stack is in some way broken). This has been observed
+ * on certain Xen deployments.
*/
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
@@ -1086,6 +1096,7 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
unsigned ds = e->size;
unsigned n_bios = 0;
unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
+ int err = -ENOMEM;
/* In most cases, we will only need one bio. But in case the lower
* level restrictions happen to be different at this offset on this
@@ -1111,8 +1122,17 @@ next_bio:
page_chain_for_each(page) {
unsigned len = min_t(unsigned, ds, PAGE_SIZE);
if (!bio_add_page(bio, page, len, 0)) {
- /* a single page must always be possible! */
- BUG_ON(bio->bi_vcnt == 0);
+ /* A single page must always be possible!
+ * But in case it fails anyway,
+ * we deal with it, and complain (below). */
+ if (bio->bi_vcnt == 0) {
+ dev_err(DEV,
+ "bio_add_page failed for len=%u, "
+ "bi_vcnt=0 (bi_sector=%llu)\n",
+ len, (unsigned long long)bio->bi_sector);
+ err = -ENOSPC;
+ goto fail;
+ }
goto next_bio;
}
ds -= len;
@@ -1138,7 +1158,7 @@ fail:
bios = bios->bi_next;
bio_put(bio);
}
- return -ENOMEM;
+ return err;
}
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
@@ -1160,7 +1180,7 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
switch (mdev->write_ordering) {
case WO_none:
if (rv == FE_RECYCLED)
- return TRUE;
+ return true;
/* receiver context, in the writeout path of the other node.
* avoid potential distributed deadlock */
@@ -1188,10 +1208,10 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
D_ASSERT(atomic_read(&epoch->active) == 0);
D_ASSERT(epoch->flags == 0);
- return TRUE;
+ return true;
default:
dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
- return FALSE;
+ return false;
}
epoch->flags = 0;
@@ -1209,7 +1229,7 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
}
spin_unlock(&mdev->epoch_lock);
- return TRUE;
+ return true;
}
/* used from receive_RSDataReply (recv_resync_read)
@@ -1231,21 +1251,25 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
if (dgs) {
rr = drbd_recv(mdev, dig_in, dgs);
if (rr != dgs) {
- dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
- rr, dgs);
+ if (!signal_pending(current))
+ dev_warn(DEV,
+ "short read receiving data digest: read %d expected %d\n",
+ rr, dgs);
return NULL;
}
}
data_size -= dgs;
+ ERR_IF(data_size == 0) return NULL;
ERR_IF(data_size & 0x1ff) return NULL;
- ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;
+ ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;
/* even though we trust our peer,
* we sometimes have to double check. */
if (sector + (data_size>>9) > capacity) {
- dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
+ dev_err(DEV, "request from peer beyond end of local disk: "
+ "capacity: %llus < sector: %llus + size: %u\n",
(unsigned long long)capacity,
(unsigned long long)sector, data_size);
return NULL;
@@ -1264,15 +1288,16 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
unsigned len = min_t(int, ds, PAGE_SIZE);
data = kmap(page);
rr = drbd_recv(mdev, data, len);
- if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
+ if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
dev_err(DEV, "Fault injection: Corrupting data on receive\n");
data[0] = data[0] ^ (unsigned long)-1;
}
kunmap(page);
if (rr != len) {
drbd_free_ee(mdev, e);
- dev_warn(DEV, "short read receiving data: read %d expected %d\n",
- rr, len);
+ if (!signal_pending(current))
+ dev_warn(DEV, "short read receiving data: read %d expected %d\n",
+ rr, len);
return NULL;
}
ds -= rr;
@@ -1281,7 +1306,8 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
if (dgs) {
drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
- dev_err(DEV, "Digest integrity check FAILED.\n");
+ dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
+ (unsigned long long)sector, data_size);
drbd_bcast_ee(mdev, "digest failed",
dgs, dig_in, dig_vv, e);
drbd_free_ee(mdev, e);
@@ -1302,7 +1328,7 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
void *data;
if (!data_size)
- return TRUE;
+ return true;
page = drbd_pp_alloc(mdev, 1, 1);
@@ -1311,8 +1337,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
if (rr != min_t(int, data_size, PAGE_SIZE)) {
rv = 0;
- dev_warn(DEV, "short read receiving data: read %d expected %d\n",
- rr, min_t(int, data_size, PAGE_SIZE));
+ if (!signal_pending(current))
+ dev_warn(DEV,
+ "short read receiving data: read %d expected %d\n",
+ rr, min_t(int, data_size, PAGE_SIZE));
break;
}
data_size -= rr;
@@ -1337,8 +1365,10 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
if (dgs) {
rr = drbd_recv(mdev, dig_in, dgs);
if (rr != dgs) {
- dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
- rr, dgs);
+ if (!signal_pending(current))
+ dev_warn(DEV,
+ "short read receiving data reply digest: read %d expected %d\n",
+ rr, dgs);
return 0;
}
}
@@ -1359,9 +1389,10 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
expect);
kunmap(bvec->bv_page);
if (rr != expect) {
- dev_warn(DEV, "short read receiving data reply: "
- "read %d expected %d\n",
- rr, expect);
+ if (!signal_pending(current))
+ dev_warn(DEV, "short read receiving data reply: "
+ "read %d expected %d\n",
+ rr, expect);
return 0;
}
data_size -= rr;
@@ -1425,11 +1456,10 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
atomic_add(data_size >> 9, &mdev->rs_sect_ev);
if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
- return TRUE;
+ return true;
- /* drbd_submit_ee currently fails for one reason only:
- * not being able to allocate enough bios.
- * Is dropping the connection going to help? */
+ /* don't care for the reason here */
+ dev_err(DEV, "submit failed, triggering re-connect\n");
spin_lock_irq(&mdev->req_lock);
list_del(&e->w.list);
spin_unlock_irq(&mdev->req_lock);
@@ -1437,7 +1467,7 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
drbd_free_ee(mdev, e);
fail:
put_ldev(mdev);
- return FALSE;
+ return false;
}
static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
@@ -1454,7 +1484,7 @@ static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
spin_unlock_irq(&mdev->req_lock);
if (unlikely(!req)) {
dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
- return FALSE;
+ return false;
}
/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
@@ -1611,15 +1641,15 @@ static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
return ret;
}
-static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
+/* see also bio_flags_to_wire()
+ * DRBD_REQ_*, because we need to semantically map the flags to data packet
+ * flags and back. We may replicate to other kernel versions. */
+static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{
- if (mdev->agreed_pro_version >= 95)
- return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
- (dpf & DP_FUA ? REQ_FUA : 0) |
- (dpf & DP_FLUSH ? REQ_FUA : 0) |
- (dpf & DP_DISCARD ? REQ_DISCARD : 0);
- else
- return dpf & DP_RW_SYNC ? REQ_SYNC : 0;
+ return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
+ (dpf & DP_FUA ? REQ_FUA : 0) |
+ (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
+ (dpf & DP_DISCARD ? REQ_DISCARD : 0);
}
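/* Sketch only: the bio_flags_to_wire() counterpart referenced in the comment
 * above lives on the sending side (drbd_main.c) and is not shown in this hunk.
 * Presumably it performs the inverse mapping and keeps the protocol-version
 * check that was dropped here on the receiving side, roughly: */
static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
{
	if (mdev->agreed_pro_version >= 95)
		return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
			(bi_rw & REQ_FUA ? DP_FUA : 0) |
			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
	else
		return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
}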
/* mirrored write */
@@ -1632,9 +1662,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
u32 dp_flags;
if (!get_ldev(mdev)) {
- if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Can not write mirrored data block "
- "to local disk.\n");
spin_lock(&mdev->peer_seq_lock);
if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
mdev->peer_seq++;
@@ -1654,23 +1681,23 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
e = read_in_block(mdev, p->block_id, sector, data_size);
if (!e) {
put_ldev(mdev);
- return FALSE;
+ return false;
}
e->w.cb = e_end_block;
+ dp_flags = be32_to_cpu(p->dp_flags);
+ rw |= wire_flags_to_bio(mdev, dp_flags);
+
+ if (dp_flags & DP_MAY_SET_IN_SYNC)
+ e->flags |= EE_MAY_SET_IN_SYNC;
+
spin_lock(&mdev->epoch_lock);
e->epoch = mdev->current_epoch;
atomic_inc(&e->epoch->epoch_size);
atomic_inc(&e->epoch->active);
spin_unlock(&mdev->epoch_lock);
- dp_flags = be32_to_cpu(p->dp_flags);
- rw |= write_flags_to_bio(mdev, dp_flags);
-
- if (dp_flags & DP_MAY_SET_IN_SYNC)
- e->flags |= EE_MAY_SET_IN_SYNC;
-
/* I'm the receiver, I do hold a net_cnt reference. */
if (!mdev->net_conf->two_primaries) {
spin_lock_irq(&mdev->req_lock);
@@ -1773,7 +1800,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
put_ldev(mdev);
wake_asender(mdev);
finish_wait(&mdev->misc_wait, &wait);
- return TRUE;
+ return true;
}
if (signal_pending(current)) {
@@ -1829,11 +1856,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
}
if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
- return TRUE;
+ return true;
- /* drbd_submit_ee currently fails for one reason only:
- * not being able to allocate enough bios.
- * Is dropping the connection going to help? */
+ /* don't care for the reason here */
+ dev_err(DEV, "submit failed, triggering re-connect\n");
spin_lock_irq(&mdev->req_lock);
list_del(&e->w.list);
hlist_del_init(&e->colision);
@@ -1842,12 +1868,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
drbd_al_complete_io(mdev, e->sector);
out_interrupted:
- /* yes, the epoch_size now is imbalanced.
- * but we drop the connection anyways, so we don't have a chance to
- * receive a barrier... atomic_inc(&mdev->epoch_size); */
+ drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
put_ldev(mdev);
drbd_free_ee(mdev, e);
- return FALSE;
+ return false;
}
/* We may throttle resync, if the lower device seems to be busy,
@@ -1861,10 +1885,11 @@ out_interrupted:
* The current sync rate used here uses only the most recent two step marks,
* to have a short time average so we can react faster.
*/
-int drbd_rs_should_slow_down(struct drbd_conf *mdev)
+int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
{
struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
unsigned long db, dt, dbdt;
+ struct lc_element *tmp;
int curr_events;
int throttle = 0;
@@ -1872,9 +1897,22 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev)
if (mdev->sync_conf.c_min_rate == 0)
return 0;
+ spin_lock_irq(&mdev->al_lock);
+ tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
+ if (tmp) {
+ struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
+ if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
+ spin_unlock_irq(&mdev->al_lock);
+ return 0;
+ }
+ /* Do not slow down if app IO is already waiting for this extent */
+ }
+ spin_unlock_irq(&mdev->al_lock);
+
curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
(int)part_stat_read(&disk->part0, sectors[1]) -
atomic_read(&mdev->rs_sect_ev);
+
if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
unsigned long rs_left;
int i;
@@ -1883,8 +1921,12 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev)
/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
* approx. */
- i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS;
- rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
+ i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
+
+ if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
+ rs_left = mdev->ov_left;
+ else
+ rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
if (!dt)
@@ -1912,15 +1954,15 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
- if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+ if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
- return FALSE;
+ return false;
}
if (sector + (size>>9) > capacity) {
dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
- return FALSE;
+ return false;
}
if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
@@ -1957,7 +1999,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
if (!e) {
put_ldev(mdev);
- return FALSE;
+ return false;
}
switch (cmd) {
@@ -1970,6 +2012,8 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
case P_RS_DATA_REQUEST:
e->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
+ /* used in the sector offset progress display */
+ mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
break;
case P_OV_REPLY:
@@ -1991,7 +2035,11 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
if (cmd == P_CSUM_RS_REQUEST) {
D_ASSERT(mdev->agreed_pro_version >= 89);
e->w.cb = w_e_end_csum_rs_req;
+ /* used in the sector offset progress display */
+ mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
} else if (cmd == P_OV_REPLY) {
+ /* track progress, we may need to throttle */
+ atomic_add(size >> 9, &mdev->rs_sect_in);
e->w.cb = w_e_end_ov_reply;
dec_rs_pending(mdev);
/* drbd_rs_begin_io done when we sent this request,
@@ -2003,9 +2051,16 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
case P_OV_REQUEST:
if (mdev->ov_start_sector == ~(sector_t)0 &&
mdev->agreed_pro_version >= 90) {
+ unsigned long now = jiffies;
+ int i;
mdev->ov_start_sector = sector;
mdev->ov_position = sector;
- mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
+ mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
+ mdev->rs_total = mdev->ov_left;
+ for (i = 0; i < DRBD_SYNC_MARKS; i++) {
+ mdev->rs_mark_left[i] = mdev->ov_left;
+ mdev->rs_mark_time[i] = now;
+ }
dev_info(DEV, "Online Verify start sector: %llu\n",
(unsigned long long)sector);
}
@@ -2042,9 +2097,9 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
* we would also throttle its application reads.
* In that case, throttling is done on the SyncTarget only.
*/
- if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev))
- msleep(100);
- if (drbd_rs_begin_io(mdev, e->sector))
+ if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
+ schedule_timeout_uninterruptible(HZ/10);
+ if (drbd_rs_begin_io(mdev, sector))
goto out_free_e;
submit_for_resync:
@@ -2057,11 +2112,10 @@ submit:
spin_unlock_irq(&mdev->req_lock);
if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
- return TRUE;
+ return true;
- /* drbd_submit_ee currently fails for one reason only:
- * not being able to allocate enough bios.
- * Is dropping the connection going to help? */
+ /* don't care for the reason here */
+ dev_err(DEV, "submit failed, triggering re-connect\n");
spin_lock_irq(&mdev->req_lock);
list_del(&e->w.list);
spin_unlock_irq(&mdev->req_lock);
@@ -2070,7 +2124,7 @@ submit:
out_free_e:
put_ldev(mdev);
drbd_free_ee(mdev, e);
- return FALSE;
+ return false;
}
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
@@ -2147,10 +2201,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
- int self, peer, hg, rv = -100;
-
- self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
- peer = mdev->p_uuid[UI_BITMAP] & 1;
+ int hg, rv = -100;
switch (mdev->net_conf->after_sb_1p) {
case ASB_DISCARD_YOUNGER_PRI:
@@ -2177,12 +2228,14 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
case ASB_CALL_HELPER:
hg = drbd_asb_recover_0p(mdev);
if (hg == -1 && mdev->state.role == R_PRIMARY) {
- self = drbd_set_role(mdev, R_SECONDARY, 0);
+ enum drbd_state_rv rv2;
+
+ drbd_set_role(mdev, R_SECONDARY, 0);
/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
* we might be here in C_WF_REPORT_PARAMS which is transient.
* we do not need to wait for the after state change work either. */
- self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
- if (self != SS_SUCCESS) {
+ rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
+ if (rv2 != SS_SUCCESS) {
drbd_khelper(mdev, "pri-lost-after-sb");
} else {
dev_warn(DEV, "Successfully gave up primary role.\n");
@@ -2197,10 +2250,7 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
- int self, peer, hg, rv = -100;
-
- self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
- peer = mdev->p_uuid[UI_BITMAP] & 1;
+ int hg, rv = -100;
switch (mdev->net_conf->after_sb_2p) {
case ASB_DISCARD_YOUNGER_PRI:
@@ -2220,11 +2270,13 @@ static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
case ASB_CALL_HELPER:
hg = drbd_asb_recover_0p(mdev);
if (hg == -1) {
+ enum drbd_state_rv rv2;
+
/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
* we might be here in C_WF_REPORT_PARAMS which is transient.
* we do not need to wait for the after state change work either. */
- self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
- if (self != SS_SUCCESS) {
+ rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
+ if (rv2 != SS_SUCCESS) {
drbd_khelper(mdev, "pri-lost-after-sb");
} else {
dev_warn(DEV, "Successfully gave up primary role.\n");
@@ -2263,6 +2315,8 @@ static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
-2 C_SYNC_TARGET set BitMap
-100 after split brain, disconnect
-1000 unrelated data
+-1091 requires proto 91
+-1096 requires proto 96
*/
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
{
@@ -2292,7 +2346,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
if (mdev->agreed_pro_version < 91)
- return -1001;
+ return -1091;
if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
(mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
@@ -2313,7 +2367,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
if (mdev->agreed_pro_version < 91)
- return -1001;
+ return -1091;
if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
(mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
@@ -2358,17 +2412,22 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
*rule_nr = 51;
peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
- peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
- if (self == peer) {
+ if (mdev->agreed_pro_version < 96 ?
+ (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
+ (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
+ peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo the last start of
resync as sync source modifications of the peer's UUIDs. */
if (mdev->agreed_pro_version < 91)
- return -1001;
+ return -1091;
mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
+
+ dev_info(DEV, "Did not got last syncUUID packet, corrected:\n");
+ drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
+
return -1;
}
}
@@ -2390,20 +2449,20 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
*rule_nr = 71;
self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
- peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
- if (self == peer) {
+ if (mdev->agreed_pro_version < 96 ?
+ (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
+ (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
+ self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo the last start of
resync as sync source modifications of our UUIDs. */
if (mdev->agreed_pro_version < 91)
- return -1001;
+ return -1091;
_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
- dev_info(DEV, "Undid last start of resync:\n");
-
+ dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
@@ -2466,8 +2525,8 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
dev_alert(DEV, "Unrelated data, aborting!\n");
return C_MASK;
}
- if (hg == -1001) {
- dev_alert(DEV, "To resolve this both sides have to support at least protocol\n");
+ if (hg < -1000) {
+ dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
return C_MASK;
}
@@ -2566,7 +2625,8 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
if (abs(hg) >= 2) {
dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
- if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
+ if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
+ BM_LOCKED_SET_ALLOWED))
return C_MASK;
}
@@ -2660,7 +2720,7 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig
unsigned char *my_alg = mdev->net_conf->integrity_alg;
if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
- return FALSE;
+ return false;
p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
if (strcmp(p_integrity_alg, my_alg)) {
@@ -2671,11 +2731,11 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig
my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
}
- return TRUE;
+ return true;
disconnect:
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return FALSE;
+ return false;
}
/* helper function
@@ -2707,7 +2767,7 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
{
- int ok = TRUE;
+ int ok = true;
struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
unsigned int header_size, data_size, exp_max_sz;
struct crypto_hash *verify_tfm = NULL;
@@ -2725,7 +2785,7 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
if (packet_size > exp_max_sz) {
dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
packet_size, exp_max_sz);
- return FALSE;
+ return false;
}
if (apv <= 88) {
@@ -2745,7 +2805,7 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
- return FALSE;
+ return false;
mdev->sync_conf.rate = be32_to_cpu(p->rate);
@@ -2755,11 +2815,11 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
dev_err(DEV, "verify-alg too long, "
"peer wants %u, accepting only %u byte\n",
data_size, SHARED_SECRET_MAX);
- return FALSE;
+ return false;
}
if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
- return FALSE;
+ return false;
/* we expect NUL terminated string */
/* but just in case someone tries to be evil */
@@ -2853,7 +2913,7 @@ disconnect:
/* but free the verify_tfm again, if csums_tfm did not work out */
crypto_free_hash(verify_tfm);
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return FALSE;
+ return false;
}
static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
@@ -2879,7 +2939,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
{
struct p_sizes *p = &mdev->data.rbuf.sizes;
enum determine_dev_size dd = unchanged;
- unsigned int max_seg_s;
+ unsigned int max_bio_size;
sector_t p_size, p_usize, my_usize;
int ldsc = 0; /* local disk size changed */
enum dds_flags ddsf;
@@ -2890,7 +2950,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
dev_err(DEV, "some backing storage is needed\n");
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return FALSE;
+ return false;
}
/* just store the peer's disk size for now.
@@ -2927,18 +2987,17 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
mdev->ldev->dc.disk_size = my_usize;
put_ldev(mdev);
- return FALSE;
+ return false;
}
put_ldev(mdev);
}
-#undef min_not_zero
ddsf = be16_to_cpu(p->dds_flags);
if (get_ldev(mdev)) {
dd = drbd_determin_dev_size(mdev, ddsf);
put_ldev(mdev);
if (dd == dev_size_error)
- return FALSE;
+ return false;
drbd_md_sync(mdev);
} else {
/* I am diskless, need to accept the peer's size. */
@@ -2952,14 +3011,14 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
}
if (mdev->agreed_pro_version < 94)
- max_seg_s = be32_to_cpu(p->max_segment_size);
+ max_bio_size = be32_to_cpu(p->max_bio_size);
else if (mdev->agreed_pro_version == 94)
- max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
+ max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
else /* drbd 8.3.8 onwards */
- max_seg_s = DRBD_MAX_SEGMENT_SIZE;
+ max_bio_size = DRBD_MAX_BIO_SIZE;
- if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
- drbd_setup_queue_param(mdev, max_seg_s);
+ if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9)
+ drbd_setup_queue_param(mdev, max_bio_size);
drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
put_ldev(mdev);
@@ -2985,14 +3044,14 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
}
}
- return TRUE;
+ return true;
}
static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
struct p_uuids *p = &mdev->data.rbuf.uuids;
u64 *p_uuid;
- int i;
+ int i, updated_uuids = 0;
p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
@@ -3009,7 +3068,7 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
(unsigned long long)mdev->ed_uuid);
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return FALSE;
+ return false;
}
if (get_ldev(mdev)) {
@@ -3021,19 +3080,21 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
if (skip_initial_sync) {
dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
- "clear_n_write from receive_uuids");
+ "clear_n_write from receive_uuids",
+ BM_LOCKED_TEST_ALLOWED);
_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
_drbd_uuid_set(mdev, UI_BITMAP, 0);
_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
CS_VERBOSE, NULL);
drbd_md_sync(mdev);
+ updated_uuids = 1;
}
put_ldev(mdev);
} else if (mdev->state.disk < D_INCONSISTENT &&
mdev->state.role == R_PRIMARY) {
/* I am a diskless primary, the peer just created a new current UUID
for me. */
- drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
+ updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
}
/* Before we test for the disk state, we should wait until an eventually
@@ -3042,9 +3103,12 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
new disk state... */
wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
- drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
+ updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
- return TRUE;
+ if (updated_uuids)
+ drbd_print_uuids(mdev, "receiver updated UUIDs to");
+
+ return true;
}
/**
@@ -3081,7 +3145,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
{
struct p_req_state *p = &mdev->data.rbuf.req_state;
union drbd_state mask, val;
- int rv;
+ enum drbd_state_rv rv;
mask.i = be32_to_cpu(p->mask);
val.i = be32_to_cpu(p->val);
@@ -3089,7 +3153,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
- return TRUE;
+ return true;
}
mask = convert_state(mask);
@@ -3100,7 +3164,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
drbd_send_sr_reply(mdev, rv);
drbd_md_sync(mdev);
- return TRUE;
+ return true;
}
static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
@@ -3145,7 +3209,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
peer_state.conn == C_CONNECTED) {
if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
drbd_resync_finished(mdev);
- return TRUE;
+ return true;
}
}
@@ -3161,6 +3225,9 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
if (ns.conn == C_WF_REPORT_PARAMS)
ns.conn = C_CONNECTED;
+ if (peer_state.conn == C_AHEAD)
+ ns.conn = C_BEHIND;
+
if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
get_ldev_if_state(mdev, D_NEGOTIATING)) {
int cr; /* consider resync */
@@ -3195,10 +3262,10 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
real_peer_disk = D_DISKLESS;
} else {
if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
- return FALSE;
+ return false;
D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return FALSE;
+ return false;
}
}
}
@@ -3223,7 +3290,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
drbd_uuid_new_current(mdev);
clear_bit(NEW_CUR_UUID, &mdev->flags);
drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
- return FALSE;
+ return false;
}
rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
ns = mdev->state;
@@ -3231,7 +3298,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
if (rv < SS_SUCCESS) {
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return FALSE;
+ return false;
}
if (os.conn > C_WF_REPORT_PARAMS) {
@@ -3249,7 +3316,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
- return TRUE;
+ return true;
}
static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
@@ -3258,6 +3325,7 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
wait_event(mdev->misc_wait,
mdev->state.conn == C_WF_SYNC_UUID ||
+ mdev->state.conn == C_BEHIND ||
mdev->state.conn < C_CONNECTED ||
mdev->state.disk < D_NEGOTIATING);
@@ -3269,32 +3337,42 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
_drbd_uuid_set(mdev, UI_BITMAP, 0UL);
+ drbd_print_uuids(mdev, "updated sync uuid");
drbd_start_resync(mdev, C_SYNC_TARGET);
put_ldev(mdev);
} else
dev_err(DEV, "Ignoring SyncUUID packet!\n");
- return TRUE;
+ return true;
}
-enum receive_bitmap_ret { OK, DONE, FAILED };
-
-static enum receive_bitmap_ret
+/**
+ * receive_bitmap_plain
+ *
+ * Return 0 when done, 1 when another iteration is needed, and a negative error
+ * code upon failure.
+ */
+static int
receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
unsigned long *buffer, struct bm_xfer_ctx *c)
{
unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
unsigned want = num_words * sizeof(long);
+ int err;
if (want != data_size) {
dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
- return FAILED;
+ return -EIO;
}
if (want == 0)
- return DONE;
- if (drbd_recv(mdev, buffer, want) != want)
- return FAILED;
+ return 0;
+ err = drbd_recv(mdev, buffer, want);
+ if (err != want) {
+ if (err >= 0)
+ err = -EIO;
+ return err;
+ }
drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
@@ -3303,10 +3381,16 @@ receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
if (c->bit_offset > c->bm_bits)
c->bit_offset = c->bm_bits;
- return OK;
+ return 1;
}
-static enum receive_bitmap_ret
+/**
+ * recv_bm_rle_bits
+ *
+ * Return 0 when done, 1 when another iteration is needed, and a negative error
+ * code upon failure.
+ */
+static int
recv_bm_rle_bits(struct drbd_conf *mdev,
struct p_compressed_bm *p,
struct bm_xfer_ctx *c)
@@ -3326,18 +3410,18 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
bits = bitstream_get_bits(&bs, &look_ahead, 64);
if (bits < 0)
- return FAILED;
+ return -EIO;
for (have = bits; have > 0; s += rl, toggle = !toggle) {
bits = vli_decode_bits(&rl, look_ahead);
if (bits <= 0)
- return FAILED;
+ return -EIO;
if (toggle) {
e = s + rl -1;
if (e >= c->bm_bits) {
dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
- return FAILED;
+ return -EIO;
}
_drbd_bm_set_bits(mdev, s, e);
}
@@ -3347,14 +3431,14 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
have, bits, look_ahead,
(unsigned int)(bs.cur.b - p->code),
(unsigned int)bs.buf_len);
- return FAILED;
+ return -EIO;
}
look_ahead >>= bits;
have -= bits;
bits = bitstream_get_bits(&bs, &tmp, 64 - have);
if (bits < 0)
- return FAILED;
+ return -EIO;
look_ahead |= tmp << have;
have += bits;
}
@@ -3362,10 +3446,16 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
c->bit_offset = s;
bm_xfer_ctx_bit_to_word_offset(c);
- return (s == c->bm_bits) ? DONE : OK;
+ return (s != c->bm_bits);
}
-static enum receive_bitmap_ret
+/**
+ * decode_bitmap_c
+ *
+ * Return 0 when done, 1 when another iteration is needed, and a negative error
+ * code upon failure.
+ */
+static int
decode_bitmap_c(struct drbd_conf *mdev,
struct p_compressed_bm *p,
struct bm_xfer_ctx *c)
@@ -3379,7 +3469,7 @@ decode_bitmap_c(struct drbd_conf *mdev,
dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
- return FAILED;
+ return -EIO;
}
void INFO_bm_xfer_stats(struct drbd_conf *mdev,
@@ -3428,13 +3518,13 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
{
struct bm_xfer_ctx c;
void *buffer;
- enum receive_bitmap_ret ret;
- int ok = FALSE;
+ int err;
+ int ok = false;
struct p_header80 *h = &mdev->data.rbuf.header.h80;
- wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
-
- drbd_bm_lock(mdev, "receive bitmap");
+ drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
+ /* you are supposed to send additional out-of-sync information
+ * if you actually set bits during this phase */
/* maybe we should use some per thread scratch page,
* and allocate that during initial device creation? */
@@ -3449,9 +3539,9 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
.bm_words = drbd_bm_words(mdev),
};
- do {
+ for(;;) {
if (cmd == P_BITMAP) {
- ret = receive_bitmap_plain(mdev, data_size, buffer, &c);
+ err = receive_bitmap_plain(mdev, data_size, buffer, &c);
} else if (cmd == P_COMPRESSED_BITMAP) {
/* MAYBE: sanity check that we speak proto >= 90,
* and the feature is enabled! */
@@ -3468,9 +3558,9 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
goto out;
if (data_size <= (sizeof(*p) - sizeof(p->head))) {
dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
- return FAILED;
+ goto out;
}
- ret = decode_bitmap_c(mdev, p, &c);
+ err = decode_bitmap_c(mdev, p, &c);
} else {
dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
goto out;
@@ -3479,24 +3569,26 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
c.packets[cmd == P_BITMAP]++;
c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
- if (ret != OK)
+ if (err <= 0) {
+ if (err < 0)
+ goto out;
break;
-
+ }
if (!drbd_recv_header(mdev, &cmd, &data_size))
goto out;
- } while (ret == OK);
- if (ret == FAILED)
- goto out;
+ }
INFO_bm_xfer_stats(mdev, "receive", &c);
if (mdev->state.conn == C_WF_BITMAP_T) {
+ enum drbd_state_rv rv;
+
ok = !drbd_send_bitmap(mdev);
if (!ok)
goto out;
/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
- ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
- D_ASSERT(ok == SS_SUCCESS);
+ rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
+ D_ASSERT(rv == SS_SUCCESS);
} else if (mdev->state.conn != C_WF_BITMAP_S) {
/* admin may have requested C_DISCONNECTING,
* other threads may have noticed network errors */
@@ -3504,7 +3596,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
drbd_conn_str(mdev->state.conn));
}
- ok = TRUE;
+ ok = true;
out:
drbd_bm_unlock(mdev);
if (ok && mdev->state.conn == C_WF_BITMAP_S)
@@ -3538,7 +3630,26 @@ static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, u
* with the data requests being unplugged */
drbd_tcp_quickack(mdev->data.socket);
- return TRUE;
+ return true;
+}
+
+static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+{
+ struct p_block_desc *p = &mdev->data.rbuf.block_desc;
+
+ switch (mdev->state.conn) {
+ case C_WF_SYNC_UUID:
+ case C_WF_BITMAP_T:
+ case C_BEHIND:
+ break;
+ default:
+ dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
+ drbd_conn_str(mdev->state.conn));
+ }
+
+ drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
+
+ return true;
}
typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
@@ -3571,6 +3682,7 @@ static struct data_cmd drbd_cmd_handler[] = {
[P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
[P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
+ [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
/* anything missing from this table is in
* the asender_tbl, see get_asender_cmd */
[P_MAX_CMD] = { 0, 0, NULL },
@@ -3610,7 +3722,8 @@ static void drbdd(struct drbd_conf *mdev)
if (shs) {
rv = drbd_recv(mdev, &header->h80.payload, shs);
if (unlikely(rv != shs)) {
- dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
+ if (!signal_pending(current))
+ dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
goto err_out;
}
}
@@ -3682,9 +3795,6 @@ static void drbd_disconnect(struct drbd_conf *mdev)
if (mdev->state.conn == C_STANDALONE)
return;
- if (mdev->state.conn >= C_WF_CONNECTION)
- dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
- drbd_conn_str(mdev->state.conn));
/* asender does not clean up anything. it must not interfere, either */
drbd_thread_stop(&mdev->asender);
@@ -3713,6 +3823,8 @@ static void drbd_disconnect(struct drbd_conf *mdev)
atomic_set(&mdev->rs_pending_cnt, 0);
wake_up(&mdev->misc_wait);
+ del_timer(&mdev->request_timer);
+
/* make sure syncer is stopped and w_resume_next_sg queued */
del_timer_sync(&mdev->resync_timer);
resync_timer_fn((unsigned long)mdev);
@@ -3758,13 +3870,6 @@ static void drbd_disconnect(struct drbd_conf *mdev)
if (os.conn == C_DISCONNECTING) {
wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
- if (!is_susp(mdev->state)) {
- /* we must not free the tl_hash
- * while application io is still on the fly */
- wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
- drbd_free_tl_hash(mdev);
- }
-
crypto_free_hash(mdev->cram_hmac_tfm);
mdev->cram_hmac_tfm = NULL;
@@ -3773,6 +3878,10 @@ static void drbd_disconnect(struct drbd_conf *mdev)
drbd_request_state(mdev, NS(conn, C_STANDALONE));
}
+ /* serialize with bitmap writeout triggered by the state change,
+ * if any. */
+ wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+
/* tcp_close and release of sendpage pages can be deferred. I don't
* want to use SO_LINGER, because apparently it can be deferred for
* more than 20 seconds (longest time I checked).
@@ -3873,7 +3982,8 @@ static int drbd_do_handshake(struct drbd_conf *mdev)
rv = drbd_recv(mdev, &p->head.payload, expect);
if (rv != expect) {
- dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
+ if (!signal_pending(current))
+ dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
return 0;
}
@@ -3975,7 +4085,8 @@ static int drbd_do_auth(struct drbd_conf *mdev)
rv = drbd_recv(mdev, peers_ch, length);
if (rv != length) {
- dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
+ if (!signal_pending(current))
+ dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
rv = 0;
goto fail;
}
@@ -4022,7 +4133,8 @@ static int drbd_do_auth(struct drbd_conf *mdev)
rv = drbd_recv(mdev, response , resp_size);
if (rv != resp_size) {
- dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
+ if (!signal_pending(current))
+ dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
rv = 0;
goto fail;
}
@@ -4074,8 +4186,7 @@ int drbdd_init(struct drbd_thread *thi)
h = drbd_connect(mdev);
if (h == 0) {
drbd_disconnect(mdev);
- __set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
+ schedule_timeout_interruptible(HZ);
}
if (h == -1) {
dev_warn(DEV, "Discarding network configuration.\n");
@@ -4113,7 +4224,7 @@ static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
}
wake_up(&mdev->state_wait);
- return TRUE;
+ return true;
}
static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4129,7 +4240,7 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
wake_up(&mdev->misc_wait);
- return TRUE;
+ return true;
}
static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4152,7 +4263,7 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
dec_rs_pending(mdev);
atomic_add(blksize >> 9, &mdev->rs_sect_in);
- return TRUE;
+ return true;
}
/* when we receive the ACK for a write request,
@@ -4176,8 +4287,6 @@ static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
return req;
}
}
- dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
- (void *)(unsigned long)id, (unsigned long long)sector);
return NULL;
}
@@ -4195,15 +4304,17 @@ static int validate_req_change_req_state(struct drbd_conf *mdev,
req = validator(mdev, id, sector);
if (unlikely(!req)) {
spin_unlock_irq(&mdev->req_lock);
- dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
- return FALSE;
+
+ dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
+ (void *)(unsigned long)id, (unsigned long long)sector);
+ return false;
}
__req_mod(req, what, &m);
spin_unlock_irq(&mdev->req_lock);
if (m.bio)
complete_master_bio(mdev, &m);
- return TRUE;
+ return true;
}
static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4218,7 +4329,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
if (is_syncer_block_id(p->block_id)) {
drbd_set_in_sync(mdev, sector, blksize);
dec_rs_pending(mdev);
- return TRUE;
+ return true;
}
switch (be16_to_cpu(h->command)) {
case P_RS_WRITE_ACK:
@@ -4239,7 +4350,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
break;
default:
D_ASSERT(0);
- return FALSE;
+ return false;
}
return validate_req_change_req_state(mdev, p->block_id, sector,
@@ -4250,20 +4361,44 @@ static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
{
struct p_block_ack *p = (struct p_block_ack *)h;
sector_t sector = be64_to_cpu(p->sector);
-
- if (__ratelimit(&drbd_ratelimit_state))
- dev_warn(DEV, "Got NegAck packet. Peer is in troubles?\n");
+ int size = be32_to_cpu(p->blksize);
+ struct drbd_request *req;
+ struct bio_and_error m;
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
if (is_syncer_block_id(p->block_id)) {
- int size = be32_to_cpu(p->blksize);
dec_rs_pending(mdev);
drbd_rs_failed_io(mdev, sector, size);
- return TRUE;
+ return true;
}
- return validate_req_change_req_state(mdev, p->block_id, sector,
- _ack_id_to_req, __func__ , neg_acked);
+
+ spin_lock_irq(&mdev->req_lock);
+ req = _ack_id_to_req(mdev, p->block_id, sector);
+ if (!req) {
+ spin_unlock_irq(&mdev->req_lock);
+ if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
+ mdev->net_conf->wire_protocol == DRBD_PROT_B) {
+ /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
+ The master bio might already be completed, therefore the
+ request is no longer in the collision hash.
+ => Do not try to validate block_id as request. */
+ /* In Protocol B we might already have got a P_RECV_ACK
+ but then get a P_NEG_ACK afterwards. */
+ drbd_set_out_of_sync(mdev, sector, size);
+ return true;
+ } else {
+ dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
+ (void *)(unsigned long)p->block_id, (unsigned long long)sector);
+ return false;
+ }
+ }
+ __req_mod(req, neg_acked, &m);
+ spin_unlock_irq(&mdev->req_lock);
+
+ if (m.bio)
+ complete_master_bio(mdev, &m);
+ return true;
}
static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4294,11 +4429,20 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
if (get_ldev_if_state(mdev, D_FAILED)) {
drbd_rs_complete_io(mdev, sector);
- drbd_rs_failed_io(mdev, sector, size);
+ switch (be16_to_cpu(h->command)) {
+ case P_NEG_RS_DREPLY:
+ drbd_rs_failed_io(mdev, sector, size);
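+ /* fall through: P_RS_CANCEL skips only the failed-io accounting above */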
+ case P_RS_CANCEL:
+ break;
+ default:
+ D_ASSERT(0);
+ put_ldev(mdev);
+ return false;
+ }
put_ldev(mdev);
}
- return TRUE;
+ return true;
}
static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4307,7 +4451,14 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
- return TRUE;
+ if (mdev->state.conn == C_AHEAD &&
+ atomic_read(&mdev->ap_in_flight) == 0 &&
+ !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
+ mdev->start_resync_timer.expires = jiffies + HZ;
+ add_timer(&mdev->start_resync_timer);
+ }
+
+ return true;
}
static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4328,12 +4479,18 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
ov_oos_print(mdev);
if (!get_ldev(mdev))
- return TRUE;
+ return true;
drbd_rs_complete_io(mdev, sector);
dec_rs_pending(mdev);
- if (--mdev->ov_left == 0) {
+ --mdev->ov_left;
+
+ /* let's advance progress step marks only for every other megabyte */
+ if ((mdev->ov_left & 0x200) == 0x200)
+ drbd_advance_rs_marks(mdev, mdev->ov_left);
+
+ if (mdev->ov_left == 0) {
w = kmalloc(sizeof(*w), GFP_NOIO);
if (w) {
w->cb = w_ov_finished;
@@ -4345,12 +4502,12 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
}
}
put_ldev(mdev);
- return TRUE;
+ return true;
}
static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
- return TRUE;
+ return true;
}
struct asender_cmd {
@@ -4378,6 +4535,7 @@ static struct asender_cmd *get_asender_cmd(int cmd)
[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
[P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
[P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
+ [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply},
[P_MAX_CMD] = { 0, NULL },
};
if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index ad3fc6228f27..5c0c8be1bb0a 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -140,9 +140,14 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
struct hlist_node *n;
struct hlist_head *slot;
- /* before we can signal completion to the upper layers,
- * we may need to close the current epoch */
+ /* Before we can signal completion to the upper layers,
+ * we may need to close the current epoch.
+ * We can skip this, if this request has not even been sent, because we
+ * did not have a fully established connection yet/anymore, during
+ * bitmap exchange, or while we are C_AHEAD due to congestion policy.
+ */
if (mdev->state.conn >= C_CONNECTED &&
+ (s & RQ_NET_SENT) != 0 &&
req->epoch == mdev->newest_tle->br_number)
queue_barrier(mdev);
@@ -440,7 +445,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
- __drbd_chk_io_error(mdev, FALSE);
+ __drbd_chk_io_error(mdev, false);
_req_may_be_done_not_susp(req, m);
put_ldev(mdev);
break;
@@ -461,7 +466,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
D_ASSERT(!(req->rq_state & RQ_NET_MASK));
- __drbd_chk_io_error(mdev, FALSE);
+ __drbd_chk_io_error(mdev, false);
put_ldev(mdev);
/* no point in retrying if there is no good remote data,
@@ -545,6 +550,14 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
+ case queue_for_send_oos:
+ req->rq_state |= RQ_NET_QUEUED;
+ req->w.cb = w_send_oos;
+ drbd_queue_work(&mdev->data.work, &req->w);
+ break;
+
+ case oos_handed_to_network:
+ /* actually the same */
case send_canceled:
/* treat it the same */
case send_failed:
@@ -558,6 +571,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
case handed_over_to_network:
/* assert something? */
+ if (bio_data_dir(req->master_bio) == WRITE)
+ atomic_add(req->size>>9, &mdev->ap_in_flight);
+
if (bio_data_dir(req->master_bio) == WRITE &&
mdev->net_conf->wire_protocol == DRBD_PROT_A) {
/* this is what is dangerous about protocol A:
@@ -591,6 +607,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
dec_ap_pending(mdev);
req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
req->rq_state |= RQ_NET_DONE;
+ if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
+ atomic_sub(req->size>>9, &mdev->ap_in_flight);
+
/* if it is still queued, we may not complete it here.
* it will be canceled soon. */
if (!(req->rq_state & RQ_NET_QUEUED))
@@ -628,14 +647,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state |= RQ_NET_OK;
D_ASSERT(req->rq_state & RQ_NET_PENDING);
dec_ap_pending(mdev);
+ atomic_sub(req->size>>9, &mdev->ap_in_flight);
req->rq_state &= ~RQ_NET_PENDING;
_req_may_be_done_not_susp(req, m);
break;
case neg_acked:
/* assert something? */
- if (req->rq_state & RQ_NET_PENDING)
+ if (req->rq_state & RQ_NET_PENDING) {
dec_ap_pending(mdev);
+ atomic_sub(req->size>>9, &mdev->ap_in_flight);
+ }
req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
req->rq_state |= RQ_NET_DONE;
@@ -690,8 +712,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
dev_err(DEV, "FIXME (barrier_acked but pending)\n");
list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
}
- D_ASSERT(req->rq_state & RQ_NET_SENT);
- req->rq_state |= RQ_NET_DONE;
+ if ((req->rq_state & RQ_NET_MASK) != 0) {
+ req->rq_state |= RQ_NET_DONE;
+ if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
+ atomic_sub(req->size>>9, &mdev->ap_in_flight);
+ }
_req_may_be_done(req, m); /* Allowed while state.susp */
break;
@@ -738,14 +763,14 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
}
-static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
+static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
const int rw = bio_rw(bio);
const int size = bio->bi_size;
const sector_t sector = bio->bi_sector;
struct drbd_tl_epoch *b = NULL;
struct drbd_request *req;
- int local, remote;
+ int local, remote, send_oos = 0;
int err = -EIO;
int ret = 0;
@@ -759,6 +784,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
bio_endio(bio, -ENOMEM);
return 0;
}
+ req->start_time = start_time;
local = get_ldev(mdev);
if (!local) {
@@ -808,9 +834,9 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
drbd_al_begin_io(mdev, sector);
}
- remote = remote && (mdev->state.pdsk == D_UP_TO_DATE ||
- (mdev->state.pdsk == D_INCONSISTENT &&
- mdev->state.conn >= C_CONNECTED));
+ remote = remote && drbd_should_do_remote(mdev->state);
+ send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
+ D_ASSERT(!(remote && send_oos));
if (!(local || remote) && !is_susp(mdev->state)) {
if (__ratelimit(&drbd_ratelimit_state))
@@ -824,7 +850,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
* but there is a race between testing the bit and pointer outside the
* spinlock, and grabbing the spinlock.
* if we lost that race, we retry. */
- if (rw == WRITE && remote &&
+ if (rw == WRITE && (remote || send_oos) &&
mdev->unused_spare_tle == NULL &&
test_bit(CREATE_BARRIER, &mdev->flags)) {
allocate_barrier:
@@ -842,18 +868,19 @@ allocate_barrier:
if (is_susp(mdev->state)) {
/* If we got suspended, use the retry mechanism of
generic_make_request() to restart processing of this
- bio. In the next call to drbd_make_request_26
+ bio. In the next call to drbd_make_request
we sleep in inc_ap_bio() */
ret = 1;
spin_unlock_irq(&mdev->req_lock);
goto fail_free_complete;
}
- if (remote) {
- remote = (mdev->state.pdsk == D_UP_TO_DATE ||
- (mdev->state.pdsk == D_INCONSISTENT &&
- mdev->state.conn >= C_CONNECTED));
- if (!remote)
+ if (remote || send_oos) {
+ remote = drbd_should_do_remote(mdev->state);
+ send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
+ D_ASSERT(!(remote && send_oos));
+
+ if (!(remote || send_oos))
dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
if (!(local || remote)) {
dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
@@ -866,7 +893,7 @@ allocate_barrier:
mdev->unused_spare_tle = b;
b = NULL;
}
- if (rw == WRITE && remote &&
+ if (rw == WRITE && (remote || send_oos) &&
mdev->unused_spare_tle == NULL &&
test_bit(CREATE_BARRIER, &mdev->flags)) {
/* someone closed the current epoch
@@ -889,7 +916,7 @@ allocate_barrier:
* barrier packet. To get the write ordering right, we only have to
* make sure that, if this is a write request and it triggered a
* barrier packet, this request is queued within the same spinlock. */
- if (remote && mdev->unused_spare_tle &&
+ if ((remote || send_oos) && mdev->unused_spare_tle &&
test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
_tl_add_barrier(mdev, mdev->unused_spare_tle);
mdev->unused_spare_tle = NULL;
@@ -937,6 +964,34 @@ allocate_barrier:
? queue_for_net_write
: queue_for_net_read);
}
+ if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
+ _req_mod(req, queue_for_send_oos);
+
+ if (remote &&
+ mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
+ int congested = 0;
+
+ if (mdev->net_conf->cong_fill &&
+ atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+ dev_info(DEV, "Congestion-fill threshold reached\n");
+ congested = 1;
+ }
+
+ if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+ dev_info(DEV, "Congestion-extents threshold reached\n");
+ congested = 1;
+ }
+
+ if (congested) {
+ queue_barrier(mdev); /* last barrier, after mirrored writes */
+
+ if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+ _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+ else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+ _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+ }
+ }
+
spin_unlock_irq(&mdev->req_lock);
kfree(b); /* if someone else has beaten us to it... */
@@ -949,9 +1004,9 @@ allocate_barrier:
* stable storage, and this is a WRITE, we may not even submit
* this bio. */
if (get_ldev(mdev)) {
- if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
- : rw == READ ? DRBD_FAULT_DT_RD
- : DRBD_FAULT_DT_RA))
+ if (drbd_insert_fault(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
+ : rw == READ ? DRBD_FAULT_DT_RD
+ : DRBD_FAULT_DT_RA))
bio_endio(req->private_bio, -EIO);
else
generic_make_request(req->private_bio);
@@ -1018,16 +1073,19 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
return 0;
}
-int drbd_make_request_26(struct request_queue *q, struct bio *bio)
+int drbd_make_request(struct request_queue *q, struct bio *bio)
{
unsigned int s_enr, e_enr;
struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
+ unsigned long start_time;
if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
bio_endio(bio, -EPERM);
return 0;
}
+ start_time = jiffies;
+
/*
* what we "blindly" assume:
*/
@@ -1042,12 +1100,12 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
if (likely(s_enr == e_enr)) {
inc_ap_bio(mdev, 1);
- return drbd_make_request_common(mdev, bio);
+ return drbd_make_request_common(mdev, bio, start_time);
}
/* can this bio be split generically?
* Maybe add our own split-arbitrary-bios function. */
- if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_SEGMENT_SIZE) {
+ if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_BIO_SIZE) {
/* rather error out here than BUG in bio_split */
dev_err(DEV, "bio would need to, but cannot, be split: "
"(vcnt=%u,idx=%u,size=%u,sector=%llu)\n",
@@ -1069,11 +1127,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
const int sps = 1 << HT_SHIFT; /* sectors per slot */
const int mask = sps - 1;
const sector_t first_sectors = sps - (sect & mask);
- bp = bio_split(bio,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
- bio_split_pool,
-#endif
- first_sectors);
+ bp = bio_split(bio, first_sectors);
/* we need to get a "reference count" (ap_bio_cnt)
* to avoid races with the disconnect/reconnect/suspend code.
@@ -1084,10 +1138,10 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
D_ASSERT(e_enr == s_enr + 1);
- while (drbd_make_request_common(mdev, &bp->bio1))
+ while (drbd_make_request_common(mdev, &bp->bio1, start_time))
inc_ap_bio(mdev, 1);
- while (drbd_make_request_common(mdev, &bp->bio2))
+ while (drbd_make_request_common(mdev, &bp->bio2, start_time))
inc_ap_bio(mdev, 1);
dec_ap_bio(mdev);
@@ -1098,7 +1152,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
}
/* This is called by bio_add_page(). With this function we reduce
- * the number of BIOs that span over multiple DRBD_MAX_SEGMENT_SIZEs
+ * the number of BIOs that span over multiple DRBD_MAX_BIO_SIZEs
* units (was AL_EXTENTs).
*
* we do the calculation within the lower 32bit of the byte offsets,
@@ -1108,7 +1162,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
* As long as the BIO is empty we have to allow at least one bvec,
* regardless of size and offset. so the resulting bio may still
* cross extent boundaries. those are dealt with (bio_split) in
- * drbd_make_request_26.
+ * drbd_make_request.
*/
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
@@ -1118,8 +1172,8 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
unsigned int bio_size = bvm->bi_size;
int limit, backing_limit;
- limit = DRBD_MAX_SEGMENT_SIZE
- - ((bio_offset & (DRBD_MAX_SEGMENT_SIZE-1)) + bio_size);
+ limit = DRBD_MAX_BIO_SIZE
+ - ((bio_offset & (DRBD_MAX_BIO_SIZE-1)) + bio_size);
if (limit < 0)
limit = 0;
if (bio_size == 0) {
@@ -1136,3 +1190,42 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
}
return limit;
}
+
+void request_timer_fn(unsigned long data)
+{
+ struct drbd_conf *mdev = (struct drbd_conf *) data;
+ struct drbd_request *req; /* oldest request */
+ struct list_head *le;
+ unsigned long et = 0; /* effective timeout = ko_count * timeout */
+
+ if (get_net_conf(mdev)) {
+ et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count;
+ put_net_conf(mdev);
+ }
+ if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
+ return; /* Recurring timer stopped */
+
+ spin_lock_irq(&mdev->req_lock);
+ le = &mdev->oldest_tle->requests;
+ if (list_empty(le)) {
+ spin_unlock_irq(&mdev->req_lock);
+ mod_timer(&mdev->request_timer, jiffies + et);
+ return;
+ }
+
+ le = le->prev;
+ req = list_entry(le, struct drbd_request, tl_requests);
+ if (time_is_before_eq_jiffies(req->start_time + et)) {
+ if (req->rq_state & RQ_NET_PENDING) {
+ dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
+ _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE, NULL);
+ } else {
+ dev_warn(DEV, "Local backing block device frozen?\n");
+ mod_timer(&mdev->request_timer, jiffies + et);
+ }
+ } else {
+ mod_timer(&mdev->request_timer, req->start_time + et);
+ }
+
+ spin_unlock_irq(&mdev->req_lock);
+}
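The request_timer_fn() added above is a self-rearming watchdog: it derives an effective timeout from ko-count times the network timeout, inspects the oldest request on the transfer log, and either declares a peer timeout (C_TIMEOUT) or suspects a frozen local backing device, re-arming itself with mod_timer() as long as the connection is up. A minimal sketch of the same recurring-timer pattern, using the pre-timer_setup() API of this kernel generation; the structure and field names below are illustrative and not taken from DRBD:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list watchdog;
	unsigned long interval;		/* re-arm period, in jiffies */
};

static void my_watchdog_fn(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *) data;

	/* ... inspect in-flight work, warn or change state on a stall ... */

	/* re-arm; the timer keeps firing until it is del_timer()ed */
	mod_timer(&dev->watchdog, jiffies + dev->interval);
}

static void my_watchdog_start(struct my_dev *dev)
{
	setup_timer(&dev->watchdog, my_watchdog_fn, (unsigned long) dev);
	mod_timer(&dev->watchdog, jiffies + dev->interval);
}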
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index ab2bd09d54b4..32e2c3e6a813 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -82,14 +82,16 @@ enum drbd_req_event {
to_be_submitted,
/* XXX yes, now I am inconsistent...
- * these two are not "events" but "actions"
+ * these are not "events" but "actions"
* oh, well... */
queue_for_net_write,
queue_for_net_read,
+ queue_for_send_oos,
send_canceled,
send_failed,
handed_over_to_network,
+ oos_handed_to_network,
connection_lost_while_pending,
read_retry_remote_canceled,
recv_acked_by_peer,
@@ -289,7 +291,6 @@ static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
req->epoch = 0;
req->sector = bio_src->bi_sector;
req->size = bio_src->bi_size;
- req->start_time = jiffies;
INIT_HLIST_NODE(&req->colision);
INIT_LIST_HEAD(&req->tl_requests);
INIT_LIST_HEAD(&req->w.list);
@@ -321,6 +322,7 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m);
extern void complete_master_bio(struct drbd_conf *mdev,
struct bio_and_error *m);
+extern void request_timer_fn(unsigned long data);
/* use this if you don't want to deal with calling complete_master_bio()
* outside the spinlock, e.g. when walking some list on cleanup. */
@@ -338,23 +340,43 @@ static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
return rv;
}
-/* completion of master bio is outside of spinlock.
- * If you need it irqsave, do it your self!
- * Which means: don't use from bio endio callback. */
+/* completion of master bio is outside of our spinlock.
+ * We still may or may not be inside some irqs disabled section
+ * of the lower level driver completion callback, so we need to
+ * spin_lock_irqsave here. */
static inline int req_mod(struct drbd_request *req,
enum drbd_req_event what)
{
+ unsigned long flags;
struct drbd_conf *mdev = req->mdev;
struct bio_and_error m;
int rv;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irqsave(&mdev->req_lock, flags);
rv = __req_mod(req, what, &m);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irqrestore(&mdev->req_lock, flags);
if (m.bio)
complete_master_bio(mdev, &m);
return rv;
}
+
+static inline bool drbd_should_do_remote(union drbd_state s)
+{
+ return s.pdsk == D_UP_TO_DATE ||
+ (s.pdsk >= D_INCONSISTENT &&
+ s.conn >= C_WF_BITMAP_T &&
+ s.conn < C_AHEAD);
+ /* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
+ That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
+ states. */
+}
+static inline bool drbd_should_send_oos(union drbd_state s)
+{
+ return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
+ /* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
+ since we enter state C_AHEAD only if proto >= 96 */
+}
+
#endif
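Two things in this header are worth noting. The new drbd_should_do_remote()/drbd_should_send_oos() helpers centralize the connection and disk state checks that drbd_make_request_common() re-evaluates after taking req_lock. And req_mod() now uses the irqsave locking variant because, as the comment says, it may be reached from a lower-level completion callback that already runs with interrupts disabled; irqsave preserves whatever interrupt state the caller had instead of unconditionally re-enabling. A generic sketch of that locking pattern, independent of DRBD:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);

/* Safe to call whether or not the caller already disabled interrupts:
 * the previous interrupt state is saved in 'flags' and restored on unlock. */
static void update_from_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	/* ... touch state shared with interrupt context ... */
	spin_unlock_irqrestore(&my_lock, flags);
}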
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index 85179e1fb50a..c44a2a602772 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -48,6 +48,8 @@ static const char *drbd_conn_s_names[] = {
[C_PAUSED_SYNC_T] = "PausedSyncT",
[C_VERIFY_S] = "VerifyS",
[C_VERIFY_T] = "VerifyT",
+ [C_AHEAD] = "Ahead",
+ [C_BEHIND] = "Behind",
};
static const char *drbd_role_s_names[] = {
@@ -92,7 +94,7 @@ static const char *drbd_state_sw_errors[] = {
const char *drbd_conn_str(enum drbd_conns s)
{
/* enums are unsigned... */
- return s > C_PAUSED_SYNC_T ? "TOO_LARGE" : drbd_conn_s_names[s];
+ return s > C_BEHIND ? "TOO_LARGE" : drbd_conn_s_names[s];
}
const char *drbd_role_str(enum drbd_role s)
@@ -105,7 +107,7 @@ const char *drbd_disk_str(enum drbd_disk_state s)
return s > D_UP_TO_DATE ? "TOO_LARGE" : drbd_disk_s_names[s];
}
-const char *drbd_set_st_err_str(enum drbd_state_ret_codes err)
+const char *drbd_set_st_err_str(enum drbd_state_rv err)
{
return err <= SS_AFTER_LAST_ERROR ? "TOO_SMALL" :
err > SS_TWO_PRIMARIES ? "TOO_LARGE"
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index e027446590d3..f7e6c92f8d03 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -39,18 +39,17 @@
#include "drbd_req.h"
static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
+static int w_make_resync_request(struct drbd_conf *mdev,
+ struct drbd_work *w, int cancel);
-/* defined here:
- drbd_md_io_complete
- drbd_endio_sec
- drbd_endio_pri
-
- * more endio handlers:
- atodb_endio in drbd_actlog.c
- drbd_bm_async_io_complete in drbd_bitmap.c
-
+/* endio handlers:
+ * drbd_md_io_complete (defined here)
+ * drbd_endio_pri (defined here)
+ * drbd_endio_sec (defined here)
+ * bm_async_io_complete (defined in drbd_bitmap.c)
+ *
* For all these callbacks, note the following:
* The callbacks will be called in irq context by the IDE drivers,
* and in Softirqs/Tasklets/BH context by the SCSI drivers.
@@ -94,7 +93,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
if (list_empty(&mdev->read_ee))
wake_up(&mdev->ee_wait);
if (test_bit(__EE_WAS_ERROR, &e->flags))
- __drbd_chk_io_error(mdev, FALSE);
+ __drbd_chk_io_error(mdev, false);
spin_unlock_irqrestore(&mdev->req_lock, flags);
drbd_queue_work(&mdev->data.work, &e->w);
@@ -137,7 +136,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
: list_empty(&mdev->active_ee);
if (test_bit(__EE_WAS_ERROR, &e->flags))
- __drbd_chk_io_error(mdev, FALSE);
+ __drbd_chk_io_error(mdev, false);
spin_unlock_irqrestore(&mdev->req_lock, flags);
if (is_syncer_req)
@@ -163,14 +162,15 @@ void drbd_endio_sec(struct bio *bio, int error)
int uptodate = bio_flagged(bio, BIO_UPTODATE);
int is_write = bio_data_dir(bio) == WRITE;
- if (error)
+ if (error && __ratelimit(&drbd_ratelimit_state))
dev_warn(DEV, "%s: error=%d s=%llus\n",
is_write ? "write" : "read", error,
(unsigned long long)e->sector);
if (!error && !uptodate) {
- dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
- is_write ? "write" : "read",
- (unsigned long long)e->sector);
+ if (__ratelimit(&drbd_ratelimit_state))
+ dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
+ is_write ? "write" : "read",
+ (unsigned long long)e->sector);
/* strange behavior of some lower level drivers...
* fail the request by clearing the uptodate flag,
* but do not return any error?! */
@@ -250,13 +250,6 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
return w_send_read_req(mdev, w, 0);
}
-int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
- ERR_IF(cancel) return 1;
- dev_err(DEV, "resync inactive, but callback triggered??\n");
- return 1; /* Simply ignore this! */
-}
-
void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
{
struct hash_desc desc;
@@ -355,7 +348,7 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
if (!get_ldev(mdev))
return -EIO;
- if (drbd_rs_should_slow_down(mdev))
+ if (drbd_rs_should_slow_down(mdev, sector))
goto defer;
/* GFP_TRY, because if there is no memory available right now, this may
@@ -373,9 +366,10 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
return 0;
- /* drbd_submit_ee currently fails for one reason only:
- * not being able to allocate enough bios.
- * Is dropping the connection going to help? */
+ /* If it failed because of ENOMEM, retry should help. If it failed
+ * because bio_add_page failed (probably broken lower level driver),
+ * retry may or may not help.
+ * If it does not, you may need to force disconnect. */
spin_lock_irq(&mdev->req_lock);
list_del(&e->w.list);
spin_unlock_irq(&mdev->req_lock);
@@ -386,26 +380,25 @@ defer:
return -EAGAIN;
}
-void resync_timer_fn(unsigned long data)
+int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
- struct drbd_conf *mdev = (struct drbd_conf *) data;
- int queue;
-
- queue = 1;
switch (mdev->state.conn) {
case C_VERIFY_S:
- mdev->resync_work.cb = w_make_ov_request;
+ w_make_ov_request(mdev, w, cancel);
break;
case C_SYNC_TARGET:
- mdev->resync_work.cb = w_make_resync_request;
+ w_make_resync_request(mdev, w, cancel);
break;
- default:
- queue = 0;
- mdev->resync_work.cb = w_resync_inactive;
}
- /* harmless race: list_empty outside data.work.q_lock */
- if (list_empty(&mdev->resync_work.list) && queue)
+ return 1;
+}
+
+void resync_timer_fn(unsigned long data)
+{
+ struct drbd_conf *mdev = (struct drbd_conf *) data;
+
+ if (list_empty(&mdev->resync_work.list))
drbd_queue_work(&mdev->data.work, &mdev->resync_work);
}
@@ -438,7 +431,7 @@ static void fifo_add_val(struct fifo_buffer *fb, int value)
fb->values[i] += value;
}
-int drbd_rs_controller(struct drbd_conf *mdev)
+static int drbd_rs_controller(struct drbd_conf *mdev)
{
unsigned int sect_in; /* Number of sectors that came in since the last turn */
unsigned int want; /* The number of sectors we want in the proxy */
@@ -492,29 +485,36 @@ int drbd_rs_controller(struct drbd_conf *mdev)
return req_sect;
}
-int w_make_resync_request(struct drbd_conf *mdev,
- struct drbd_work *w, int cancel)
+static int drbd_rs_number_requests(struct drbd_conf *mdev)
+{
+ int number;
+ if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
+ number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
+ mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
+ } else {
+ mdev->c_sync_rate = mdev->sync_conf.rate;
+ number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
+ }
+
+ /* ignore the amount of pending requests, the resync controller should
+ * throttle down to incoming reply rate soon enough anyways. */
+ return number;
+}
+
+static int w_make_resync_request(struct drbd_conf *mdev,
+ struct drbd_work *w, int cancel)
{
unsigned long bit;
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
- int max_segment_size;
- int number, rollback_i, size, pe, mx;
+ int max_bio_size;
+ int number, rollback_i, size;
int align, queued, sndbuf;
int i = 0;
if (unlikely(cancel))
return 1;
- if (unlikely(mdev->state.conn < C_CONNECTED)) {
- dev_err(DEV, "Confused in w_make_resync_request()! cstate < Connected");
- return 0;
- }
-
- if (mdev->state.conn != C_SYNC_TARGET)
- dev_err(DEV, "%s in w_make_resync_request\n",
- drbd_conn_str(mdev->state.conn));
-
if (mdev->rs_total == 0) {
/* empty resync? */
drbd_resync_finished(mdev);
@@ -527,49 +527,19 @@ int w_make_resync_request(struct drbd_conf *mdev,
to continue resync with a broken disk makes no sense at
all */
dev_err(DEV, "Disk broke down during resync!\n");
- mdev->resync_work.cb = w_resync_inactive;
return 1;
}
/* starting with drbd 8.3.8, we can handle multi-bio EEs,
* if it should be necessary */
- max_segment_size =
- mdev->agreed_pro_version < 94 ? queue_max_segment_size(mdev->rq_queue) :
- mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE;
+ max_bio_size =
+ mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 :
+ mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE;
- if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
- number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
- mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
- } else {
- mdev->c_sync_rate = mdev->sync_conf.rate;
- number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
- }
-
- /* Throttle resync on lower level disk activity, which may also be
- * caused by application IO on Primary/SyncTarget.
- * Keep this after the call to drbd_rs_controller, as that assumes
- * to be called as precisely as possible every SLEEP_TIME,
- * and would be confused otherwise. */
- if (drbd_rs_should_slow_down(mdev))
+ number = drbd_rs_number_requests(mdev);
+ if (number == 0)
goto requeue;
- mutex_lock(&mdev->data.mutex);
- if (mdev->data.socket)
- mx = mdev->data.socket->sk->sk_rcvbuf / sizeof(struct p_block_req);
- else
- mx = 1;
- mutex_unlock(&mdev->data.mutex);
-
- /* For resync rates >160MB/sec, allow more pending RS requests */
- if (number > mx)
- mx = number;
-
- /* Limit the number of pending RS requests to no more than the peer's receive buffer */
- pe = atomic_read(&mdev->rs_pending_cnt);
- if ((pe + number) > mx) {
- number = mx - pe;
- }
-
for (i = 0; i < number; i++) {
/* Stop generating RS requests, when half of the send buffer is filled */
mutex_lock(&mdev->data.mutex);
@@ -588,16 +558,16 @@ next_sector:
size = BM_BLOCK_SIZE;
bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo);
- if (bit == -1UL) {
+ if (bit == DRBD_END_OF_BITMAP) {
mdev->bm_resync_fo = drbd_bm_bits(mdev);
- mdev->resync_work.cb = w_resync_inactive;
put_ldev(mdev);
return 1;
}
sector = BM_BIT_TO_SECT(bit);
- if (drbd_try_rs_begin_io(mdev, sector)) {
+ if (drbd_rs_should_slow_down(mdev, sector) ||
+ drbd_try_rs_begin_io(mdev, sector)) {
mdev->bm_resync_fo = bit;
goto requeue;
}
@@ -608,7 +578,7 @@ next_sector:
goto next_sector;
}
-#if DRBD_MAX_SEGMENT_SIZE > BM_BLOCK_SIZE
+#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
/* try to find some adjacent bits.
* we stop if we have already the maximum req size.
*
@@ -618,7 +588,7 @@ next_sector:
align = 1;
rollback_i = i;
for (;;) {
- if (size + BM_BLOCK_SIZE > max_segment_size)
+ if (size + BM_BLOCK_SIZE > max_bio_size)
break;
/* Be always aligned */
@@ -685,7 +655,6 @@ next_sector:
* resync data block, and the last bit is cleared.
* until then resync "work" is "inactive" ...
*/
- mdev->resync_work.cb = w_resync_inactive;
put_ldev(mdev);
return 1;
}
@@ -706,27 +675,18 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
if (unlikely(cancel))
return 1;
- if (unlikely(mdev->state.conn < C_CONNECTED)) {
- dev_err(DEV, "Confused in w_make_ov_request()! cstate < Connected");
- return 0;
- }
-
- number = SLEEP_TIME*mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
- if (atomic_read(&mdev->rs_pending_cnt) > number)
- goto requeue;
-
- number -= atomic_read(&mdev->rs_pending_cnt);
+ number = drbd_rs_number_requests(mdev);
sector = mdev->ov_position;
for (i = 0; i < number; i++) {
if (sector >= capacity) {
- mdev->resync_work.cb = w_resync_inactive;
return 1;
}
size = BM_BLOCK_SIZE;
- if (drbd_try_rs_begin_io(mdev, sector)) {
+ if (drbd_rs_should_slow_down(mdev, sector) ||
+ drbd_try_rs_begin_io(mdev, sector)) {
mdev->ov_position = sector;
goto requeue;
}
@@ -744,11 +704,33 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
mdev->ov_position = sector;
requeue:
+ mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
return 1;
}
+void start_resync_timer_fn(unsigned long data)
+{
+ struct drbd_conf *mdev = (struct drbd_conf *) data;
+
+ drbd_queue_work(&mdev->data.work, &mdev->start_resync_work);
+}
+
+int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+{
+ if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
+ dev_warn(DEV, "w_start_resync later...\n");
+ mdev->start_resync_timer.expires = jiffies + HZ/10;
+ add_timer(&mdev->start_resync_timer);
+ return 1;
+ }
+
+ drbd_start_resync(mdev, C_SYNC_SOURCE);
+ clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
+ return 1;
+}
+
int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
kfree(w);
@@ -782,6 +764,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
union drbd_state os, ns;
struct drbd_work *w;
char *khelper_cmd = NULL;
+ int verify_done = 0;
/* Remove all elements from the resync LRU. Since future actions
* might set bits in the (main) bitmap, then the entries in the
@@ -792,8 +775,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
* queue (or even the read operations for those packets
* is not finished by now). Retry in 100ms. */
- __set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ / 10);
+ schedule_timeout_interruptible(HZ / 10);
w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
if (w) {
w->cb = w_resync_finished;
@@ -818,6 +800,8 @@ int drbd_resync_finished(struct drbd_conf *mdev)
spin_lock_irq(&mdev->req_lock);
os = mdev->state;
+ verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
+
/* This protects us against multiple calls (that can happen in the presence
of application IO), and against connectivity loss just before we arrive here. */
if (os.conn <= C_CONNECTED)
@@ -827,8 +811,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
ns.conn = C_CONNECTED;
dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
- (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) ?
- "Online verify " : "Resync",
+ verify_done ? "Online verify " : "Resync",
dt + mdev->rs_paused, mdev->rs_paused, dbdt);
n_oos = drbd_bm_total_weight(mdev);
@@ -886,14 +869,18 @@ int drbd_resync_finished(struct drbd_conf *mdev)
}
}
- drbd_uuid_set_bm(mdev, 0UL);
-
- if (mdev->p_uuid) {
- /* Now the two UUID sets are equal, update what we
- * know of the peer. */
- int i;
- for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
- mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
+ if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
+ /* for verify runs, we don't update uuids here,
+ * so there would be nothing to report. */
+ drbd_uuid_set_bm(mdev, 0UL);
+ drbd_print_uuids(mdev, "updated UUIDs");
+ if (mdev->p_uuid) {
+ /* Now the two UUID sets are equal, update what we
+ * know of the peer. */
+ int i;
+ for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
+ mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
+ }
}
}
@@ -905,15 +892,11 @@ out:
mdev->rs_total = 0;
mdev->rs_failed = 0;
mdev->rs_paused = 0;
- mdev->ov_start_sector = 0;
+ if (verify_done)
+ mdev->ov_start_sector = 0;
drbd_md_sync(mdev);
- if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
- dev_info(DEV, "Writing the whole bitmap\n");
- drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
- }
-
if (khelper_cmd)
drbd_khelper(mdev, khelper_cmd);
@@ -994,7 +977,9 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
put_ldev(mdev);
}
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+ if (mdev->state.conn == C_AHEAD) {
+ ok = drbd_send_ack(mdev, P_RS_CANCEL, e);
+ } else if (likely((e->flags & EE_WAS_ERROR) == 0)) {
if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
inc_rs_pending(mdev);
ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
@@ -1096,25 +1081,27 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
if (unlikely(cancel))
goto out;
- if (unlikely((e->flags & EE_WAS_ERROR) != 0))
- goto out;
-
digest_size = crypto_hash_digestsize(mdev->verify_tfm);
- /* FIXME if this allocation fails, online verify will not terminate! */
digest = kmalloc(digest_size, GFP_NOIO);
- if (digest) {
- drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
- inc_rs_pending(mdev);
- ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
- digest, digest_size, P_OV_REPLY);
- if (!ok)
- dec_rs_pending(mdev);
- kfree(digest);
+ if (!digest) {
+ ok = 0; /* terminate the connection in case the allocation failed */
+ goto out;
}
+ if (likely(!(e->flags & EE_WAS_ERROR)))
+ drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
+ else
+ memset(digest, 0, digest_size);
+
+ inc_rs_pending(mdev);
+ ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
+ digest, digest_size, P_OV_REPLY);
+ if (!ok)
+ dec_rs_pending(mdev);
+ kfree(digest);
+
out:
drbd_free_ee(mdev, e);
-
dec_unacked(mdev);
return ok;
@@ -1129,7 +1116,6 @@ void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
mdev->ov_last_oos_size = size>>9;
}
drbd_set_out_of_sync(mdev, sector, size);
- set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
}
int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
@@ -1165,10 +1151,6 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
eq = !memcmp(digest, di->digest, digest_size);
kfree(digest);
}
- } else {
- ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
- if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
}
dec_unacked(mdev);
@@ -1182,7 +1164,13 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
drbd_free_ee(mdev, e);
- if (--mdev->ov_left == 0) {
+ --mdev->ov_left;
+
+ /* let's advance progress step marks only for every other megabyte */
+ if ((mdev->ov_left & 0x200) == 0x200)
+ drbd_advance_rs_marks(mdev, mdev->ov_left);
+
+ if (mdev->ov_left == 0) {
ov_oos_print(mdev);
drbd_resync_finished(mdev);
}
@@ -1235,6 +1223,22 @@ int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
}
+int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+{
+ struct drbd_request *req = container_of(w, struct drbd_request, w);
+ int ok;
+
+ if (unlikely(cancel)) {
+ req_mod(req, send_canceled);
+ return 1;
+ }
+
+ ok = drbd_send_oos(mdev, req);
+ req_mod(req, oos_handed_to_network);
+
+ return ok;
+}
+
/**
* w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
* @mdev: DRBD device.
@@ -1430,6 +1434,17 @@ int drbd_alter_sa(struct drbd_conf *mdev, int na)
return retcode;
}
+void drbd_rs_controller_reset(struct drbd_conf *mdev)
+{
+ atomic_set(&mdev->rs_sect_in, 0);
+ atomic_set(&mdev->rs_sect_ev, 0);
+ mdev->rs_in_flight = 0;
+ mdev->rs_planed = 0;
+ spin_lock(&mdev->peer_seq_lock);
+ fifo_set(&mdev->rs_plan_s, 0);
+ spin_unlock(&mdev->peer_seq_lock);
+}
+
/**
* drbd_start_resync() - Start the resync process
* @mdev: DRBD device.
@@ -1443,13 +1458,18 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
union drbd_state ns;
int r;
- if (mdev->state.conn >= C_SYNC_SOURCE) {
+ if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) {
dev_err(DEV, "Resync already running!\n");
return;
}
- /* In case a previous resync run was aborted by an IO error/detach on the peer. */
- drbd_rs_cancel_all(mdev);
+ if (mdev->state.conn < C_AHEAD) {
+ /* In case a previous resync run was aborted by an IO error/detach on the peer. */
+ drbd_rs_cancel_all(mdev);
+ /* This should be done when we abort the resync. We definitely do not
+ want to have this for connections going back and forth between
+ Ahead/Behind and SyncSource/SyncTarget */
+ }
if (side == C_SYNC_TARGET) {
/* Since application IO was locked out during C_WF_BITMAP_T and
@@ -1463,6 +1483,20 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
return;
}
+ } else /* C_SYNC_SOURCE */ {
+ r = drbd_khelper(mdev, "before-resync-source");
+ r = (r >> 8) & 0xff;
+ if (r > 0) {
+ if (r == 3) {
+ dev_info(DEV, "before-resync-source handler returned %d, "
+ "ignoring. Old userland tools?", r);
+ } else {
+ dev_info(DEV, "before-resync-source handler returned %d, "
+ "dropping connection.\n", r);
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ return;
+ }
+ }
}
drbd_state_lock(mdev);
@@ -1472,18 +1506,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
return;
}
- if (side == C_SYNC_TARGET) {
- mdev->bm_resync_fo = 0;
- } else /* side == C_SYNC_SOURCE */ {
- u64 uuid;
-
- get_random_bytes(&uuid, sizeof(u64));
- drbd_uuid_set(mdev, UI_BITMAP, uuid);
- drbd_send_sync_uuid(mdev, uuid);
-
- D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
- }
-
write_lock_irq(&global_state_lock);
ns = mdev->state;
@@ -1521,13 +1543,24 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
_drbd_pause_after(mdev);
}
write_unlock_irq(&global_state_lock);
- put_ldev(mdev);
if (r == SS_SUCCESS) {
dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
drbd_conn_str(ns.conn),
(unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
(unsigned long) mdev->rs_total);
+ if (side == C_SYNC_TARGET)
+ mdev->bm_resync_fo = 0;
+
+ /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
+ * with w_send_oos, or the sync target will get confused as to
+ * how many bits to resync. We cannot do that always, because for an
+ * empty resync and protocol < 95, we need to do it here, as we call
+ * drbd_resync_finished from here in that case.
+ * We drbd_gen_and_send_sync_uuid here for protocol < 96,
+ * and from after_state_ch otherwise. */
+ if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96)
+ drbd_gen_and_send_sync_uuid(mdev);
if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) {
/* This still has a race (about when exactly the peers
@@ -1547,13 +1580,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
drbd_resync_finished(mdev);
}
- atomic_set(&mdev->rs_sect_in, 0);
- atomic_set(&mdev->rs_sect_ev, 0);
- mdev->rs_in_flight = 0;
- mdev->rs_planed = 0;
- spin_lock(&mdev->peer_seq_lock);
- fifo_set(&mdev->rs_plan_s, 0);
- spin_unlock(&mdev->peer_seq_lock);
+ drbd_rs_controller_reset(mdev);
/* ns.conn may already be != mdev->state.conn,
* we may have been paused in between, or become paused until
* the timer triggers.
@@ -1563,6 +1590,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
drbd_md_sync(mdev);
}
+ put_ldev(mdev);
drbd_state_unlock(mdev);
}
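The extracted drbd_rs_number_requests() turns the configured (or controller-computed) sync rate into a request budget per timer tick: number = SLEEP_TIME * c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ). A worked example, assuming DRBD's usual constants of BM_BLOCK_SIZE = 4096 bytes and SLEEP_TIME = HZ/10 (a 100 ms tick), and a fixed rate of 10240 KiB/s:

/* Illustrative numbers only, under the assumptions stated above. */
/*
 * number = (HZ/10) * 10240 / ((4096/1024) * HZ)
 *        = 10240 / (4 * 10)
 *        = 256 resync requests of 4 KiB per 100 ms tick,
 * which is 256 * 4 KiB * 10 ticks/s = 10240 KiB/s, i.e. the configured rate.
 */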
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index 53586fa5ae1b..151f1a37478f 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -39,7 +39,7 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev,
return;
}
- if (FAULT_ACTIVE(mdev, fault_type))
+ if (drbd_insert_fault(mdev, fault_type))
bio_endio(bio, -EIO);
else
generic_make_request(bio);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0f17ad8585d7..b03771d4787c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -28,6 +28,7 @@
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
+#include <linux/syscore_ops.h>
#include <trace/events/power.h>
@@ -1340,35 +1341,31 @@ out:
}
EXPORT_SYMBOL(cpufreq_get);
+static struct sysdev_driver cpufreq_sysdev_driver = {
+ .add = cpufreq_add_dev,
+ .remove = cpufreq_remove_dev,
+};
+
/**
- * cpufreq_suspend - let the low level driver prepare for suspend
+ * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
+ *
+ * This function is only executed for the boot processor. The other CPUs
+ * have been put offline by means of CPU hotplug.
*/
-
-static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
+static int cpufreq_bp_suspend(void)
{
int ret = 0;
- int cpu = sysdev->id;
+ int cpu = smp_processor_id();
struct cpufreq_policy *cpu_policy;
dprintk("suspending cpu %u\n", cpu);
- if (!cpu_online(cpu))
- return 0;
-
- /* we may be lax here as interrupts are off. Nonetheless
- * we need to grab the correct cpu policy, as to check
- * whether we really run on this CPU.
- */
-
+ /* If there's no policy for the boot CPU, we have nothing to do. */
cpu_policy = cpufreq_cpu_get(cpu);
if (!cpu_policy)
- return -EINVAL;
-
- /* only handle each CPU group once */
- if (unlikely(cpu_policy->cpu != cpu))
- goto out;
+ return 0;
if (cpufreq_driver->suspend) {
ret = cpufreq_driver->suspend(cpu_policy);
@@ -1377,13 +1374,12 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
"step on CPU %u\n", cpu_policy->cpu);
}
-out:
cpufreq_cpu_put(cpu_policy);
return ret;
}
/**
- * cpufreq_resume - restore proper CPU frequency handling after resume
+ * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
*
* 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
* 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
@@ -1391,31 +1387,23 @@ out:
* what we believe it to be. This is a bit later than when it
 * should be, but nonetheless it's better than calling
* cpufreq_driver->get() here which might re-enable interrupts...
+ *
+ * This function is only executed for the boot CPU. The other CPUs have not
+ * been turned on yet.
*/
-static int cpufreq_resume(struct sys_device *sysdev)
+static void cpufreq_bp_resume(void)
{
int ret = 0;
- int cpu = sysdev->id;
+ int cpu = smp_processor_id();
struct cpufreq_policy *cpu_policy;
dprintk("resuming cpu %u\n", cpu);
- if (!cpu_online(cpu))
- return 0;
-
- /* we may be lax here as interrupts are off. Nonetheless
- * we need to grab the correct cpu policy, as to check
- * whether we really run on this CPU.
- */
-
+ /* If there's no policy for the boot CPU, we have nothing to do. */
cpu_policy = cpufreq_cpu_get(cpu);
if (!cpu_policy)
- return -EINVAL;
-
- /* only handle each CPU group once */
- if (unlikely(cpu_policy->cpu != cpu))
- goto fail;
+ return;
if (cpufreq_driver->resume) {
ret = cpufreq_driver->resume(cpu_policy);
@@ -1430,14 +1418,11 @@ static int cpufreq_resume(struct sys_device *sysdev)
fail:
cpufreq_cpu_put(cpu_policy);
- return ret;
}
-static struct sysdev_driver cpufreq_sysdev_driver = {
- .add = cpufreq_add_dev,
- .remove = cpufreq_remove_dev,
- .suspend = cpufreq_suspend,
- .resume = cpufreq_resume,
+static struct syscore_ops cpufreq_syscore_ops = {
+ .suspend = cpufreq_bp_suspend,
+ .resume = cpufreq_bp_resume,
};
@@ -2002,6 +1987,7 @@ static int __init cpufreq_core_init(void)
cpufreq_global_kobject = kobject_create_and_add("cpufreq",
&cpu_sysdev_class.kset.kobj);
BUG_ON(!cpufreq_global_kobject);
+ register_syscore_ops(&cpufreq_syscore_ops);
return 0;
}
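This converts cpufreq from per-sysdev suspend/resume callbacks to syscore operations, which, as the new comments state, run only on the boot CPU after the other CPUs have been taken offline. For reference, a minimal sketch of the syscore pattern for a hypothetical subsystem (the names below are invented):

#include <linux/syscore_ops.h>

static int my_syscore_suspend(void)
{
	/* runs late in suspend, interrupts off, boot CPU only */
	return 0;			/* a non-zero return aborts the suspend */
}

static void my_syscore_resume(void)
{
	/* undo whatever my_syscore_suspend() did */
}

static struct syscore_ops my_syscore_ops = {
	.suspend	= my_syscore_suspend,
	.resume		= my_syscore_resume,
};

static int __init my_init(void)
{
	register_syscore_ops(&my_syscore_ops);
	return 0;
}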
diff --git a/drivers/gpio/adp5588-gpio.c b/drivers/gpio/adp5588-gpio.c
index 33fc685cb385..3525ad918771 100644
--- a/drivers/gpio/adp5588-gpio.c
+++ b/drivers/gpio/adp5588-gpio.c
@@ -289,10 +289,10 @@ static int adp5588_irq_setup(struct adp5588_gpio *dev)
for (gpio = 0; gpio < dev->gpio_chip.ngpio; gpio++) {
int irq = gpio + dev->irq_base;
- set_irq_chip_data(irq, dev);
- set_irq_chip_and_handler(irq, &adp5588_irq_chip,
+ irq_set_chip_data(irq, dev);
+ irq_set_chip_and_handler(irq, &adp5588_irq_chip,
handle_level_irq);
- set_irq_nested_thread(irq, 1);
+ irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
/*
* ARM needs us to explicitly flag the IRQ as VALID,
@@ -300,7 +300,7 @@ static int adp5588_irq_setup(struct adp5588_gpio *dev)
*/
set_irq_flags(irq, IRQF_VALID);
#else
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
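This and the following GPIO driver changes are a mechanical rename to the current genirq accessor names; the behaviour is unchanged. The mapping, as it appears in these hunks:

	set_irq_chip()                  -> irq_set_chip()
	set_irq_handler()               -> irq_set_handler()
	set_irq_chip_and_handler()      -> irq_set_chip_and_handler()
	set_irq_chip_and_handler_name() -> irq_set_chip_and_handler_name()
	set_irq_chip_data()             -> irq_set_chip_data()
	set_irq_data()/get_irq_data()   -> irq_set_handler_data()/irq_get_handler_data()
	set_irq_chained_handler()       -> irq_set_chained_handler()
	set_irq_nested_thread()         -> irq_set_nested_thread()
	set_irq_noprobe()               -> irq_set_noprobe()

The ARM-specific set_irq_flags() calls are left as they are.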
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 649550e2cae9..36a2974815b7 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1656,51 +1656,6 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
chip->get
? (chip->get(chip, i) ? "hi" : "lo")
: "? ");
-
- if (!is_out) {
- int irq = gpio_to_irq(gpio);
- struct irq_desc *desc = irq_to_desc(irq);
-
- /* This races with request_irq(), set_irq_type(),
- * and set_irq_wake() ... but those are "rare".
- *
- * More significantly, trigger type flags aren't
- * currently maintained by genirq.
- */
- if (irq >= 0 && desc->action) {
- char *trigger;
-
- switch (desc->status & IRQ_TYPE_SENSE_MASK) {
- case IRQ_TYPE_NONE:
- trigger = "(default)";
- break;
- case IRQ_TYPE_EDGE_FALLING:
- trigger = "edge-falling";
- break;
- case IRQ_TYPE_EDGE_RISING:
- trigger = "edge-rising";
- break;
- case IRQ_TYPE_EDGE_BOTH:
- trigger = "edge-both";
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- trigger = "level-high";
- break;
- case IRQ_TYPE_LEVEL_LOW:
- trigger = "level-low";
- break;
- default:
- trigger = "?trigger?";
- break;
- }
-
- seq_printf(s, " irq-%d %s%s",
- irq, trigger,
- (desc->status & IRQ_WAKEUP)
- ? " wakeup" : "");
- }
- }
-
seq_printf(s, "\n");
}
}
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
index 9e1d01f0071a..ad6951edc16c 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/max732x.c
@@ -470,14 +470,14 @@ static int max732x_irq_setup(struct max732x_chip *chip,
if (!(chip->dir_input & (1 << lvl)))
continue;
- set_irq_chip_data(irq, chip);
- set_irq_chip_and_handler(irq, &max732x_irq_chip,
+ irq_set_chip_data(irq, chip);
+ irq_set_chip_and_handler(irq, &max732x_irq_chip,
handle_edge_irq);
- set_irq_nested_thread(irq, 1);
+ irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 2fc25dec7cf5..583e92592073 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -395,13 +395,13 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
int irq = lvl + chip->irq_base;
- set_irq_chip_data(irq, chip);
- set_irq_chip_and_handler(irq, &pca953x_irq_chip,
+ irq_set_chip_data(irq, chip);
+ irq_set_chip_and_handler(irq, &pca953x_irq_chip,
handle_edge_irq);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 838ddbdf90cc..6fcb28cdd862 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -210,7 +210,7 @@ static struct irq_chip pl061_irqchip = {
static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
{
- struct list_head *chip_list = get_irq_data(irq);
+ struct list_head *chip_list = irq_get_handler_data(irq);
struct list_head *ptr;
struct pl061_gpio *chip;
@@ -294,7 +294,7 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
ret = -ENODEV;
goto iounmap;
}
- set_irq_chained_handler(irq, pl061_irq_handler);
+ irq_set_chained_handler(irq, pl061_irq_handler);
if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */
chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL);
if (chip_list == NULL) {
@@ -303,9 +303,9 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
goto iounmap;
}
INIT_LIST_HEAD(chip_list);
- set_irq_data(irq, chip_list);
+ irq_set_handler_data(irq, chip_list);
} else
- chip_list = get_irq_data(irq);
+ chip_list = irq_get_handler_data(irq);
list_add(&chip->list, chip_list);
for (i = 0; i < PL061_GPIO_NR; i++) {
@@ -315,10 +315,10 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
else
pl061_direction_input(&chip->gc, i);
- set_irq_chip(i+chip->irq_base, &pl061_irqchip);
- set_irq_handler(i+chip->irq_base, handle_simple_irq);
+ irq_set_chip_and_handler(i + chip->irq_base, &pl061_irqchip,
+ handle_simple_irq);
set_irq_flags(i+chip->irq_base, IRQF_VALID);
- set_irq_chip_data(i+chip->irq_base, chip);
+ irq_set_chip_data(i + chip->irq_base, chip);
}
return 0;
diff --git a/drivers/gpio/stmpe-gpio.c b/drivers/gpio/stmpe-gpio.c
index eb2901f8ab5e..4c980b573328 100644
--- a/drivers/gpio/stmpe-gpio.c
+++ b/drivers/gpio/stmpe-gpio.c
@@ -254,14 +254,14 @@ static int __devinit stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio)
int irq;
for (irq = base; irq < base + stmpe_gpio->chip.ngpio; irq++) {
- set_irq_chip_data(irq, stmpe_gpio);
- set_irq_chip_and_handler(irq, &stmpe_gpio_irq_chip,
+ irq_set_chip_data(irq, stmpe_gpio);
+ irq_set_chip_and_handler(irq, &stmpe_gpio_irq_chip,
handle_simple_irq);
- set_irq_nested_thread(irq, 1);
+ irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
@@ -277,8 +277,8 @@ static void stmpe_gpio_irq_remove(struct stmpe_gpio *stmpe_gpio)
#ifdef CONFIG_ARM
set_irq_flags(irq, 0);
#endif
- set_irq_chip_and_handler(irq, NULL, NULL);
- set_irq_chip_data(irq, NULL);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
}
}
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c
index d2f874c3d3d5..a4f73534394e 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/sx150x.c
@@ -551,12 +551,12 @@ static int sx150x_install_irq_chip(struct sx150x_chip *chip,
for (n = 0; n < chip->dev_cfg->ngpios; ++n) {
irq = irq_base + n;
- set_irq_chip_and_handler(irq, &chip->irq_chip, handle_edge_irq);
- set_irq_nested_thread(irq, 1);
+ irq_set_chip_and_handler(irq, &chip->irq_chip, handle_edge_irq);
+ irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
@@ -583,8 +583,7 @@ static void sx150x_remove_irq_chip(struct sx150x_chip *chip)
for (n = 0; n < chip->dev_cfg->ngpios; ++n) {
irq = chip->irq_base + n;
- set_irq_handler(irq, NULL);
- set_irq_chip(irq, NULL);
+ irq_set_chip_and_handler(irq, NULL, NULL);
}
}
diff --git a/drivers/gpio/tc3589x-gpio.c b/drivers/gpio/tc3589x-gpio.c
index 27200af1a595..2a82e8999a42 100644
--- a/drivers/gpio/tc3589x-gpio.c
+++ b/drivers/gpio/tc3589x-gpio.c
@@ -239,14 +239,14 @@ static int tc3589x_gpio_irq_init(struct tc3589x_gpio *tc3589x_gpio)
int irq;
for (irq = base; irq < base + tc3589x_gpio->chip.ngpio; irq++) {
- set_irq_chip_data(irq, tc3589x_gpio);
- set_irq_chip_and_handler(irq, &tc3589x_gpio_irq_chip,
+ irq_set_chip_data(irq, tc3589x_gpio);
+ irq_set_chip_and_handler(irq, &tc3589x_gpio_irq_chip,
handle_simple_irq);
- set_irq_nested_thread(irq, 1);
+ irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
@@ -262,8 +262,8 @@ static void tc3589x_gpio_irq_remove(struct tc3589x_gpio *tc3589x_gpio)
#ifdef CONFIG_ARM
set_irq_flags(irq, 0);
#endif
- set_irq_chip_and_handler(irq, NULL, NULL);
- set_irq_chip_data(irq, NULL);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
}
}
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
index ffcd815b8b8b..edbe1eae531f 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/timbgpio.c
@@ -196,7 +196,7 @@ out:
static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
{
- struct timbgpio *tgpio = get_irq_data(irq);
+ struct timbgpio *tgpio = irq_get_handler_data(irq);
unsigned long ipr;
int offset;
@@ -292,16 +292,16 @@ static int __devinit timbgpio_probe(struct platform_device *pdev)
return 0;
for (i = 0; i < pdata->nr_pins; i++) {
- set_irq_chip_and_handler_name(tgpio->irq_base + i,
+ irq_set_chip_and_handler_name(tgpio->irq_base + i,
&timbgpio_irqchip, handle_simple_irq, "mux");
- set_irq_chip_data(tgpio->irq_base + i, tgpio);
+ irq_set_chip_data(tgpio->irq_base + i, tgpio);
#ifdef CONFIG_ARM
set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE);
#endif
}
- set_irq_data(irq, tgpio);
- set_irq_chained_handler(irq, timbgpio_irq);
+ irq_set_handler_data(irq, tgpio);
+ irq_set_chained_handler(irq, timbgpio_irq);
return 0;
@@ -327,12 +327,12 @@ static int __devexit timbgpio_remove(struct platform_device *pdev)
if (irq >= 0 && tgpio->irq_base > 0) {
int i;
for (i = 0; i < tgpio->gpio.ngpio; i++) {
- set_irq_chip(tgpio->irq_base + i, NULL);
- set_irq_chip_data(tgpio->irq_base + i, NULL);
+ irq_set_chip(tgpio->irq_base + i, NULL);
+ irq_set_chip_data(tgpio->irq_base + i, NULL);
}
- set_irq_handler(irq, NULL);
- set_irq_data(irq, NULL);
+ irq_set_handler(irq, NULL);
+ irq_set_handler_data(irq, NULL);
}
err = gpiochip_remove(&tgpio->gpio);
diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
index cffa3bd7ad3b..a365be040b36 100644
--- a/drivers/gpio/vr41xx_giu.c
+++ b/drivers/gpio/vr41xx_giu.c
@@ -238,13 +238,13 @@ void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger,
break;
}
}
- set_irq_chip_and_handler(GIU_IRQ(pin),
+ irq_set_chip_and_handler(GIU_IRQ(pin),
&giuint_low_irq_chip,
handle_edge_irq);
} else {
giu_clear(GIUINTTYPL, mask);
giu_clear(GIUINTHTSELL, mask);
- set_irq_chip_and_handler(GIU_IRQ(pin),
+ irq_set_chip_and_handler(GIU_IRQ(pin),
&giuint_low_irq_chip,
handle_level_irq);
}
@@ -273,13 +273,13 @@ void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger,
break;
}
}
- set_irq_chip_and_handler(GIU_IRQ(pin),
+ irq_set_chip_and_handler(GIU_IRQ(pin),
&giuint_high_irq_chip,
handle_edge_irq);
} else {
giu_clear(GIUINTTYPH, mask);
giu_clear(GIUINTHTSELH, mask);
- set_irq_chip_and_handler(GIU_IRQ(pin),
+ irq_set_chip_and_handler(GIU_IRQ(pin),
&giuint_high_irq_chip,
handle_level_irq);
}
@@ -539,9 +539,9 @@ static int __devinit giu_probe(struct platform_device *pdev)
chip = &giuint_high_irq_chip;
if (trigger & (1 << pin))
- set_irq_chip_and_handler(i, chip, handle_edge_irq);
+ irq_set_chip_and_handler(i, chip, handle_edge_irq);
else
- set_irq_chip_and_handler(i, chip, handle_level_irq);
+ irq_set_chip_and_handler(i, chip, handle_level_irq);
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 4c95b5fd9df3..799e1490cf24 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1073,6 +1073,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
uint32_t __user *encoder_id;
struct drm_mode_group *mode_group;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
/*
@@ -1244,6 +1247,9 @@ int drm_mode_getcrtc(struct drm_device *dev,
struct drm_mode_object *obj;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
@@ -1312,6 +1318,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
uint64_t __user *prop_values;
uint32_t __user *encoder_ptr;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
@@ -1431,6 +1440,9 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
struct drm_encoder *encoder;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, enc_resp->encoder_id,
DRM_MODE_OBJECT_ENCODER);
@@ -1486,6 +1498,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
int ret = 0;
int i;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
@@ -1603,6 +1618,9 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
struct drm_crtc *crtc;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!req->flags) {
DRM_ERROR("no operation set\n");
return -EINVAL;
@@ -1667,6 +1685,9 @@ int drm_mode_addfb(struct drm_device *dev,
struct drm_framebuffer *fb;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_ERROR("mode new framebuffer width not within limits\n");
return -EINVAL;
@@ -1724,6 +1745,9 @@ int drm_mode_rmfb(struct drm_device *dev,
int ret = 0;
int found = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
/* TODO check that we really get a framebuffer back. */
@@ -1780,6 +1804,9 @@ int drm_mode_getfb(struct drm_device *dev,
struct drm_framebuffer *fb;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
@@ -1813,6 +1840,9 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
int num_clips;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
@@ -1996,6 +2026,9 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
@@ -2042,6 +2075,9 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
@@ -2211,6 +2247,9 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
uint64_t __user *values_ptr;
uint32_t __user *blob_length_ptr;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
@@ -2333,6 +2372,9 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
int ret = 0;
void *blob_ptr;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
if (!obj) {
@@ -2393,6 +2435,9 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
int ret = -EINVAL;
int i;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
@@ -2509,6 +2554,9 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
int size;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
@@ -2560,6 +2608,9 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
int size;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 57ce27c9a747..74e4ff578017 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -499,11 +499,12 @@ EXPORT_SYMBOL(drm_gem_vm_open);
void drm_gem_vm_close(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->dev;
- mutex_lock(&obj->dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
drm_vm_close_locked(vma);
drm_gem_object_unreference(obj);
- mutex_unlock(&obj->dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 7f6912a16761..904d7e9c8e47 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -280,6 +280,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
if (dev->driver->dumb_create)
req->value = 1;
break;
+ case DRM_CAP_VBLANK_HIGH_CRTC:
+ req->value = 1;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index a34ef97d3c81..741457bd1c46 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1125,7 +1125,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
{
union drm_wait_vblank *vblwait = data;
int ret = 0;
- unsigned int flags, seq, crtc;
+ unsigned int flags, seq, crtc, high_crtc;
if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
return -EINVAL;
@@ -1134,16 +1134,21 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
return -EINVAL;
if (vblwait->request.type &
- ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
+ ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK)) {
DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
vblwait->request.type,
- (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
+ (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK));
return -EINVAL;
}
flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
- crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
-
+ high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
+ if (high_crtc)
+ crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+ else
+ crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
if (crtc >= dev->num_crtcs)
return -EINVAL;
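Together with the DRM_CAP_VBLANK_HIGH_CRTC capability added in drm_ioctl.c above, the extended type mask lets userspace wait for vblanks on CRTCs beyond the legacy primary/secondary pair by placing the CRTC index in the high bits of the request type. A rough userspace sketch against the kernel uapi names used in this hunk, assuming the header is available as <drm/drm.h> and that crtc_index has already been validated against the device's CRTC count:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Wait for the next vblank on an arbitrary CRTC index (sketch). */
static int wait_vblank_on_crtc(int fd, unsigned int crtc_index)
{
	union drm_wait_vblank vbl;

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = _DRM_VBLANK_RELATIVE;
	if (crtc_index > 1)
		vbl.request.type |= crtc_index << _DRM_VBLANK_HIGH_CRTC_SHIFT;
	else if (crtc_index == 1)
		vbl.request.type |= _DRM_VBLANK_SECONDARY;
	vbl.request.sequence = 1;	/* relative: the next vblank */

	return ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
}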
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 09e0327fc6ce..87c8e29465e3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -892,7 +892,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
- GEN6_CAGF_SHIFT) * 100);
+ GEN6_CAGF_SHIFT) * 50);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -908,15 +908,15 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
max_freq = (rp_state_cap & 0xff0000) >> 16;
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- max_freq * 100);
+ max_freq * 50);
max_freq = (rp_state_cap & 0xff00) >> 8;
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- max_freq * 100);
+ max_freq * 50);
max_freq = rp_state_cap & 0xff;
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- max_freq * 100);
+ max_freq * 50);
__gen6_gt_force_wake_put(dev_priv);
} else {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c4c2855d002d..7ce3f353af33 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -224,7 +224,7 @@ i915_gem_dumb_create(struct drm_file *file,
struct drm_mode_create_dumb *args)
{
/* have to work out size/pitch and return them */
- args->pitch = ALIGN(args->width & ((args->bpp + 1) / 8), 64);
+ args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, dev,
args->size, &args->handle);
@@ -1356,9 +1356,10 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
if (!obj->fault_mappable)
return;
- unmap_mapping_range(obj->base.dev->dev_mapping,
- (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
- obj->base.size, 1);
+ if (obj->base.dev->dev_mapping)
+ unmap_mapping_range(obj->base.dev->dev_mapping,
+ (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
+ obj->base.size, 1);
obj->fault_mappable = false;
}
@@ -1796,8 +1797,10 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
return;
spin_lock(&file_priv->mm.lock);
- list_del(&request->client_list);
- request->file_priv = NULL;
+ if (request->file_priv) {
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ }
spin_unlock(&file_priv->mm.lock);
}
@@ -2217,13 +2220,18 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
{
int ret;
+ if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
+ return 0;
+
trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
ret = ring->flush(ring, invalidate_domains, flush_domains);
if (ret)
return ret;
- i915_gem_process_flushing_list(ring, flush_domains);
+ if (flush_domains & I915_GEM_GPU_DOMAINS)
+ i915_gem_process_flushing_list(ring, flush_domains);
+
return 0;
}
@@ -2579,8 +2587,23 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
reg = &dev_priv->fence_regs[obj->fence_reg];
list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
- if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
- pipelined = NULL;
+ if (obj->tiling_changed) {
+ ret = i915_gem_object_flush_fence(obj, pipelined);
+ if (ret)
+ return ret;
+
+ if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+ pipelined = NULL;
+
+ if (pipelined) {
+ reg->setup_seqno =
+ i915_gem_next_request_seqno(pipelined);
+ obj->last_fenced_seqno = reg->setup_seqno;
+ obj->last_fenced_ring = pipelined;
+ }
+
+ goto update;
+ }
if (!pipelined) {
if (reg->setup_seqno) {
@@ -2599,31 +2622,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
ret = i915_gem_object_flush_fence(obj, pipelined);
if (ret)
return ret;
- } else if (obj->tiling_changed) {
- if (obj->fenced_gpu_access) {
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->ring,
- 0, obj->base.write_domain);
- if (ret)
- return ret;
- }
-
- obj->fenced_gpu_access = false;
- }
- }
-
- if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
- pipelined = NULL;
- BUG_ON(!pipelined && reg->setup_seqno);
-
- if (obj->tiling_changed) {
- if (pipelined) {
- reg->setup_seqno =
- i915_gem_next_request_seqno(pipelined);
- obj->last_fenced_seqno = reg->setup_seqno;
- obj->last_fenced_ring = pipelined;
- }
- goto update;
}
return 0;
@@ -3606,6 +3604,8 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
return;
}
+ trace_i915_gem_object_destroy(obj);
+
if (obj->base.map_list.map)
i915_gem_free_mmap_offset(obj);
@@ -3615,8 +3615,6 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
kfree(obj->page_cpu_valid);
kfree(obj->bit_17);
kfree(obj);
-
- trace_i915_gem_object_destroy(obj);
}
void i915_gem_free_object(struct drm_gem_object *gem_obj)
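A worked example of the pitch fix in the i915_gem_dumb_create() hunk above (the numbers are illustrative, not from the patch): for a 1366-pixel-wide, 24 bpp buffer the old expression ALIGN(width & ((bpp + 1) / 8), 64) evaluates to ALIGN(1366 & 3, 64) = 64, far too small, while the corrected ALIGN(width * ((bpp + 7) / 8), 64) gives ALIGN(1366 * 3, 64) = 4160. As a sketch:

	/* Dumb BO pitch: round bpp up to whole bytes per pixel, then round
	 * the stride up to 64 bytes (example values only).
	 */
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	static unsigned int dumb_pitch(unsigned int width, unsigned int bpp)
	{
		unsigned int cpp = (bpp + 7) / 8;	/* bytes per pixel */

		return ALIGN_UP(width * cpp, 64);	/* 1366 * 3 -> 4160 */
	}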
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7ff7f933ddf1..20a4cc5b818f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -367,6 +367,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
+ /* We can't wait for rendering with pagefaults disabled */
+ if (obj->active && in_atomic())
+ return -EFAULT;
+
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret)
return ret;
@@ -440,15 +444,24 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
- int ret;
-
+ int ret = 0;
+
+ /* This is the fast path and we cannot handle a pagefault whilst
+ * holding the struct mutex lest the user pass in the relocations
+ * contained within a mmapped bo. In such a case, the page
+ * fault handler would call i915_gem_fault() and we would try to
+ * acquire the struct mutex again. Obviously this is bad and so
+ * lockdep complains vehemently.
+ */
+ pagefault_disable();
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret)
- return ret;
+ break;
}
+ pagefault_enable();
- return 0;
+ return ret;
}
static int
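The relocation hunks above disable pagefaults while the struct mutex is held: if a relocation lives in a userspace mapping that is not resident, the copy fails with -EFAULT instead of recursing into i915_gem_fault(), and the caller can retry along a slower path outside the lock. A rough sketch of the general pattern (not the driver's exact code):

	/* Attempt a user copy with pagefaults disabled; the caller retries
	 * outside the lock if this fails.
	 */
	static int copy_reloc_atomic(void *dst, const void __user *src, size_t len)
	{
		unsigned long unread;

		pagefault_disable();
		unread = __copy_from_user_inatomic(dst, src, len);
		pagefault_enable();

		return unread ? -EFAULT : 0;
	}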
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3106c0dc8389..432fc04c6bff 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1516,9 +1516,10 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
reg = PIPECONF(pipe);
val = I915_READ(reg);
- val |= PIPECONF_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ if (val & PIPECONF_ENABLE)
+ return;
+
+ I915_WRITE(reg, val | PIPECONF_ENABLE);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
@@ -1552,9 +1553,10 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
reg = PIPECONF(pipe);
val = I915_READ(reg);
- val &= ~PIPECONF_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ if ((val & PIPECONF_ENABLE) == 0)
+ return;
+
+ I915_WRITE(reg, val & ~PIPECONF_ENABLE);
intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
@@ -1577,9 +1579,10 @@ static void intel_enable_plane(struct drm_i915_private *dev_priv,
reg = DSPCNTR(plane);
val = I915_READ(reg);
- val |= DISPLAY_PLANE_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ if (val & DISPLAY_PLANE_ENABLE)
+ return;
+
+ I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
@@ -1610,9 +1613,10 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
reg = DSPCNTR(plane);
val = I915_READ(reg);
- val &= ~DISPLAY_PLANE_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ if ((val & DISPLAY_PLANE_ENABLE) == 0)
+ return;
+
+ I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
intel_flush_display_plane(dev_priv, plane);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
@@ -1769,7 +1773,6 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
return;
I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
- POSTING_READ(DPFC_CONTROL);
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
@@ -1861,7 +1864,6 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
return;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
- POSTING_READ(ILK_DPFC_CONTROL);
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
@@ -3883,10 +3885,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
display, cursor);
}
-static inline bool single_plane_enabled(unsigned int mask)
-{
- return mask && (mask & -mask) == 0;
-}
+#define single_plane_enabled(mask) is_power_of_2(mask)
static void g4x_update_wm(struct drm_device *dev)
{
@@ -5777,7 +5776,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
- POSTING_READ(dpll_reg);
intel_wait_for_vblank(dev, pipe);
dpll = I915_READ(dpll_reg);
@@ -5821,7 +5819,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
- dpll = I915_READ(dpll_reg);
intel_wait_for_vblank(dev, pipe);
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
@@ -6933,7 +6930,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (pcu_mbox & (1<<31)) { /* OC supported */
max_freq = pcu_mbox & 0xff;
- DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 100);
+ DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
/* In units of 100MHz */
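The x100 -> x50 changes here and in i915_debugfs.c above apply the same scaling, which suggests the raw GEN6 frequency ratios are in 50 MHz steps. A hypothetical helper (not in the patch) that makes the conversion explicit:

	/* Convert a GEN6 frequency ratio to MHz, assuming 50 MHz steps:
	 * a raw ratio of 22 would report 1100 MHz rather than 2200 MHz.
	 */
	static inline unsigned int gen6_ratio_to_mhz(unsigned int ratio)
	{
		return ratio * 50;
	}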
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d29e33f815d7..0daefca5cbb8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1957,9 +1957,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
} else {
/* if this fails, presume the device is a ghost */
- DRM_ERROR("failed to retrieve link info\n");
- intel_dp_destroy(&intel_connector->base);
+ DRM_INFO("failed to retrieve link info, disabling eDP\n");
intel_dp_encoder_destroy(&intel_dp->base.base);
+ intel_dp_destroy(&intel_connector->base);
return;
}
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 789c47801ba8..e9e6f71418a4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -65,62 +65,60 @@ render_ring_flush(struct intel_ring_buffer *ring,
u32 cmd;
int ret;
- if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+ * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ if ((invalidate_domains|flush_domains) &
+ I915_GEM_DOMAIN_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+ if (INTEL_INFO(dev)->gen < 4) {
/*
- * read/write caches:
- *
- * I915_GEM_DOMAIN_RENDER is always invalidated, but is
- * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
- * also flushed at 2d versus 3d pipeline switches.
- *
- * read-only caches:
- *
- * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
- * MI_READ_FLUSH is set, and is always flushed on 965.
- *
- * I915_GEM_DOMAIN_COMMAND may not exist?
- *
- * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
- * invalidated when MI_EXE_FLUSH is set.
- *
- * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
- * invalidated with every MI_FLUSH.
- *
- * TLBs:
- *
- * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
- * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
- * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
- * are flushed at any MI_FLUSH.
+ * On the 965, the sampler cache always gets flushed
+ * and this bit is reserved.
*/
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+ }
+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+ cmd |= MI_EXE_FLUSH;
- cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- if ((invalidate_domains|flush_domains) &
- I915_GEM_DOMAIN_RENDER)
- cmd &= ~MI_NO_WRITE_FLUSH;
- if (INTEL_INFO(dev)->gen < 4) {
- /*
- * On the 965, the sampler cache always gets flushed
- * and this bit is reserved.
- */
- if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
- cmd |= MI_READ_FLUSH;
- }
- if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
- cmd |= MI_EXE_FLUSH;
-
- if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
- (IS_G4X(dev) || IS_GEN5(dev)))
- cmd |= MI_INVALIDATE_ISP;
+ if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+ (IS_G4X(dev) || IS_GEN5(dev)))
+ cmd |= MI_INVALIDATE_ISP;
- ret = intel_ring_begin(ring, 2);
- if (ret)
- return ret;
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
- }
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
@@ -568,9 +566,6 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
{
int ret;
- if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
- return 0;
-
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
@@ -1056,9 +1051,6 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
uint32_t cmd;
int ret;
- if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
-
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
@@ -1230,9 +1222,6 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
uint32_t cmd;
int ret;
- if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
- return 0;
-
ret = blt_ring_begin(ring, 4);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 3cd3234ba0af..10e41af6b026 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -957,7 +957,11 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
/* adjust pixel clock as needed */
adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
- if (ASIC_IS_AVIVO(rdev))
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ /* TV seems to prefer the legacy algo on some boards */
+ radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+ &ref_div, &post_div);
+ else if (ASIC_IS_AVIVO(rdev))
radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
else
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index cf7c8d5b4ec2..cf602e2d0718 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -448,7 +448,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
{
- int edid_info;
+ int edid_info, size;
struct edid *edid;
unsigned char *raw;
edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
@@ -456,11 +456,12 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
return false;
raw = rdev->bios + edid_info;
- edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL);
+ size = EDID_LENGTH * (raw[0x7e] + 1);
+ edid = kmalloc(size, GFP_KERNEL);
if (edid == NULL)
return false;
- memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1));
+ memcpy((unsigned char *)edid, raw, size);
if (!drm_edid_is_valid(edid)) {
kfree(edid);
@@ -468,6 +469,7 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
}
rdev->mode_info.bios_hardcoded_edid = edid;
+ rdev->mode_info.bios_hardcoded_edid_size = size;
return true;
}
@@ -475,8 +477,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
struct edid *
radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
{
- if (rdev->mode_info.bios_hardcoded_edid)
- return rdev->mode_info.bios_hardcoded_edid;
+ struct edid *edid;
+
+ if (rdev->mode_info.bios_hardcoded_edid) {
+ edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
+ if (edid) {
+ memcpy((unsigned char *)edid,
+ (unsigned char *)rdev->mode_info.bios_hardcoded_edid,
+ rdev->mode_info.bios_hardcoded_edid_size);
+ return edid;
+ }
+ }
return NULL;
}
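The copy above could also be written with kmemdup(); a sketch of an equivalent form, using nothing beyond what the hunk already relies on:

	/* Return a caller-owned copy of the BIOS-hardcoded EDID, or NULL. */
	struct edid *
	radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
	{
		if (!rdev->mode_info.bios_hardcoded_edid)
			return NULL;

		return kmemdup(rdev->mode_info.bios_hardcoded_edid,
			       rdev->mode_info.bios_hardcoded_edid_size,
			       GFP_KERNEL);
	}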
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 28c7961cd19b..2ef6d5135064 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -633,6 +633,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector,
static enum drm_connector_status
radeon_vga_detect(struct drm_connector *connector, bool force)
{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
@@ -683,6 +685,17 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
if (ret == connector_status_connected)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
+
+ /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
+ * vbios to deal with KVMs. If we have one and are not able to detect a monitor
+ * by other means, assume the CRT is connected and use that EDID.
+ */
+ if ((!rdev->is_atom_bios) &&
+ (ret == connector_status_disconnected) &&
+ rdev->mode_info.bios_hardcoded_edid_size) {
+ ret = connector_status_connected;
+ }
+
radeon_connector_update_scratch_regs(connector, ret);
return ret;
}
@@ -794,6 +807,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
static enum drm_connector_status
radeon_dvi_detect(struct drm_connector *connector, bool force)
{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
struct drm_encoder_helper_funcs *encoder_funcs;
@@ -833,8 +848,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
* you don't really know what's connected to which port as both are digital.
*/
if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
- struct drm_device *dev = connector->dev;
- struct radeon_device *rdev = dev->dev_private;
struct drm_connector *list_connector;
struct radeon_connector *list_radeon_connector;
list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
@@ -899,6 +912,19 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
}
+ /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
+ * vbios to deal with KVMs. If we have one and are not able to detect a monitor
+ * by other means, assume the DFP is connected and use that EDID. In most
+ * cases the DVI port is actually a virtual KVM port connected to the service
+ * processor.
+ */
+ if ((!rdev->is_atom_bios) &&
+ (ret == connector_status_disconnected) &&
+ rdev->mode_info.bios_hardcoded_edid_size) {
+ radeon_connector->use_digital = true;
+ ret = connector_status_connected;
+ }
+
out:
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e4582814bb78..9c57538231d5 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -239,6 +239,7 @@ struct radeon_mode_info {
struct drm_property *underscan_vborder_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
+ int bios_hardcoded_edid_size;
/* pointer to fbdev info structure */
struct radeon_fbdev *rfbdev;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 2aed03bde4b2..08de669e025a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -365,12 +365,14 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
else if (strncmp("high", buf, strlen("high")) == 0)
rdev->pm.profile = PM_PROFILE_HIGH;
else {
- DRM_ERROR("invalid power profile!\n");
+ count = -EINVAL;
goto fail;
}
radeon_pm_update_profile(rdev);
radeon_pm_set_clocks(rdev);
- }
+ } else
+ count = -EINVAL;
+
fail:
mutex_unlock(&rdev->pm.mutex);
@@ -413,7 +415,7 @@ static ssize_t radeon_set_pm_method(struct device *dev,
mutex_unlock(&rdev->pm.mutex);
cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
} else {
- DRM_ERROR("invalid power method!\n");
+ count = -EINVAL;
goto fail;
}
radeon_pm_compute_clocks(rdev);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 81131eda5544..060ef6327876 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -315,11 +315,22 @@ config SENSORS_F71805F
will be called f71805f.
config SENSORS_F71882FG
- tristate "Fintek F71858FG, F71862FG, F71882FG, F71889FG and F8000"
+ tristate "Fintek F71882FG and compatibles"
help
If you say yes here you get support for hardware monitoring
- features of the Fintek F71858FG, F71862FG/71863FG, F71882FG/F71883FG,
- F71889FG and F8000 Super-I/O chips.
+ features of many Fintek Super-I/O (LPC) chips. The currently
+ supported chips are:
+ F71808E
+ F71858FG
+ F71862FG
+ F71863FG
+ F71869F/E
+ F71882FG
+ F71883FG
+ F71889FG/ED/A
+ F8000
+ F81801U
+ F81865F
This driver can also be built as a module. If so, the module
will be called f71882fg.
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index a4d430ee7e20..ca07a32447c2 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -54,7 +54,9 @@
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F71889_ID 0x0723 /* Chipset ID */
#define SIO_F71889E_ID 0x0909 /* Chipset ID */
+#define SIO_F71889A_ID 0x1005 /* Chipset ID */
#define SIO_F8000_ID 0x0581 /* Chipset ID */
+#define SIO_F81865_ID 0x0704 /* Chipset ID */
#define REGION_LENGTH 8
#define ADDR_REG_OFFSET 5
@@ -106,7 +108,7 @@ module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
enum chips { f71808e, f71858fg, f71862fg, f71869, f71882fg, f71889fg,
- f71889ed, f8000 };
+ f71889ed, f71889a, f8000, f81865f };
static const char *f71882fg_names[] = {
"f71808e",
@@ -114,42 +116,76 @@ static const char *f71882fg_names[] = {
"f71862fg",
"f71869", /* Both f71869f and f71869e, reg. compatible and same id */
"f71882fg",
- "f71889fg",
+ "f71889fg", /* f81801u too, same id */
"f71889ed",
+ "f71889a",
"f8000",
+ "f81865f",
};
-static const char f71882fg_has_in[8][F71882FG_MAX_INS] = {
- { 1, 1, 1, 1, 1, 1, 0, 1, 1 }, /* f71808e */
- { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, /* f71858fg */
- { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71862fg */
- { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71869 */
- { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71882fg */
- { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71889fg */
- { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71889ed */
- { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, /* f8000 */
+static const char f71882fg_has_in[][F71882FG_MAX_INS] = {
+ [f71808e] = { 1, 1, 1, 1, 1, 1, 0, 1, 1 },
+ [f71858fg] = { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
+ [f71862fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+ [f71869] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+ [f71882fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+ [f71889fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+ [f71889ed] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+ [f71889a] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+ [f8000] = { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
+ [f81865f] = { 1, 1, 1, 1, 1, 1, 1, 0, 0 },
};
-static const char f71882fg_has_in1_alarm[8] = {
- 0, /* f71808e */
- 0, /* f71858fg */
- 0, /* f71862fg */
- 0, /* f71869 */
- 1, /* f71882fg */
- 1, /* f71889fg */
- 1, /* f71889ed */
- 0, /* f8000 */
+static const char f71882fg_has_in1_alarm[] = {
+ [f71808e] = 0,
+ [f71858fg] = 0,
+ [f71862fg] = 0,
+ [f71869] = 0,
+ [f71882fg] = 1,
+ [f71889fg] = 1,
+ [f71889ed] = 1,
+ [f71889a] = 1,
+ [f8000] = 0,
+ [f81865f] = 1,
};
-static const char f71882fg_has_beep[8] = {
- 0, /* f71808e */
- 0, /* f71858fg */
- 1, /* f71862fg */
- 1, /* f71869 */
- 1, /* f71882fg */
- 1, /* f71889fg */
- 1, /* f71889ed */
- 0, /* f8000 */
+static const char f71882fg_has_beep[] = {
+ [f71808e] = 0,
+ [f71858fg] = 0,
+ [f71862fg] = 1,
+ [f71869] = 1,
+ [f71882fg] = 1,
+ [f71889fg] = 1,
+ [f71889ed] = 1,
+ [f71889a] = 1,
+ [f8000] = 0,
+ [f81865f] = 1,
+};
+
+static const char f71882fg_nr_fans[] = {
+ [f71808e] = 3,
+ [f71858fg] = 3,
+ [f71862fg] = 3,
+ [f71869] = 3,
+ [f71882fg] = 4,
+ [f71889fg] = 3,
+ [f71889ed] = 3,
+ [f71889a] = 3,
+ [f8000] = 3,
+ [f81865f] = 2,
+};
+
+static const char f71882fg_nr_temps[] = {
+ [f71808e] = 2,
+ [f71858fg] = 3,
+ [f71862fg] = 3,
+ [f71869] = 3,
+ [f71882fg] = 3,
+ [f71889fg] = 3,
+ [f71889ed] = 3,
+ [f71889a] = 3,
+ [f8000] = 3,
+ [f81865f] = 2,
};
static struct platform_device *f71882fg_pdev;
@@ -1071,9 +1107,9 @@ static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
static struct f71882fg_data *f71882fg_update_device(struct device *dev)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
+ int nr_fans = f71882fg_nr_fans[data->type];
+ int nr_temps = f71882fg_nr_temps[data->type];
int nr, reg, point;
- int nr_fans = (data->type == f71882fg) ? 4 : 3;
- int nr_temps = (data->type == f71808e) ? 2 : 3;
mutex_lock(&data->update_lock);
@@ -2042,8 +2078,9 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
{
struct f71882fg_data *data;
struct f71882fg_sio_data *sio_data = pdev->dev.platform_data;
- int err, i, nr_fans = (sio_data->type == f71882fg) ? 4 : 3;
- int nr_temps = (sio_data->type == f71808e) ? 2 : 3;
+ int nr_fans = f71882fg_nr_fans[sio_data->type];
+ int nr_temps = f71882fg_nr_temps[sio_data->type];
+ int err, i;
u8 start_reg, reg;
data = kzalloc(sizeof(struct f71882fg_data), GFP_KERNEL);
@@ -2138,6 +2175,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
/* Fall through to select correct fan/pwm reg bank! */
case f71889fg:
case f71889ed:
+ case f71889a:
reg = f71882fg_read8(data, F71882FG_REG_FAN_FAULT_T);
if (reg & F71882FG_FAN_NEG_TEMP_EN)
data->auto_point_temp_signed = 1;
@@ -2163,16 +2201,12 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
case f71862fg:
err = (data->pwm_enable & 0x15) != 0x15;
break;
- case f71808e:
- case f71869:
- case f71882fg:
- case f71889fg:
- case f71889ed:
- err = 0;
- break;
case f8000:
err = data->pwm_enable & 0x20;
break;
+ default:
+ err = 0;
+ break;
}
if (err) {
dev_err(&pdev->dev,
@@ -2199,6 +2233,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
case f71869:
case f71889fg:
case f71889ed:
+ case f71889a:
for (i = 0; i < nr_fans; i++) {
data->pwm_auto_point_mapping[i] =
f71882fg_read8(data,
@@ -2276,8 +2311,9 @@ exit_free:
static int f71882fg_remove(struct platform_device *pdev)
{
struct f71882fg_data *data = platform_get_drvdata(pdev);
- int i, nr_fans = (data->type == f71882fg) ? 4 : 3;
- int nr_temps = (data->type == f71808e) ? 2 : 3;
+ int nr_fans = f71882fg_nr_fans[data->type];
+ int nr_temps = f71882fg_nr_temps[data->type];
+ int i;
u8 start_reg = f71882fg_read8(data, F71882FG_REG_START);
if (data->hwmon_dev)
@@ -2406,9 +2442,15 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
case SIO_F71889E_ID:
sio_data->type = f71889ed;
break;
+ case SIO_F71889A_ID:
+ sio_data->type = f71889a;
+ break;
case SIO_F8000_ID:
sio_data->type = f8000;
break;
+ case SIO_F81865_ID:
+ sio_data->type = f81865f;
+ break;
default:
pr_info("Unsupported Fintek device: %04x\n",
(unsigned int)devid);
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
index 6474512f49b0..edfb92e41735 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus_core.c
@@ -752,7 +752,7 @@ static void pmbus_add_boolean_cmp(struct pmbus_data *data,
static void pmbus_add_sensor(struct pmbus_data *data,
const char *name, const char *type, int seq,
int page, int reg, enum pmbus_sensor_classes class,
- bool update)
+ bool update, bool readonly)
{
struct pmbus_sensor *sensor;
@@ -765,7 +765,7 @@ static void pmbus_add_sensor(struct pmbus_data *data,
sensor->reg = reg;
sensor->class = class;
sensor->update = update;
- if (update)
+ if (readonly)
PMBUS_ADD_GET_ATTR(data, sensor->name, sensor,
data->num_sensors);
else
@@ -916,14 +916,14 @@ static void pmbus_find_attributes(struct i2c_client *client,
i0 = data->num_sensors;
pmbus_add_label(data, "in", in_index, "vin", 0);
- pmbus_add_sensor(data, "in", "input", in_index,
- 0, PMBUS_READ_VIN, PSC_VOLTAGE_IN, true);
+ pmbus_add_sensor(data, "in", "input", in_index, 0,
+ PMBUS_READ_VIN, PSC_VOLTAGE_IN, true, true);
if (pmbus_check_word_register(client, 0,
PMBUS_VIN_UV_WARN_LIMIT)) {
i1 = data->num_sensors;
pmbus_add_sensor(data, "in", "min", in_index,
0, PMBUS_VIN_UV_WARN_LIMIT,
- PSC_VOLTAGE_IN, false);
+ PSC_VOLTAGE_IN, false, false);
if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
pmbus_add_boolean_reg(data, "in", "min_alarm",
in_index,
@@ -937,7 +937,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i1 = data->num_sensors;
pmbus_add_sensor(data, "in", "lcrit", in_index,
0, PMBUS_VIN_UV_FAULT_LIMIT,
- PSC_VOLTAGE_IN, false);
+ PSC_VOLTAGE_IN, false, false);
if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
pmbus_add_boolean_reg(data, "in", "lcrit_alarm",
in_index,
@@ -951,7 +951,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i1 = data->num_sensors;
pmbus_add_sensor(data, "in", "max", in_index,
0, PMBUS_VIN_OV_WARN_LIMIT,
- PSC_VOLTAGE_IN, false);
+ PSC_VOLTAGE_IN, false, false);
if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
pmbus_add_boolean_reg(data, "in", "max_alarm",
in_index,
@@ -965,7 +965,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i1 = data->num_sensors;
pmbus_add_sensor(data, "in", "crit", in_index,
0, PMBUS_VIN_OV_FAULT_LIMIT,
- PSC_VOLTAGE_IN, false);
+ PSC_VOLTAGE_IN, false, false);
if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
pmbus_add_boolean_reg(data, "in", "crit_alarm",
in_index,
@@ -988,7 +988,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
if (info->func[0] & PMBUS_HAVE_VCAP) {
pmbus_add_label(data, "in", in_index, "vcap", 0);
pmbus_add_sensor(data, "in", "input", in_index, 0,
- PMBUS_READ_VCAP, PSC_VOLTAGE_IN, true);
+ PMBUS_READ_VCAP, PSC_VOLTAGE_IN, true, true);
in_index++;
}
@@ -1004,13 +1004,13 @@ static void pmbus_find_attributes(struct i2c_client *client,
i0 = data->num_sensors;
pmbus_add_label(data, "in", in_index, "vout", page + 1);
pmbus_add_sensor(data, "in", "input", in_index, page,
- PMBUS_READ_VOUT, PSC_VOLTAGE_OUT, true);
+ PMBUS_READ_VOUT, PSC_VOLTAGE_OUT, true, true);
if (pmbus_check_word_register(client, page,
PMBUS_VOUT_UV_WARN_LIMIT)) {
i1 = data->num_sensors;
pmbus_add_sensor(data, "in", "min", in_index, page,
PMBUS_VOUT_UV_WARN_LIMIT,
- PSC_VOLTAGE_OUT, false);
+ PSC_VOLTAGE_OUT, false, false);
if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
pmbus_add_boolean_reg(data, "in", "min_alarm",
in_index,
@@ -1025,7 +1025,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i1 = data->num_sensors;
pmbus_add_sensor(data, "in", "lcrit", in_index, page,
PMBUS_VOUT_UV_FAULT_LIMIT,
- PSC_VOLTAGE_OUT, false);
+ PSC_VOLTAGE_OUT, false, false);
if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
pmbus_add_boolean_reg(data, "in", "lcrit_alarm",
in_index,
@@ -1040,7 +1040,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i1 = data->num_sensors;
pmbus_add_sensor(data, "in", "max", in_index, page,
PMBUS_VOUT_OV_WARN_LIMIT,
- PSC_VOLTAGE_OUT, false);
+ PSC_VOLTAGE_OUT, false, false);
if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
pmbus_add_boolean_reg(data, "in", "max_alarm",
in_index,
@@ -1055,7 +1055,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i1 = data->num_sensors;
pmbus_add_sensor(data, "in", "crit", in_index, page,
PMBUS_VOUT_OV_FAULT_LIMIT,
- PSC_VOLTAGE_OUT, false);
+ PSC_VOLTAGE_OUT, false, false);
if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
pmbus_add_boolean_reg(data, "in", "crit_alarm",
in_index,
@@ -1088,14 +1088,14 @@ static void pmbus_find_attributes(struct i2c_client *client,
if (info->func[0] & PMBUS_HAVE_IIN) {
i0 = data->num_sensors;
pmbus_add_label(data, "curr", in_index, "iin", 0);
- pmbus_add_sensor(data, "curr", "input", in_index,
- 0, PMBUS_READ_IIN, PSC_CURRENT_IN, true);
+ pmbus_add_sensor(data, "curr", "input", in_index, 0,
+ PMBUS_READ_IIN, PSC_CURRENT_IN, true, true);
if (pmbus_check_word_register(client, 0,
PMBUS_IIN_OC_WARN_LIMIT)) {
i1 = data->num_sensors;
pmbus_add_sensor(data, "curr", "max", in_index,
0, PMBUS_IIN_OC_WARN_LIMIT,
- PSC_CURRENT_IN, false);
+ PSC_CURRENT_IN, false, false);
if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
pmbus_add_boolean_reg(data, "curr", "max_alarm",
in_index,
@@ -1108,7 +1108,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i1 = data->num_sensors;
pmbus_add_sensor(data, "curr", "crit", in_index,
0, PMBUS_IIN_OC_FAULT_LIMIT,
- PSC_CURRENT_IN, false);
+ PSC_CURRENT_IN, false, false);
if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
pmbus_add_boolean_reg(data, "curr",
"crit_alarm",
@@ -1131,13 +1131,13 @@ static void pmbus_find_attributes(struct i2c_client *client,
i0 = data->num_sensors;
pmbus_add_label(data, "curr", in_index, "iout", page + 1);
pmbus_add_sensor(data, "curr", "input", in_index, page,
- PMBUS_READ_IOUT, PSC_CURRENT_OUT, true);
+ PMBUS_READ_IOUT, PSC_CURRENT_OUT, true, true);
if (pmbus_check_word_register(client, page,
PMBUS_IOUT_OC_WARN_LIMIT)) {
i1 = data->num_sensors;
pmbus_add_sensor(data, "curr", "max", in_index, page,
PMBUS_IOUT_OC_WARN_LIMIT,
- PSC_CURRENT_OUT, false);
+ PSC_CURRENT_OUT, false, false);
if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) {
pmbus_add_boolean_reg(data, "curr", "max_alarm",
in_index,
@@ -1151,7 +1151,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i1 = data->num_sensors;
pmbus_add_sensor(data, "curr", "lcrit", in_index, page,
PMBUS_IOUT_UC_FAULT_LIMIT,
- PSC_CURRENT_OUT, false);
+ PSC_CURRENT_OUT, false, false);
if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) {
pmbus_add_boolean_reg(data, "curr",
"lcrit_alarm",
@@ -1166,7 +1166,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i1 = data->num_sensors;
pmbus_add_sensor(data, "curr", "crit", in_index, page,
PMBUS_IOUT_OC_FAULT_LIMIT,
- PSC_CURRENT_OUT, false);
+ PSC_CURRENT_OUT, false, false);
if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) {
pmbus_add_boolean_reg(data, "curr",
"crit_alarm",
@@ -1199,13 +1199,13 @@ static void pmbus_find_attributes(struct i2c_client *client,
i0 = data->num_sensors;
pmbus_add_label(data, "power", in_index, "pin", 0);
pmbus_add_sensor(data, "power", "input", in_index,
- 0, PMBUS_READ_PIN, PSC_POWER, true);
+ 0, PMBUS_READ_PIN, PSC_POWER, true, true);
if (pmbus_check_word_register(client, 0,
PMBUS_PIN_OP_WARN_LIMIT)) {
i1 = data->num_sensors;
pmbus_add_sensor(data, "power", "max", in_index,
0, PMBUS_PIN_OP_WARN_LIMIT, PSC_POWER,
- false);
+ false, false);
if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
pmbus_add_boolean_reg(data, "power",
"alarm",
@@ -1228,7 +1228,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i0 = data->num_sensors;
pmbus_add_label(data, "power", in_index, "pout", page + 1);
pmbus_add_sensor(data, "power", "input", in_index, page,
- PMBUS_READ_POUT, PSC_POWER, true);
+ PMBUS_READ_POUT, PSC_POWER, true, true);
/*
* Per hwmon sysfs API, power_cap is to be used to limit output
* power.
@@ -1241,7 +1241,8 @@ static void pmbus_find_attributes(struct i2c_client *client,
if (pmbus_check_word_register(client, page, PMBUS_POUT_MAX)) {
i1 = data->num_sensors;
pmbus_add_sensor(data, "power", "cap", in_index, page,
- PMBUS_POUT_MAX, PSC_POWER, false);
+ PMBUS_POUT_MAX, PSC_POWER,
+ false, false);
need_alarm = true;
}
if (pmbus_check_word_register(client, page,
@@ -1249,7 +1250,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i1 = data->num_sensors;
pmbus_add_sensor(data, "power", "max", in_index, page,
PMBUS_POUT_OP_WARN_LIMIT, PSC_POWER,
- false);
+ false, false);
need_alarm = true;
}
if (need_alarm && (info->func[page] & PMBUS_HAVE_STATUS_IOUT))
@@ -1264,7 +1265,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i1 = data->num_sensors;
pmbus_add_sensor(data, "power", "crit", in_index, page,
PMBUS_POUT_OP_FAULT_LIMIT, PSC_POWER,
- false);
+ false, false);
if (info->func[page] & PMBUS_HAVE_STATUS_IOUT)
pmbus_add_boolean_reg(data, "power",
"crit_alarm",
@@ -1302,7 +1303,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i0 = data->num_sensors;
pmbus_add_sensor(data, "temp", "input", in_index, page,
pmbus_temp_registers[t],
- PSC_TEMPERATURE, true);
+ PSC_TEMPERATURE, true, true);
/*
* PMBus provides only one status register for TEMP1-3.
@@ -1323,7 +1324,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i1 = data->num_sensors;
pmbus_add_sensor(data, "temp", "min", in_index,
page, PMBUS_UT_WARN_LIMIT,
- PSC_TEMPERATURE, true);
+ PSC_TEMPERATURE, true, false);
if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
pmbus_add_boolean_cmp(data, "temp",
"min_alarm", in_index, i1, i0,
@@ -1338,7 +1339,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
pmbus_add_sensor(data, "temp", "lcrit",
in_index, page,
PMBUS_UT_FAULT_LIMIT,
- PSC_TEMPERATURE, true);
+ PSC_TEMPERATURE, true, false);
if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
pmbus_add_boolean_cmp(data, "temp",
"lcrit_alarm", in_index, i1, i0,
@@ -1352,7 +1353,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i1 = data->num_sensors;
pmbus_add_sensor(data, "temp", "max", in_index,
page, PMBUS_OT_WARN_LIMIT,
- PSC_TEMPERATURE, true);
+ PSC_TEMPERATURE, true, false);
if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
pmbus_add_boolean_cmp(data, "temp",
"max_alarm", in_index, i0, i1,
@@ -1366,7 +1367,7 @@ static void pmbus_find_attributes(struct i2c_client *client,
i1 = data->num_sensors;
pmbus_add_sensor(data, "temp", "crit", in_index,
page, PMBUS_OT_FAULT_LIMIT,
- PSC_TEMPERATURE, true);
+ PSC_TEMPERATURE, true, false);
if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
pmbus_add_boolean_cmp(data, "temp",
"crit_alarm", in_index, i0, i1,
@@ -1421,7 +1422,8 @@ static void pmbus_find_attributes(struct i2c_client *client,
i0 = data->num_sensors;
pmbus_add_sensor(data, "fan", "input", in_index, page,
- pmbus_fan_registers[f], PSC_FAN, true);
+ pmbus_fan_registers[f], PSC_FAN, true,
+ true);
/*
* Each fan status register covers multiple fans,
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index eb4af28f8567..1f29bab6b3e5 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -4,6 +4,7 @@
config HWSPINLOCK
tristate "Generic Hardware Spinlock framework"
+ depends on ARCH_OMAP4
help
Say y here to support the generic hardware spinlock framework.
You only need to enable this if you have hardware spinlock module
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index f4077840d3ab..0e406d73b2c8 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -440,6 +440,7 @@ void do_ide_request(struct request_queue *q)
struct ide_host *host = hwif->host;
struct request *rq = NULL;
ide_startstop_t startstop;
+ unsigned long queue_run_ms = 3; /* old plug delay */
spin_unlock_irq(q->queue_lock);
@@ -459,6 +460,9 @@ repeat:
prev_port = hwif->host->cur_port;
if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
time_after(drive->sleep, jiffies)) {
+ unsigned long left = jiffies - drive->sleep;
+
+ queue_run_ms = jiffies_to_msecs(left + 1);
ide_unlock_port(hwif);
goto plug_device;
}
@@ -547,8 +551,10 @@ plug_device:
plug_device_2:
spin_lock_irq(q->queue_lock);
- if (rq)
+ if (rq) {
blk_requeue_request(q, rq);
+ blk_delay_queue(q, queue_run_ms);
+ }
}
void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
@@ -562,6 +568,10 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
blk_requeue_request(q, rq);
spin_unlock_irqrestore(q->queue_lock, flags);
+
+ /* Use 3ms as that was the old plug delay */
+ if (rq)
+ blk_delay_queue(q, 3);
}
static int drive_is_ready(ide_drive_t *drive)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c7a6213c6996..fbe1973f77b0 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -625,7 +625,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
!!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
- MLX4_PROTOCOL_IB);
+ MLX4_PROT_IB_IPV6);
if (err)
return err;
@@ -636,7 +636,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
return 0;
err_add:
- mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
+ mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
return err;
}
@@ -666,7 +666,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
struct mlx4_ib_gid_entry *ge;
err = mlx4_multicast_detach(mdev->dev,
- &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
+ &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
if (err)
return err;
@@ -721,7 +721,6 @@ static int init_node_data(struct mlx4_ib_dev *dev)
if (err)
goto out;
- dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
out:
@@ -954,7 +953,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event
mlx4_foreach_ib_transport_port(port, ibdev->dev) {
oldnd = iboe->netdevs[port - 1];
iboe->netdevs[port - 1] =
- mlx4_get_protocol_dev(ibdev->dev, MLX4_PROTOCOL_EN, port);
+ mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
if (oldnd != iboe->netdevs[port - 1]) {
if (iboe->netdevs[port - 1])
netdev_added(ibdev, port);
@@ -1207,7 +1206,7 @@ static struct mlx4_interface mlx4_ib_interface = {
.add = mlx4_ib_add,
.remove = mlx4_ib_remove,
.event = mlx4_ib_event,
- .protocol = MLX4_PROTOCOL_IB
+ .protocol = MLX4_PROT_IB_IPV6
};
static int __init mlx4_ib_init(void)
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index ef3291551bc6..cfa3a2b22232 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1116,7 +1116,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
return rc;
}
- if (netif_is_bond_slave(netdev))
+ if (netif_is_bond_slave(nesvnic->netdev))
netdev = nesvnic->netdev->master;
else
netdev = nesvnic->netdev;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 112ec55f2939..434fd800cd24 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -641,7 +641,7 @@ config TOUCHSCREEN_TOUCHIT213
config TOUCHSCREEN_TSC2005
tristate "TSC2005 based touchscreens"
- depends on SPI_MASTER
+ depends on SPI_MASTER && GENERIC_HARDIRQS
help
Say Y here if you have a TSC2005 based touchscreen.
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 87420616efa4..cbf0ff322676 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -358,7 +358,7 @@ static void __tsc2005_enable(struct tsc2005 *ts)
if (ts->esd_timeout && ts->set_reset) {
ts->last_valid_interrupt = jiffies;
schedule_delayed_work(&ts->esd_work,
- round_jiffies(jiffies +
+ round_jiffies_relative(
msecs_to_jiffies(ts->esd_timeout)));
}
@@ -477,7 +477,14 @@ static void tsc2005_esd_work(struct work_struct *work)
int error;
u16 r;
- mutex_lock(&ts->mutex);
+ if (!mutex_trylock(&ts->mutex)) {
+ /*
+ * If the mutex is taken, it means that disable or enable is in
+ * progress. In that case just reschedule the work. If the work
+ * is not needed, it will be canceled by disable.
+ */
+ goto reschedule;
+ }
if (time_is_after_jiffies(ts->last_valid_interrupt +
msecs_to_jiffies(ts->esd_timeout)))
@@ -510,11 +517,12 @@ static void tsc2005_esd_work(struct work_struct *work)
tsc2005_start_scan(ts);
out:
+ mutex_unlock(&ts->mutex);
+reschedule:
/* re-arm the watchdog */
schedule_delayed_work(&ts->esd_work,
- round_jiffies(jiffies +
+ round_jiffies_relative(
msecs_to_jiffies(ts->esd_timeout)));
- mutex_unlock(&ts->mutex);
}
static int tsc2005_open(struct input_dev *input)
@@ -663,7 +671,7 @@ static int __devinit tsc2005_probe(struct spi_device *spi)
goto err_remove_sysfs;
}
- set_irq_wake(spi->irq, 1);
+ irq_set_irq_wake(spi->irq, 1);
return 0;
err_remove_sysfs:
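The timer changes above follow from schedule_delayed_work() taking a relative delay: round_jiffies(jiffies + d) rounds an absolute time, whereas round_jiffies_relative(d) rounds the delay itself. A small illustrative sketch (the helper name is made up):

	/* Re-arm a delayed work item 'ms' milliseconds from now. */
	static void rearm(struct delayed_work *work, unsigned int ms)
	{
		unsigned long d = msecs_to_jiffies(ms);

		/* wrong: an absolute jiffies value passed as a relative delay */
		/* schedule_delayed_work(work, round_jiffies(jiffies + d)); */

		/* right: round the relative delay, then schedule it */
		schedule_delayed_work(work, round_jiffies_relative(d));
	}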
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index c41eb6180c9c..4bebae733349 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -231,6 +231,26 @@ void led_trigger_event(struct led_trigger *trigger,
}
EXPORT_SYMBOL_GPL(led_trigger_event);
+void led_trigger_blink(struct led_trigger *trigger,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct list_head *entry;
+
+ if (!trigger)
+ return;
+
+ read_lock(&trigger->leddev_list_lock);
+ list_for_each(entry, &trigger->led_cdevs) {
+ struct led_classdev *led_cdev;
+
+ led_cdev = list_entry(entry, struct led_classdev, trig_list);
+ led_blink_set(led_cdev, delay_on, delay_off);
+ }
+ read_unlock(&trigger->leddev_list_lock);
+}
+EXPORT_SYMBOL_GPL(led_trigger_blink);
+
void led_trigger_register_simple(const char *name, struct led_trigger **tp)
{
struct led_trigger *trigger;
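A hypothetical caller of the new helper (the trigger pointer and delays are made-up example values; only the function signature comes from the hunk above):

	/* Ask every LED attached to a trigger to blink 100 ms on / 100 ms off. */
	static void example_blink(struct led_trigger *trig)
	{
		unsigned long delay_on = 100, delay_off = 100;

		led_trigger_blink(trig, &delay_on, &delay_off);
	}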
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 98d9ec85e0eb..8420129fc5ee 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -327,4 +327,10 @@ config DM_UEVENT
---help---
Generate udev events for DM events.
+config DM_FLAKEY
+ tristate "Flakey target (EXPERIMENTAL)"
+ depends on BLK_DEV_DM && EXPERIMENTAL
+ ---help---
+ A target that intermittently fails I/O for debugging purposes.
+
endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index d0138606c2e8..448838b1f92a 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
+obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 2c62c1169f78..c8827ffd85bb 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1324,20 +1324,29 @@ static int crypt_setkey_allcpus(struct crypt_config *cc)
static int crypt_set_key(struct crypt_config *cc, char *key)
{
+ int r = -EINVAL;
+ int key_string_len = strlen(key);
+
/* The key size may not be changed. */
- if (cc->key_size != (strlen(key) >> 1))
- return -EINVAL;
+ if (cc->key_size != (key_string_len >> 1))
+ goto out;
/* Hyphen (which gives a key_size of zero) means there is no key. */
if (!cc->key_size && strcmp(key, "-"))
- return -EINVAL;
+ goto out;
if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
- return -EINVAL;
+ goto out;
set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
- return crypt_setkey_allcpus(cc);
+ r = crypt_setkey_allcpus(cc);
+
+out:
+ /* Hex key string not needed after here, so wipe it. */
+ memset(key, '0', key_string_len);
+
+ return r;
}
static int crypt_wipe_key(struct crypt_config *cc)
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
new file mode 100644
index 000000000000..ea790623c30b
--- /dev/null
+++ b/drivers/md/dm-flakey.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2003 Sistina Software (UK) Limited.
+ * Copyright (C) 2004, 2010 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/device-mapper.h>
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+
+#define DM_MSG_PREFIX "flakey"
+
+/*
+ * Flakey: Used for testing only, simulates intermittent,
+ * catastrophic device failure.
+ */
+struct flakey_c {
+ struct dm_dev *dev;
+ unsigned long start_time;
+ sector_t start;
+ unsigned up_interval;
+ unsigned down_interval;
+};
+
+/*
+ * Construct a flakey mapping: <dev_path> <offset> <up interval> <down interval>
+ */
+static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct flakey_c *fc;
+ unsigned long long tmp;
+
+ if (argc != 4) {
+ ti->error = "dm-flakey: Invalid argument count";
+ return -EINVAL;
+ }
+
+ fc = kmalloc(sizeof(*fc), GFP_KERNEL);
+ if (!fc) {
+ ti->error = "dm-flakey: Cannot allocate linear context";
+ return -ENOMEM;
+ }
+ fc->start_time = jiffies;
+
+ if (sscanf(argv[1], "%llu", &tmp) != 1) {
+ ti->error = "dm-flakey: Invalid device sector";
+ goto bad;
+ }
+ fc->start = tmp;
+
+ if (sscanf(argv[2], "%u", &fc->up_interval) != 1) {
+ ti->error = "dm-flakey: Invalid up interval";
+ goto bad;
+ }
+
+ if (sscanf(argv[3], "%u", &fc->down_interval) != 1) {
+ ti->error = "dm-flakey: Invalid down interval";
+ goto bad;
+ }
+
+ if (!(fc->up_interval + fc->down_interval)) {
+ ti->error = "dm-flakey: Total (up + down) interval is zero";
+ goto bad;
+ }
+
+ if (fc->up_interval + fc->down_interval < fc->up_interval) {
+ ti->error = "dm-flakey: Interval overflow";
+ goto bad;
+ }
+
+ if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &fc->dev)) {
+ ti->error = "dm-flakey: Device lookup failed";
+ goto bad;
+ }
+
+ ti->num_flush_requests = 1;
+ ti->private = fc;
+ return 0;
+
+bad:
+ kfree(fc);
+ return -EINVAL;
+}
+
+static void flakey_dtr(struct dm_target *ti)
+{
+ struct flakey_c *fc = ti->private;
+
+ dm_put_device(ti, fc->dev);
+ kfree(fc);
+}
+
+static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
+{
+ struct flakey_c *fc = ti->private;
+
+ return fc->start + (bi_sector - ti->begin);
+}
+
+static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
+{
+ struct flakey_c *fc = ti->private;
+
+ bio->bi_bdev = fc->dev->bdev;
+ if (bio_sectors(bio))
+ bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
+}
+
+static int flakey_map(struct dm_target *ti, struct bio *bio,
+ union map_info *map_context)
+{
+ struct flakey_c *fc = ti->private;
+ unsigned elapsed;
+
+ /* Are we alive ? */
+ elapsed = (jiffies - fc->start_time) / HZ;
+ if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval)
+ return -EIO;
+
+ flakey_map_bio(ti, bio);
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static int flakey_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned int maxlen)
+{
+ struct flakey_c *fc = ti->private;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ result[0] = '\0';
+ break;
+
+ case STATUSTYPE_TABLE:
+ snprintf(result, maxlen, "%s %llu %u %u", fc->dev->name,
+ (unsigned long long)fc->start, fc->up_interval,
+ fc->down_interval);
+ break;
+ }
+ return 0;
+}
+
+static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
+{
+ struct flakey_c *fc = ti->private;
+
+ return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);
+}
+
+static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct flakey_c *fc = ti->private;
+ struct request_queue *q = bdev_get_queue(fc->dev->bdev);
+
+ if (!q->merge_bvec_fn)
+ return max_size;
+
+ bvm->bi_bdev = fc->dev->bdev;
+ bvm->bi_sector = flakey_map_sector(ti, bvm->bi_sector);
+
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
+static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
+{
+ struct flakey_c *fc = ti->private;
+
+ return fn(ti, fc->dev, fc->start, ti->len, data);
+}
+
+static struct target_type flakey_target = {
+ .name = "flakey",
+ .version = {1, 1, 0},
+ .module = THIS_MODULE,
+ .ctr = flakey_ctr,
+ .dtr = flakey_dtr,
+ .map = flakey_map,
+ .status = flakey_status,
+ .ioctl = flakey_ioctl,
+ .merge = flakey_merge,
+ .iterate_devices = flakey_iterate_devices,
+};
+
+static int __init dm_flakey_init(void)
+{
+ int r = dm_register_target(&flakey_target);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ return r;
+}
+
+static void __exit dm_flakey_exit(void)
+{
+ dm_unregister_target(&flakey_target);
+}
+
+/* Module hooks */
+module_init(dm_flakey_init);
+module_exit(dm_flakey_exit);
+
+MODULE_DESCRIPTION(DM_NAME " flakey target");
+MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
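Putting the new target together (an illustration, not part of the patch): a table line follows the constructor's <dev_path> <offset> <up interval> <down interval> order, and flakey_map() fails I/O whenever the elapsed time, taken modulo up + down, falls in the down window. With up = 30 and down = 10, for example, the device behaves normally for 30 seconds, then errors every bio for 10 seconds, and repeats. The window test reduces to:

	/* Sketch of the up/down window check used by flakey_map();
	 * 'elapsed' is whole seconds since the target was created.
	 */
	static bool flakey_is_down(unsigned elapsed, unsigned up, unsigned down)
	{
		return (elapsed % (up + down)) >= up;
	}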
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 6d12775a1061..4cacdad2270a 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1501,14 +1501,10 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
return r;
}
-static void free_params(struct dm_ioctl *param)
-{
- vfree(param);
-}
-
static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
{
struct dm_ioctl tmp, *dmi;
+ int secure_data;
if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data)))
return -EFAULT;
@@ -1516,17 +1512,30 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data)))
return -EINVAL;
+ secure_data = tmp.flags & DM_SECURE_DATA_FLAG;
+
dmi = vmalloc(tmp.data_size);
- if (!dmi)
+ if (!dmi) {
+ if (secure_data && clear_user(user, tmp.data_size))
+ return -EFAULT;
return -ENOMEM;
-
- if (copy_from_user(dmi, user, tmp.data_size)) {
- vfree(dmi);
- return -EFAULT;
}
+ if (copy_from_user(dmi, user, tmp.data_size))
+ goto bad;
+
+ /* Wipe the user buffer so we do not return it to userspace */
+ if (secure_data && clear_user(user, tmp.data_size))
+ goto bad;
+
*param = dmi;
return 0;
+
+bad:
+ if (secure_data)
+ memset(dmi, 0, tmp.data_size);
+ vfree(dmi);
+ return -EFAULT;
}
static int validate_params(uint cmd, struct dm_ioctl *param)
@@ -1534,6 +1543,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
/* Always clear this flag */
param->flags &= ~DM_BUFFER_FULL_FLAG;
param->flags &= ~DM_UEVENT_GENERATED_FLAG;
+ param->flags &= ~DM_SECURE_DATA_FLAG;
/* Ignores parameters */
if (cmd == DM_REMOVE_ALL_CMD ||
@@ -1561,10 +1571,11 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
{
int r = 0;
+ int wipe_buffer;
unsigned int cmd;
struct dm_ioctl *uninitialized_var(param);
ioctl_fn fn = NULL;
- size_t param_size;
+ size_t input_param_size;
/* only root can play with this */
if (!capable(CAP_SYS_ADMIN))
@@ -1611,13 +1622,15 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
if (r)
return r;
+ input_param_size = param->data_size;
+ wipe_buffer = param->flags & DM_SECURE_DATA_FLAG;
+
r = validate_params(cmd, param);
if (r)
goto out;
- param_size = param->data_size;
param->data_size = sizeof(*param);
- r = fn(param, param_size);
+ r = fn(param, input_param_size);
/*
* Copy the results back to userland.
@@ -1625,8 +1638,11 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
if (!r && copy_to_user(user, param, param->data_size))
r = -EFAULT;
- out:
- free_params(param);
+out:
+ if (wipe_buffer)
+ memset(param, 0, input_param_size);
+
+ vfree(param);
return r;
}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 57968eb382c1..a1f321889676 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -543,7 +543,7 @@ static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
return -EINVAL;
}
- r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &dev);
+ r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
if (r)
return r;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 4b0b63c290a6..a550a057d991 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -844,8 +844,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
{
/* target parameters */
static struct param _params[] = {
- {1, 1024, "invalid number of priority groups"},
- {1, 1024, "invalid initial priority group number"},
+ {0, 1024, "invalid number of priority groups"},
+ {0, 1024, "invalid initial priority group number"},
};
int r;
@@ -879,6 +879,13 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
if (r)
goto bad;
+ if ((!m->nr_priority_groups && next_pg_num) ||
+ (m->nr_priority_groups && !next_pg_num)) {
+ ti->error = "invalid initial priority group";
+ r = -EINVAL;
+ goto bad;
+ }
+
/* parse the priority groups */
while (as.argc) {
struct priority_group *pg;
@@ -1065,7 +1072,7 @@ out:
static int action_dev(struct multipath *m, struct dm_dev *dev,
action_fn action)
{
- int r = 0;
+ int r = -EINVAL;
struct pgpath *pgpath;
struct priority_group *pg;
@@ -1415,7 +1422,7 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
else if (m->current_pg)
pg_num = m->current_pg->pg_num;
else
- pg_num = 1;
+ pg_num = (m->nr_priority_groups ? 1 : 0);
DMEMIT("%u ", pg_num);
@@ -1669,7 +1676,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index fdde53cd12b7..a2d330942cb2 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1080,7 +1080,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
argv++;
argc--;
- r = dm_get_device(ti, cow_path, FMODE_READ | FMODE_WRITE, &s->cow);
+ r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
if (r) {
ti->error = "Cannot get COW device";
goto bad_cow;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index dddfa14f2982..3d80cf0c152d 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -396,9 +396,29 @@ static void stripe_io_hints(struct dm_target *ti,
blk_limits_io_opt(limits, chunk_size * sc->stripes);
}
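+/*
+ * Map the bio_vec onto the stripe it would be sent to and ask that
+ * device's merge_bvec_fn how much data may be added.
+ */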
+static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct stripe_c *sc = ti->private;
+ sector_t bvm_sector = bvm->bi_sector;
+ uint32_t stripe;
+ struct request_queue *q;
+
+ stripe_map_sector(sc, bvm_sector, &stripe, &bvm_sector);
+
+ q = bdev_get_queue(sc->stripe[stripe].dev->bdev);
+ if (!q->merge_bvec_fn)
+ return max_size;
+
+ bvm->bi_bdev = sc->stripe[stripe].dev->bdev;
+ bvm->bi_sector = sc->stripe[stripe].physical_start + bvm_sector;
+
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
static struct target_type stripe_target = {
.name = "striped",
- .version = {1, 3, 1},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
@@ -407,6 +427,7 @@ static struct target_type stripe_target = {
.status = stripe_status,
.iterate_devices = stripe_iterate_devices,
.io_hints = stripe_io_hints,
+ .merge = stripe_merge,
};
int __init dm_stripe_init(void)
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 9be6a830f1d2..ac0e42b47b2a 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -187,7 +187,7 @@ static void ite_decode_bytes(struct ite_dev *dev, const u8 * data, int
sample_period = dev->params.sample_period;
ldata = (unsigned long *)data;
size = length << 3;
- next_one = generic_find_next_le_bit(ldata, size, 0);
+ next_one = find_next_bit_le(ldata, size, 0);
if (next_one > 0) {
ev.pulse = true;
ev.duration =
@@ -196,14 +196,14 @@ static void ite_decode_bytes(struct ite_dev *dev, const u8 * data, int
}
while (next_one < size) {
- next_zero = generic_find_next_zero_le_bit(ldata, size, next_one + 1);
+ next_zero = find_next_zero_bit_le(ldata, size, next_one + 1);
ev.pulse = false;
ev.duration = ITE_BITS_TO_NS(next_zero - next_one, sample_period);
ir_raw_event_store_with_filter(dev->rdev, &ev);
if (next_zero < size) {
next_one =
- generic_find_next_le_bit(ldata,
+ find_next_bit_le(ldata,
size,
next_zero + 1);
ev.pulse = true;
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
index 4ce5c8dffb68..cc0997a05171 100644
--- a/drivers/memstick/host/Kconfig
+++ b/drivers/memstick/host/Kconfig
@@ -30,3 +30,15 @@ config MEMSTICK_JMICRON_38X
To compile this driver as a module, choose M here: the
module will be called jmb38x_ms.
+
+config MEMSTICK_R592
+ tristate "Ricoh R5C592 MemoryStick interface support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && PCI
+
+ help
+ Say Y here if you want to be able to access MemoryStick cards with
+ the Ricoh R5C592 MemoryStick card reader (which is part of a 5-in-1
+ multifunction reader).
+
+ To compile this driver as a module, choose M here: the module will
+ be called r592.
diff --git a/drivers/memstick/host/Makefile b/drivers/memstick/host/Makefile
index a1815e9dd019..31ba8d378e46 100644
--- a/drivers/memstick/host/Makefile
+++ b/drivers/memstick/host/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_MEMSTICK_TIFM_MS) += tifm_ms.o
obj-$(CONFIG_MEMSTICK_JMICRON_38X) += jmb38x_ms.o
+obj-$(CONFIG_MEMSTICK_R592) += r592.o
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
new file mode 100644
index 000000000000..767406c95291
--- /dev/null
+++ b/drivers/memstick/host/r592.c
@@ -0,0 +1,908 @@
+/*
+ * Copyright (C) 2010 - Maxim Levitsky
+ * driver for Ricoh memstick readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/freezer.h>
+#include <linux/jiffies.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <asm/byteorder.h>
+#include <linux/swab.h>
+#include "r592.h"
+
+static int enable_dma = 1;
+static int debug;
+
+static const char *tpc_names[] = {
+ "MS_TPC_READ_MG_STATUS",
+ "MS_TPC_READ_LONG_DATA",
+ "MS_TPC_READ_SHORT_DATA",
+ "MS_TPC_READ_REG",
+ "MS_TPC_READ_QUAD_DATA",
+ "INVALID",
+ "MS_TPC_GET_INT",
+ "MS_TPC_SET_RW_REG_ADRS",
+ "MS_TPC_EX_SET_CMD",
+ "MS_TPC_WRITE_QUAD_DATA",
+ "MS_TPC_WRITE_REG",
+ "MS_TPC_WRITE_SHORT_DATA",
+ "MS_TPC_WRITE_LONG_DATA",
+ "MS_TPC_SET_CMD",
+};
+
+/**
+ * memstick_debug_get_tpc_name - debug helper that returns string for
+ * a TPC number
+ */
+const char *memstick_debug_get_tpc_name(int tpc)
+{
+ return tpc_names[tpc-1];
+}
+EXPORT_SYMBOL(memstick_debug_get_tpc_name);
+
+
+/* Read a register*/
+static inline u32 r592_read_reg(struct r592_device *dev, int address)
+{
+ u32 value = readl(dev->mmio + address);
+ dbg_reg("reg #%02d == 0x%08x", address, value);
+ return value;
+}
+
+/* Write a register */
+static inline void r592_write_reg(struct r592_device *dev,
+ int address, u32 value)
+{
+ dbg_reg("reg #%02d <- 0x%08x", address, value);
+ writel(value, dev->mmio + address);
+}
+
+/* Reads a big endian DWORD register */
+static inline u32 r592_read_reg_raw_be(struct r592_device *dev, int address)
+{
+ u32 value = __raw_readl(dev->mmio + address);
+ dbg_reg("reg #%02d == 0x%08x", address, value);
+ return be32_to_cpu(value);
+}
+
+/* Writes a big endian DWORD register */
+static inline void r592_write_reg_raw_be(struct r592_device *dev,
+ int address, u32 value)
+{
+ dbg_reg("reg #%02d <- 0x%08x", address, value);
+ __raw_writel(cpu_to_be32(value), dev->mmio + address);
+}
+
+/* Set specific bits in a register (little endian) */
+static inline void r592_set_reg_mask(struct r592_device *dev,
+ int address, u32 mask)
+{
+ u32 reg = readl(dev->mmio + address);
+ dbg_reg("reg #%02d |= 0x%08x (old =0x%08x)", address, mask, reg);
+ writel(reg | mask , dev->mmio + address);
+}
+
+/* Clear specific bits in a register (little endian) */
+static inline void r592_clear_reg_mask(struct r592_device *dev,
+ int address, u32 mask)
+{
+ u32 reg = readl(dev->mmio + address);
+ dbg_reg("reg #%02d &= 0x%08x (old = 0x%08x, mask = 0x%08x)",
+ address, ~mask, reg, mask);
+ writel(reg & ~mask, dev->mmio + address);
+}
+
+
+/* Wait for status bits while checking for errors */
+static int r592_wait_status(struct r592_device *dev, u32 mask, u32 wanted_mask)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ u32 reg = r592_read_reg(dev, R592_STATUS);
+
+ if ((reg & mask) == wanted_mask)
+ return 0;
+
+ while (time_before(jiffies, timeout)) {
+
+ reg = r592_read_reg(dev, R592_STATUS);
+
+ if ((reg & mask) == wanted_mask)
+ return 0;
+
+ if (reg & (R592_STATUS_SEND_ERR | R592_STATUS_RECV_ERR))
+ return -EIO;
+
+ cpu_relax();
+ }
+ return -ETIME;
+}
+
+
+/* Enable/disable device */
+static int r592_enable_device(struct r592_device *dev, bool enable)
+{
+ dbg("%sabling the device", enable ? "en" : "dis");
+
+ if (enable) {
+
+ /* Power up the card */
+ r592_write_reg(dev, R592_POWER, R592_POWER_0 | R592_POWER_1);
+
+ /* Perform a reset */
+ r592_set_reg_mask(dev, R592_IO, R592_IO_RESET);
+
+ msleep(100);
+ } else
+ /* Power down the card */
+ r592_write_reg(dev, R592_POWER, 0);
+
+ return 0;
+}
+
+/* Set serial/parallel mode */
+static int r592_set_mode(struct r592_device *dev, bool parallel_mode)
+{
+ if (!parallel_mode) {
+ dbg("switching to serial mode");
+
+ /* Set serial mode */
+ r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_SERIAL);
+
+ r592_clear_reg_mask(dev, R592_POWER, R592_POWER_20);
+
+ } else {
+ dbg("switching to parallel mode");
+
+ /* This setting should be set _before_ switch TPC */
+ r592_set_reg_mask(dev, R592_POWER, R592_POWER_20);
+
+ r592_clear_reg_mask(dev, R592_IO,
+ R592_IO_SERIAL1 | R592_IO_SERIAL2);
+
+ /* Set the parallel mode now */
+ r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_PARALLEL);
+ }
+
+ dev->parallel_mode = parallel_mode;
+ return 0;
+}
+
+/* Perform a controller reset without powering down the card */
+static void r592_host_reset(struct r592_device *dev)
+{
+ r592_set_reg_mask(dev, R592_IO, R592_IO_RESET);
+ msleep(100);
+ r592_set_mode(dev, dev->parallel_mode);
+}
+
+/* Disable all hardware interrupts */
+static void r592_clear_interrupts(struct r592_device *dev)
+{
+ /* Disable & ACK all interrupts */
+ r592_clear_reg_mask(dev, R592_REG_MSC, IRQ_ALL_ACK_MASK);
+ r592_clear_reg_mask(dev, R592_REG_MSC, IRQ_ALL_EN_MASK);
+}
+
+/* Tests if there is a CRC error */
+static int r592_test_io_error(struct r592_device *dev)
+{
+ if (!(r592_read_reg(dev, R592_STATUS) &
+ (R592_STATUS_SEND_ERR | R592_STATUS_RECV_ERR)))
+ return 0;
+
+ return -EIO;
+}
+
+/* Ensure that FIFO is ready for use */
+static int r592_test_fifo_empty(struct r592_device *dev)
+{
+ if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY)
+ return 0;
+
+ dbg("FIFO not ready, trying to reset the device");
+ r592_host_reset(dev);
+
+ if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY)
+ return 0;
+
+ message("FIFO still not ready, giving up");
+ return -EIO;
+}
+
+/* Activates the DMA transfer to/from the FIFO */
+static void r592_start_dma(struct r592_device *dev, bool is_write)
+{
+ unsigned long flags;
+ u32 reg;
+ spin_lock_irqsave(&dev->irq_lock, flags);
+
+ /* Ack interrupts (just in case) + enable them */
+ r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_ACK_MASK);
+ r592_set_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK);
+
+ /* Set DMA address */
+ r592_write_reg(dev, R592_FIFO_DMA, sg_dma_address(&dev->req->sg));
+
+ /* Enable the DMA */
+ reg = r592_read_reg(dev, R592_FIFO_DMA_SETTINGS);
+ reg |= R592_FIFO_DMA_SETTINGS_EN;
+
+ if (!is_write)
+ reg |= R592_FIFO_DMA_SETTINGS_DIR;
+ else
+ reg &= ~R592_FIFO_DMA_SETTINGS_DIR;
+ r592_write_reg(dev, R592_FIFO_DMA_SETTINGS, reg);
+
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+}
+
+/* Cleans up DMA-related settings */
+static void r592_stop_dma(struct r592_device *dev, int error)
+{
+ r592_clear_reg_mask(dev, R592_FIFO_DMA_SETTINGS,
+ R592_FIFO_DMA_SETTINGS_EN);
+
+ /* This is only a precaution */
+ r592_write_reg(dev, R592_FIFO_DMA,
+ dev->dummy_dma_page_physical_address);
+
+ r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK);
+ r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_ACK_MASK);
+ dev->dma_error = error;
+}
+
+/* Test if hardware supports DMA */
+static void r592_check_dma(struct r592_device *dev)
+{
+ dev->dma_capable = enable_dma &&
+ (r592_read_reg(dev, R592_FIFO_DMA_SETTINGS) &
+ R592_FIFO_DMA_SETTINGS_CAP);
+}
+
+/* Transfers fifo contents in/out using DMA */
+static int r592_transfer_fifo_dma(struct r592_device *dev)
+{
+ int len, sg_count;
+ bool is_write;
+
+ if (!dev->dma_capable || !dev->req->long_data)
+ return -EINVAL;
+
+ len = dev->req->sg.length;
+ is_write = dev->req->data_dir == WRITE;
+
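+ /* DMA is only used for full 512 byte (R592_LFIFO_SIZE) transfers; anything shorter is rejected here and handled by PIO in the caller */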
+ if (len != R592_LFIFO_SIZE)
+ return -EINVAL;
+
+ dbg_verbose("doing dma transfer");
+
+ dev->dma_error = 0;
+ INIT_COMPLETION(dev->dma_done);
+
+ /* TODO: hidden assumption about nents being always 1 */
+ sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
+ PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+
+ if (sg_count != 1 ||
+ (sg_dma_len(&dev->req->sg) < dev->req->sg.length)) {
+ message("problem in dma_map_sg");
+ return -EIO;
+ }
+
+ r592_start_dma(dev, is_write);
+
+ /* Wait for DMA completion */
+ if (!wait_for_completion_timeout(
+ &dev->dma_done, msecs_to_jiffies(1000))) {
+ message("DMA timeout");
+ r592_stop_dma(dev, -ETIMEDOUT);
+ }
+
+ dma_unmap_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
+ PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+
+
+ return dev->dma_error;
+}
+
+/*
+ * Writes the FIFO in 4 byte chunks.
+ * If the length isn't 4 byte aligned, the rest of the data is put into a
+ * spill fifo to be written later.
+ * Use r592_flush_fifo_write to flush that fifo after the last write.
+ */
+static void r592_write_fifo_pio(struct r592_device *dev,
+ unsigned char *buffer, int len)
+{
+ /* flush spill from former write */
+ if (!kfifo_is_empty(&dev->pio_fifo)) {
+
+ u8 tmp[4] = {0};
+ int copy_len = kfifo_in(&dev->pio_fifo, buffer, len);
+
+ if (!kfifo_is_full(&dev->pio_fifo))
+ return;
+ len -= copy_len;
+ buffer += copy_len;
+
+ copy_len = kfifo_out(&dev->pio_fifo, tmp, 4);
+ WARN_ON(copy_len != 4);
+ r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)tmp);
+ }
+
+ WARN_ON(!kfifo_is_empty(&dev->pio_fifo));
+
+ /* write full dwords */
+ while (len >= 4) {
+ r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer);
+ buffer += 4;
+ len -= 4;
+ }
+
+ /* put remaining bytes to the spill */
+ if (len)
+ kfifo_in(&dev->pio_fifo, buffer, len);
+}
+
+/* Flushes the temporary FIFO used to make aligned DWORD writes */
+static void r592_flush_fifo_write(struct r592_device *dev)
+{
+ u8 buffer[4] = { 0 };
+ int len;
+
+ if (kfifo_is_empty(&dev->pio_fifo))
+ return;
+
+ len = kfifo_out(&dev->pio_fifo, buffer, 4);
+ r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer);
+}
+
+/*
+ * Reads the fifo in 4 byte chunks.
+ * If the input doesn't fit the buffer, the leftover bytes of the last dword
+ * are kept in the spill buffer so they don't get lost; on the last read,
+ * just throw them away.
+ */
+static void r592_read_fifo_pio(struct r592_device *dev,
+ unsigned char *buffer, int len)
+{
+ u8 tmp[4];
+
+ /* Read from last spill */
+ if (!kfifo_is_empty(&dev->pio_fifo)) {
+ int bytes_copied =
+ kfifo_out(&dev->pio_fifo, buffer, min(4, len));
+ buffer += bytes_copied;
+ len -= bytes_copied;
+
+ if (!kfifo_is_empty(&dev->pio_fifo))
+ return;
+ }
+
+ /* Reads dwords from FIFO */
+ while (len >= 4) {
+ *(u32 *)buffer = r592_read_reg_raw_be(dev, R592_FIFO_PIO);
+ buffer += 4;
+ len -= 4;
+ }
+
+ if (len) {
+ *(u32 *)tmp = r592_read_reg_raw_be(dev, R592_FIFO_PIO);
+ kfifo_in(&dev->pio_fifo, tmp, 4);
+ len -= kfifo_out(&dev->pio_fifo, buffer, len);
+ }
+
+ WARN_ON(len);
+ return;
+}
+
+/* Transfers actual data using PIO. */
+static int r592_transfer_fifo_pio(struct r592_device *dev)
+{
+ unsigned long flags;
+
+ bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
+ struct sg_mapping_iter miter;
+
+ kfifo_reset(&dev->pio_fifo);
+
+ if (!dev->req->long_data) {
+ if (is_write) {
+ r592_write_fifo_pio(dev, dev->req->data,
+ dev->req->data_len);
+ r592_flush_fifo_write(dev);
+ } else
+ r592_read_fifo_pio(dev, dev->req->data,
+ dev->req->data_len);
+ return 0;
+ }
+
+ local_irq_save(flags);
+ sg_miter_start(&miter, &dev->req->sg, 1, SG_MITER_ATOMIC |
+ (is_write ? SG_MITER_FROM_SG : SG_MITER_TO_SG));
+
+ /* Do the fifo <-> memory transfer */
+ while (sg_miter_next(&miter))
+ if (is_write)
+ r592_write_fifo_pio(dev, miter.addr, miter.length);
+ else
+ r592_read_fifo_pio(dev, miter.addr, miter.length);
+
+
+ /* Write the last few non-aligned bytes */
+ if (is_write)
+ r592_flush_fifo_write(dev);
+
+ sg_miter_stop(&miter);
+ local_irq_restore(flags);
+ return 0;
+}
+
+/* Executes one TPC (data is read/written from small or large fifo) */
+static void r592_execute_tpc(struct r592_device *dev)
+{
+ bool is_write;
+ int len, error;
+ u32 status, reg;
+
+ if (!dev->req) {
+ message("BUG: tpc execution without request!");
+ return;
+ }
+
+ is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
+ len = dev->req->long_data ?
+ dev->req->sg.length : dev->req->data_len;
+
+ /* Ensure that FIFO can hold the input data */
+ if (len > R592_LFIFO_SIZE) {
+ message("IO: hardware doesn't support TPCs longer that 512");
+ error = -ENOSYS;
+ goto out;
+ }
+
+ if (!(r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_PRSNT)) {
+ dbg("IO: refusing to send TPC because card is absent");
+ error = -ENODEV;
+ goto out;
+ }
+
+ dbg("IO: executing %s LEN=%d",
+ memstick_debug_get_tpc_name(dev->req->tpc), len);
+
+ /* Set IO direction */
+ if (is_write)
+ r592_set_reg_mask(dev, R592_IO, R592_IO_DIRECTION);
+ else
+ r592_clear_reg_mask(dev, R592_IO, R592_IO_DIRECTION);
+
+
+ error = r592_test_fifo_empty(dev);
+ if (error)
+ goto out;
+
+ /* Transfer write data */
+ if (is_write) {
+ error = r592_transfer_fifo_dma(dev);
+ if (error == -EINVAL)
+ error = r592_transfer_fifo_pio(dev);
+ }
+
+ if (error)
+ goto out;
+
+ /* Trigger the TPC */
+ reg = (len << R592_TPC_EXEC_LEN_SHIFT) |
+ (dev->req->tpc << R592_TPC_EXEC_TPC_SHIFT) |
+ R592_TPC_EXEC_BIG_FIFO;
+
+ r592_write_reg(dev, R592_TPC_EXEC, reg);
+
+ /* Wait for TPC completion */
+ status = R592_STATUS_RDY;
+ if (dev->req->need_card_int)
+ status |= R592_STATUS_CED;
+
+ error = r592_wait_status(dev, status, status);
+ if (error) {
+ message("card didn't respond");
+ goto out;
+ }
+
+ /* Test IO errors */
+ error = r592_test_io_error(dev);
+ if (error) {
+ dbg("IO error");
+ goto out;
+ }
+
+ /* Read data from FIFO */
+ if (!is_write) {
+ error = r592_transfer_fifo_dma(dev);
+ if (error == -EINVAL)
+ error = r592_transfer_fifo_pio(dev);
+ }
+
+ /* Read the INT register. This could be shortened with shifts, but this
+ way it's more readable */
+ if (dev->parallel_mode && dev->req->need_card_int) {
+
+ dev->req->int_reg = 0;
+ status = r592_read_reg(dev, R592_STATUS);
+
+ if (status & R592_STATUS_P_CMDNACK)
+ dev->req->int_reg |= MEMSTICK_INT_CMDNAK;
+ if (status & R592_STATUS_P_BREQ)
+ dev->req->int_reg |= MEMSTICK_INT_BREQ;
+ if (status & R592_STATUS_P_INTERR)
+ dev->req->int_reg |= MEMSTICK_INT_ERR;
+ if (status & R592_STATUS_P_CED)
+ dev->req->int_reg |= MEMSTICK_INT_CED;
+ }
+
+ if (error)
+ dbg("FIFO read error");
+out:
+ dev->req->error = error;
+ r592_clear_reg_mask(dev, R592_REG_MSC, R592_REG_MSC_LED);
+ return;
+}
+
+/* Main request processing thread */
+static int r592_process_thread(void *data)
+{
+ int error;
+ struct r592_device *dev = (struct r592_device *)data;
+ unsigned long flags;
+
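+ /* memstick_next_req() and the sleep state change are done under io_thread_lock, which r592_submit_req() also takes before waking this thread */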
+ while (!kthread_should_stop()) {
+ spin_lock_irqsave(&dev->io_thread_lock, flags);
+ set_current_state(TASK_INTERRUPTIBLE);
+ error = memstick_next_req(dev->host, &dev->req);
+ spin_unlock_irqrestore(&dev->io_thread_lock, flags);
+
+ if (error) {
+ if (error == -ENXIO || error == -EAGAIN) {
+ dbg_verbose("IO: done IO, sleeping");
+ } else {
+ dbg("IO: unknown error from "
+ "memstick_next_req %d", error);
+ }
+
+ if (kthread_should_stop())
+ set_current_state(TASK_RUNNING);
+
+ schedule();
+ } else {
+ set_current_state(TASK_RUNNING);
+ r592_execute_tpc(dev);
+ }
+ }
+ return 0;
+}
+
+/* Reprogram chip to detect change in card state */
+/* e.g. if a card is detected, arm it to detect removal, and vice versa */
+static void r592_update_card_detect(struct r592_device *dev)
+{
+ u32 reg = r592_read_reg(dev, R592_REG_MSC);
+ bool card_detected = reg & R592_REG_MSC_PRSNT;
+
+ dbg("update card detect. card state: %s", card_detected ?
+ "present" : "absent");
+
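+ /* The high word of R592_REG_MSC holds the IRQ enable bits */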
+ reg &= ~((R592_REG_MSC_IRQ_REMOVE | R592_REG_MSC_IRQ_INSERT) << 16);
+
+ if (card_detected)
+ reg |= (R592_REG_MSC_IRQ_REMOVE << 16);
+ else
+ reg |= (R592_REG_MSC_IRQ_INSERT << 16);
+
+ r592_write_reg(dev, R592_REG_MSC, reg);
+}
+
+/* Timer routine that fires shortly after the last card detection event */
+static void r592_detect_timer(long unsigned int data)
+{
+ struct r592_device *dev = (struct r592_device *)data;
+ r592_update_card_detect(dev);
+ memstick_detect_change(dev->host);
+}
+
+/* Interrupt handler */
+static irqreturn_t r592_irq(int irq, void *data)
+{
+ struct r592_device *dev = (struct r592_device *)data;
+ irqreturn_t ret = IRQ_NONE;
+ u32 reg;
+ u16 irq_enable, irq_status;
+ unsigned long flags;
+ int error;
+
+ spin_lock_irqsave(&dev->irq_lock, flags);
+
+ reg = r592_read_reg(dev, R592_REG_MSC);
+ irq_enable = reg >> 16;
+ irq_status = reg & 0xFFFF;
+
+ /* Ack the interrupts */
+ reg &= ~irq_status;
+ r592_write_reg(dev, R592_REG_MSC, reg);
+
+ /* Get the IRQ status minus bits that aren't enabled */
+ irq_status &= (irq_enable);
+
+ /* Due to a limitation of the memstick core, we don't look at the bits that
+ indicate whether the card was removed/inserted and/or is present */
+ if (irq_status & (R592_REG_MSC_IRQ_INSERT | R592_REG_MSC_IRQ_REMOVE)) {
+
+ bool card_was_added = irq_status & R592_REG_MSC_IRQ_INSERT;
+ ret = IRQ_HANDLED;
+
+ message("IRQ: card %s", card_was_added ? "added" : "removed");
+
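+ /* Debounce card detect: the timer calls memstick_detect_change(), waiting longer after an insertion (500 ms) than after a removal (50 ms) */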
+ mod_timer(&dev->detect_timer,
+ jiffies + msecs_to_jiffies(card_was_added ? 500 : 50));
+ }
+
+ if (irq_status &
+ (R592_REG_MSC_FIFO_DMA_DONE | R592_REG_MSC_FIFO_DMA_ERR)) {
+ ret = IRQ_HANDLED;
+
+ if (irq_status & R592_REG_MSC_FIFO_DMA_ERR) {
+ message("IRQ: DMA error");
+ error = -EIO;
+ } else {
+ dbg_verbose("IRQ: dma done");
+ error = 0;
+ }
+
+ r592_stop_dma(dev, error);
+ complete(&dev->dma_done);
+ }
+
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+ return ret;
+}
+
+/* External interface: set settings */
+static int r592_set_param(struct memstick_host *host,
+ enum memstick_param param, int value)
+{
+ struct r592_device *dev = memstick_priv(host);
+
+ switch (param) {
+ case MEMSTICK_POWER:
+ switch (value) {
+ case MEMSTICK_POWER_ON:
+ return r592_enable_device(dev, true);
+ case MEMSTICK_POWER_OFF:
+ return r592_enable_device(dev, false);
+ default:
+ return -EINVAL;
+ }
+ case MEMSTICK_INTERFACE:
+ switch (value) {
+ case MEMSTICK_SERIAL:
+ return r592_set_mode(dev, 0);
+ case MEMSTICK_PAR4:
+ return r592_set_mode(dev, 1);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+/* External interface: submit requests */
+static void r592_submit_req(struct memstick_host *host)
+{
+ struct r592_device *dev = memstick_priv(host);
+ unsigned long flags;
+
+ if (dev->req)
+ return;
+
+ spin_lock_irqsave(&dev->io_thread_lock, flags);
+ if (wake_up_process(dev->io_thread))
+ dbg_verbose("IO thread woken to process requests");
+ spin_unlock_irqrestore(&dev->io_thread_lock, flags);
+}
+
+static const struct pci_device_id r592_pci_id_tbl[] = {
+
+ { PCI_VDEVICE(RICOH, 0x0592), },
+ { },
+};
+
+/* Main entry */
+static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int error = -ENOMEM;
+ struct memstick_host *host;
+ struct r592_device *dev;
+
+ /* Allocate memory */
+ host = memstick_alloc_host(sizeof(struct r592_device), &pdev->dev);
+ if (!host)
+ goto error1;
+
+ dev = memstick_priv(host);
+ dev->host = host;
+ dev->pci_dev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ /* pci initialization */
+ error = pci_enable_device(pdev);
+ if (error)
+ goto error2;
+
+ pci_set_master(pdev);
+ error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (error)
+ goto error3;
+
+ error = pci_request_regions(pdev, DRV_NAME);
+ if (error)
+ goto error3;
+
+ dev->mmio = pci_ioremap_bar(pdev, 0);
+ if (!dev->mmio)
+ goto error4;
+
+ dev->irq = pdev->irq;
+ spin_lock_init(&dev->irq_lock);
+ spin_lock_init(&dev->io_thread_lock);
+ init_completion(&dev->dma_done);
+ INIT_KFIFO(dev->pio_fifo);
+ setup_timer(&dev->detect_timer,
+ r592_detect_timer, (long unsigned int)dev);
+
+ /* Host initialization */
+ host->caps = MEMSTICK_CAP_PAR4;
+ host->request = r592_submit_req;
+ host->set_param = r592_set_param;
+ r592_check_dma(dev);
+
+ dev->io_thread = kthread_run(r592_process_thread, dev, "r592_io");
+ if (IS_ERR(dev->io_thread)) {
+ error = PTR_ERR(dev->io_thread);
+ goto error5;
+ }
+
+ /* This is just a precaution, so don't fail */
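+ /* r592_stop_dma() points the DMA address register at this page when a transfer is stopped */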
+ dev->dummy_dma_page = pci_alloc_consistent(pdev, PAGE_SIZE,
+ &dev->dummy_dma_page_physical_address);
+ r592_stop_dma(dev , 0);
+
+ if (request_irq(dev->irq, &r592_irq, IRQF_SHARED,
+ DRV_NAME, dev))
+ goto error6;
+
+ r592_update_card_detect(dev);
+ if (memstick_add_host(host))
+ goto error7;
+
+ message("driver succesfully loaded");
+ return 0;
+error7:
+ free_irq(dev->irq, dev);
+error6:
+ if (dev->dummy_dma_page)
+ pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page,
+ dev->dummy_dma_page_physical_address);
+
+ kthread_stop(dev->io_thread);
+error5:
+ iounmap(dev->mmio);
+error4:
+ pci_release_regions(pdev);
+error3:
+ pci_disable_device(pdev);
+error2:
+ memstick_free_host(host);
+error1:
+ return error;
+}
+
+static void r592_remove(struct pci_dev *pdev)
+{
+ int error = 0;
+ struct r592_device *dev = pci_get_drvdata(pdev);
+
+ /* Stop the processing thread.
+ That ensures that we won't take any more requests */
+ kthread_stop(dev->io_thread);
+
+ r592_enable_device(dev, false);
+
+ while (!error && dev->req) {
+ dev->req->error = -ETIME;
+ error = memstick_next_req(dev->host, &dev->req);
+ }
+ memstick_remove_host(dev->host);
+
+ free_irq(dev->irq, dev);
+ iounmap(dev->mmio);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ memstick_free_host(dev->host);
+
+ if (dev->dummy_dma_page)
+ pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page,
+ dev->dummy_dma_page_physical_address);
+}
+
+#ifdef CONFIG_PM
+static int r592_suspend(struct device *core_dev)
+{
+ struct pci_dev *pdev = to_pci_dev(core_dev);
+ struct r592_device *dev = pci_get_drvdata(pdev);
+
+ r592_clear_interrupts(dev);
+ memstick_suspend_host(dev->host);
+ del_timer_sync(&dev->detect_timer);
+ return 0;
+}
+
+static int r592_resume(struct device *core_dev)
+{
+ struct pci_dev *pdev = to_pci_dev(core_dev);
+ struct r592_device *dev = pci_get_drvdata(pdev);
+
+ r592_clear_interrupts(dev);
+ r592_enable_device(dev, false);
+ memstick_resume_host(dev->host);
+ r592_update_card_detect(dev);
+ return 0;
+}
+
+SIMPLE_DEV_PM_OPS(r592_pm_ops, r592_suspend, r592_resume);
+#endif
+
+MODULE_DEVICE_TABLE(pci, r592_pci_id_tbl);
+
+static struct pci_driver r852_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = r592_pci_id_tbl,
+ .probe = r592_probe,
+ .remove = r592_remove,
+#ifdef CONFIG_PM
+ .driver.pm = &r592_pm_ops,
+#endif
+};
+
+static __init int r592_module_init(void)
+{
+ return pci_register_driver(&r852_pci_driver);
+}
+
+static void __exit r592_module_exit(void)
+{
+ pci_unregister_driver(&r852_pci_driver);
+}
+
+module_init(r592_module_init);
+module_exit(r592_module_exit);
+
+module_param(enable_dma, bool, S_IRUGO);
+MODULE_PARM_DESC(enable_dma, "Enable usage of DMA (default)");
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-3)");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Ricoh R5C592 Memstick/Memstick PRO card reader driver");
diff --git a/drivers/memstick/host/r592.h b/drivers/memstick/host/r592.h
new file mode 100644
index 000000000000..eee264e6028f
--- /dev/null
+++ b/drivers/memstick/host/r592.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2010 - Maxim Levitsky
+ * driver for Ricoh memstick readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef R592_H
+#define R592_H
+
+#include <linux/memstick.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/kfifo.h>
+#include <linux/ctype.h>
+
+/* write to this reg (number,len) triggers TPC execution */
+#define R592_TPC_EXEC 0x00
+#define R592_TPC_EXEC_LEN_SHIFT 16 /* Bits 16..25 are TPC len */
+#define R592_TPC_EXEC_BIG_FIFO (1 << 26) /* If bit 26 is set, large fifo is used (reg 48) */
+#define R592_TPC_EXEC_TPC_SHIFT 28 /* Bits 28..31 are the TPC number */
+
+
+/* Window for small TPC fifo (big endian)*/
+/* reads and writes always are done in 8 byte chunks */
+/* Not used by the driver, because the large fifo does a better job */
+#define R592_SFIFO 0x08
+
+
+/* Status register (ms int, small fifo, IO)*/
+#define R592_STATUS 0x10
+ /* Parallel INT bits */
+#define R592_STATUS_P_CMDNACK (1 << 16) /* INT reg: NACK (parallel mode) */
+#define R592_STATUS_P_BREQ (1 << 17) /* INT reg: card ready (parallel mode)*/
+#define R592_STATUS_P_INTERR (1 << 18) /* INT reg: int error (parallel mode)*/
+#define R592_STATUS_P_CED (1 << 19) /* INT reg: command done (parallel mode) */
+
+ /* Fifo status */
+#define R592_STATUS_SFIFO_FULL (1 << 20) /* Small Fifo almost full (last chunk is written) */
+#define R592_STATUS_SFIFO_EMPTY (1 << 21) /* Small Fifo empty */
+
+ /* Error detection via CRC */
+#define R592_STATUS_SEND_ERR (1 << 24) /* Send failed */
+#define R592_STATUS_RECV_ERR (1 << 25) /* Receive failed */
+
+ /* Card state */
+#define R592_STATUS_RDY (1 << 28) /* RDY signal received */
+#define R592_STATUS_CED (1 << 29) /* INT: Command done (serial mode) */
+#define R592_STATUS_SFIFO_INPUT (1 << 30) /* Small fifo received data */
+
+#define R592_SFIFO_SIZE 32 /* total size of small fifo is 32 bytes */
+#define R592_SFIFO_PACKET 8 /* packet size of small fifo */
+
+/* IO control */
+#define R592_IO 0x18
+#define R592_IO_16 (1 << 16) /* Set by default, can be cleared */
+#define R592_IO_18 (1 << 18) /* Set by default, can be cleared */
+#define R592_IO_SERIAL1 (1 << 20) /* Set by default, can be cleared, (cleared on parallel) */
+#define R592_IO_22 (1 << 22) /* Set by default, can be cleared */
+#define R592_IO_DIRECTION (1 << 24) /* TPC direction (1 write 0 read) */
+#define R592_IO_26 (1 << 26) /* Set by default, can be cleared */
+#define R592_IO_SERIAL2 (1 << 30) /* Set by default, can be cleared (cleared on parallel), serial doesn't work if unset */
+#define R592_IO_RESET (1 << 31) /* Reset, sets defaults*/
+
+
+/* Turns hardware on/off */
+#define R592_POWER 0x20 /* bits 0-7 writeable */
+#define R592_POWER_0 (1 << 0) /* set on start, cleared on stop - must be set*/
+#define R592_POWER_1 (1 << 1) /* set on start, cleared on stop - must be set*/
+#define R592_POWER_3 (1 << 3) /* must be clear */
+#define R592_POWER_20 (1 << 5) /* set before switch to parallel */
+
+/* IO mode*/
+#define R592_IO_MODE 0x24
+#define R592_IO_MODE_SERIAL 1
+#define R592_IO_MODE_PARALLEL 3
+
+
+/* IRQ,card detection,large fifo (first word irq status, second enable) */
+/* IRQs are ACKed by clearing the bits */
+#define R592_REG_MSC 0x28
+#define R592_REG_MSC_PRSNT (1 << 1) /* card present (only status)*/
+#define R592_REG_MSC_IRQ_INSERT (1 << 8) /* detect insert / card inserted */
+#define R592_REG_MSC_IRQ_REMOVE (1 << 9) /* detect removal / card removed */
+#define R592_REG_MSC_FIFO_EMPTY (1 << 10) /* fifo is empty */
+#define R592_REG_MSC_FIFO_DMA_DONE (1 << 11) /* dma enable / dma done */
+
+#define R592_REG_MSC_FIFO_USER_ORN (1 << 12) /* set if software reads empty fifo (if R592_REG_MSC_FIFO_EMPTY is set) */
+#define R592_REG_MSC_FIFO_MISMATH (1 << 13) /* set if amount of data in fifo doesn't match amount in TPC */
+#define R592_REG_MSC_FIFO_DMA_ERR (1 << 14) /* IO failure */
+#define R592_REG_MSC_LED (1 << 15) /* clear to turn led off (only status)*/
+
+#define DMA_IRQ_ACK_MASK \
+ (R592_REG_MSC_FIFO_DMA_DONE | R592_REG_MSC_FIFO_DMA_ERR)
+
+#define DMA_IRQ_EN_MASK (DMA_IRQ_ACK_MASK << 16)
+
+#define IRQ_ALL_ACK_MASK 0x00007F00
+#define IRQ_ALL_EN_MASK (IRQ_ALL_ACK_MASK << 16)
+
+/* DMA address for large FIFO read/writes*/
+#define R592_FIFO_DMA 0x2C
+
+/* PIO access to large FIFO (512 bytes) (big endian)*/
+#define R592_FIFO_PIO 0x30
+#define R592_LFIFO_SIZE 512 /* large fifo size */
+
+
+/* large FIFO DMA settings */
+#define R592_FIFO_DMA_SETTINGS 0x34
+#define R592_FIFO_DMA_SETTINGS_EN (1 << 0) /* DMA enabled */
+#define R592_FIFO_DMA_SETTINGS_DIR (1 << 1) /* Dma direction (1 read, 0 write) */
+#define R592_FIFO_DMA_SETTINGS_CAP (1 << 24) /* DMA is available */
+
+/* Maybe just a delay */
+/* Bits 17..19 are just a number */
+/* When bit 16 is set, bit 20 is waited for */
+/* Time to wait is about 50 spins * 2 ^ (bits 17..19) */
+/* Seems to be safe to just ignore */
+/* Probably a debug register */
+#define R592_REG38 0x38
+#define R592_REG38_CHANGE (1 << 16) /* Start bit */
+#define R592_REG38_DONE (1 << 20) /* HW set this after the delay */
+#define R592_REG38_SHIFT 17
+
+/* Debug register, written (0xABCDEF00) when error happens - not used*/
+#define R592_REG_3C 0x3C
+
+struct r592_device {
+ struct pci_dev *pci_dev;
+ struct memstick_host *host; /* host backpointer */
+ struct memstick_request *req; /* current request */
+
+ /* Registers, IRQ */
+ void __iomem *mmio;
+ int irq;
+ spinlock_t irq_lock;
+ spinlock_t io_thread_lock;
+ struct timer_list detect_timer;
+
+ struct task_struct *io_thread;
+ bool parallel_mode;
+
+ DECLARE_KFIFO(pio_fifo, u8, sizeof(u32));
+
+ /* DMA area */
+ int dma_capable;
+ int dma_error;
+ struct completion dma_done;
+ void *dummy_dma_page;
+ dma_addr_t dummy_dma_page_physical_address;
+
+};
+
+#define DRV_NAME "r592"
+
+
+#define message(format, ...) \
+ printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+#define __dbg(level, format, ...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG DRV_NAME \
+ ": " format "\n", ## __VA_ARGS__); \
+ } while (0)
+
+
+#define dbg(format, ...) __dbg(1, format, ## __VA_ARGS__)
+#define dbg_verbose(format, ...) __dbg(2, format, ## __VA_ARGS__)
+#define dbg_reg(format, ...) __dbg(3, format, ## __VA_ARGS__)
+
+#endif
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 9c511c1604a5..011cb6ce861b 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -416,7 +416,6 @@ static int __devinit device_irq_init(struct pm860x_chip *chip,
: chip->companion;
unsigned char status_buf[INT_STATUS_NUM];
unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
- struct irq_desc *desc;
int i, data, mask, ret = -EINVAL;
int __irq;
@@ -468,19 +467,17 @@ static int __devinit device_irq_init(struct pm860x_chip *chip,
if (!chip->core_irq)
goto out;
- desc = irq_to_desc(chip->core_irq);
-
/* register IRQ by genirq */
for (i = 0; i < ARRAY_SIZE(pm860x_irqs); i++) {
__irq = i + chip->irq_base;
- set_irq_chip_data(__irq, chip);
- set_irq_chip_and_handler(__irq, &pm860x_irq_chip,
+ irq_set_chip_data(__irq, chip);
+ irq_set_chip_and_handler(__irq, &pm860x_irq_chip,
handle_edge_irq);
- set_irq_nested_thread(__irq, 1);
+ irq_set_nested_thread(__irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(__irq, IRQF_VALID);
#else
- set_irq_noprobe(__irq);
+ irq_set_noprobe(__irq);
#endif
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index a9a1af49281e..e986f91fff9c 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -133,6 +133,7 @@ config TPS6105X
tristate "TPS61050/61052 Boost Converters"
depends on I2C
select REGULATOR
+ select MFD_CORE
select REGULATOR_FIXED_VOLTAGE
help
This option enables a driver for the TP61050/TPS61052
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 47f5709f3828..ef489f253402 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -63,7 +63,7 @@ obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o
obj-$(CONFIG_PMIC_DA903X) += da903x.o
max8925-objs := max8925-core.o max8925-i2c.o
obj-$(CONFIG_MFD_MAX8925) += max8925.o
-obj-$(CONFIG_MFD_MAX8997) += max8997.o
+obj-$(CONFIG_MFD_MAX8997) += max8997.o max8997-irq.o
obj-$(CONFIG_MFD_MAX8998) += max8998.o max8998-irq.o
pcf50633-objs := pcf50633-core.o pcf50633-irq.o
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index c12d04285226..ff86acf3e6bd 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -668,7 +668,7 @@ static int ab3550_startup_irq_enabled(struct device *dev, unsigned int irq)
struct ab3550_platform_data *plf_data;
bool val;
- ab = get_irq_chip_data(irq);
+ ab = irq_get_chip_data(irq);
plf_data = ab->i2c_client[0]->dev.platform_data;
irq -= plf_data->irq.base;
val = ((ab->startup_events[irq / 8] & BIT(irq % 8)) != 0);
@@ -1296,14 +1296,14 @@ static int __init ab3550_probe(struct i2c_client *client,
unsigned int irq;
irq = ab3550_plf_data->irq.base + i;
- set_irq_chip_data(irq, ab);
- set_irq_chip_and_handler(irq, &ab3550_irq_chip,
- handle_simple_irq);
- set_irq_nested_thread(irq, 1);
+ irq_set_chip_data(irq, ab);
+ irq_set_chip_and_handler(irq, &ab3550_irq_chip,
+ handle_simple_irq);
+ irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 6e185b272d00..62e33e2258d4 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -334,14 +334,14 @@ static int ab8500_irq_init(struct ab8500 *ab8500)
int irq;
for (irq = base; irq < base + AB8500_NR_IRQS; irq++) {
- set_irq_chip_data(irq, ab8500);
- set_irq_chip_and_handler(irq, &ab8500_irq_chip,
+ irq_set_chip_data(irq, ab8500);
+ irq_set_chip_and_handler(irq, &ab8500_irq_chip,
handle_simple_irq);
- set_irq_nested_thread(irq, 1);
+ irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
@@ -357,8 +357,8 @@ static void ab8500_irq_remove(struct ab8500 *ab8500)
#ifdef CONFIG_ARM
set_irq_flags(irq, 0);
#endif
- set_irq_chip_and_handler(irq, NULL, NULL);
- set_irq_chip_data(irq, NULL);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
}
}
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 0241f08fc00d..d4a851c6b5bf 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -139,13 +139,12 @@ static void asic3_irq_flip_edge(struct asic3 *asic,
static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
{
+ struct asic3 *asic = irq_desc_get_handler_data(desc);
+ struct irq_data *data = irq_desc_get_irq_data(desc);
int iter, i;
unsigned long flags;
- struct asic3 *asic;
-
- desc->irq_data.chip->irq_ack(&desc->irq_data);
- asic = get_irq_data(irq);
+ data->chip->irq_ack(data);
for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
u32 status;
@@ -188,8 +187,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
irqnr = asic->irq_base +
(ASIC3_GPIOS_PER_BANK * bank)
+ i;
- desc = irq_to_desc(irqnr);
- desc->handle_irq(irqnr, desc);
+ generic_handle_irq(irqnr);
if (asic->irq_bothedge[bank] & bit)
asic3_irq_flip_edge(asic, base,
bit);
@@ -200,11 +198,8 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
/* Handle remaining IRQs in the status register */
for (i = ASIC3_NUM_GPIOS; i < ASIC3_NR_IRQS; i++) {
/* They start at bit 4 and go up */
- if (status & (1 << (i - ASIC3_NUM_GPIOS + 4))) {
- desc = irq_to_desc(asic->irq_base + i);
- desc->handle_irq(asic->irq_base + i,
- desc);
- }
+ if (status & (1 << (i - ASIC3_NUM_GPIOS + 4)))
+ generic_handle_irq(asic->irq_base + i);
}
}
@@ -393,21 +388,21 @@ static int __init asic3_irq_probe(struct platform_device *pdev)
for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) {
if (irq < asic->irq_base + ASIC3_NUM_GPIOS)
- set_irq_chip(irq, &asic3_gpio_irq_chip);
+ irq_set_chip(irq, &asic3_gpio_irq_chip);
else
- set_irq_chip(irq, &asic3_irq_chip);
+ irq_set_chip(irq, &asic3_irq_chip);
- set_irq_chip_data(irq, asic);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_data(irq, asic);
+ irq_set_handler(irq, handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
asic3_write_register(asic, ASIC3_OFFSET(INTR, INT_MASK),
ASIC3_INTMASK_GINTMASK);
- set_irq_chained_handler(asic->irq_nr, asic3_irq_demux);
- set_irq_type(asic->irq_nr, IRQ_TYPE_EDGE_RISING);
- set_irq_data(asic->irq_nr, asic);
+ irq_set_chained_handler(asic->irq_nr, asic3_irq_demux);
+ irq_set_irq_type(asic->irq_nr, IRQ_TYPE_EDGE_RISING);
+ irq_set_handler_data(asic->irq_nr, asic);
return 0;
}
@@ -421,11 +416,10 @@ static void asic3_irq_remove(struct platform_device *pdev)
for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) {
set_irq_flags(irq, 0);
- set_irq_handler(irq, NULL);
- set_irq_chip(irq, NULL);
- set_irq_chip_data(irq, NULL);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
}
- set_irq_chained_handler(asic->irq_nr, NULL);
+ irq_set_chained_handler(asic->irq_nr, NULL);
}
/* GPIOs */
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index 886a06871065..155fa0407882 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -27,6 +27,7 @@
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <asm/olpc.h>
#define DRV_NAME "cs5535-mfd"
@@ -111,6 +112,20 @@ static __devinitdata struct mfd_cell cs5535_mfd_cells[] = {
},
};
+#ifdef CONFIG_OLPC
+static void __devinit cs5535_clone_olpc_cells(void)
+{
+ const char *acpi_clones[] = { "olpc-xo1-pm-acpi", "olpc-xo1-sci-acpi" };
+
+ if (!machine_is_olpc())
+ return;
+
+ mfd_clone_cell("cs5535-acpi", acpi_clones, ARRAY_SIZE(acpi_clones));
+}
+#else
+static void cs5535_clone_olpc_cells(void) { }
+#endif
+
static int __devinit cs5535_mfd_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -139,6 +154,7 @@ static int __devinit cs5535_mfd_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "MFD add devices failed: %d\n", err);
goto err_disable;
}
+ cs5535_clone_olpc_cells();
dev_info(&pdev->dev, "%zu devices registered.\n",
ARRAY_SIZE(cs5535_mfd_cells));
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 9e2d8dd5f9e5..f2f4029e21a0 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -162,6 +162,7 @@ static void pcap_unmask_irq(struct irq_data *d)
static struct irq_chip pcap_irq_chip = {
.name = "pcap",
+ .irq_disable = pcap_mask_irq,
.irq_mask = pcap_mask_irq,
.irq_unmask = pcap_unmask_irq,
};
@@ -196,17 +197,8 @@ static void pcap_isr_work(struct work_struct *work)
local_irq_disable();
service = isr & ~msr;
for (irq = pcap->irq_base; service; service >>= 1, irq++) {
- if (service & 1) {
- struct irq_desc *desc = irq_to_desc(irq);
-
- if (WARN(!desc, "Invalid PCAP IRQ %d\n", irq))
- break;
-
- if (desc->status & IRQ_DISABLED)
- note_interrupt(irq, desc, IRQ_NONE);
- else
- desc->handle_irq(irq, desc);
- }
+ if (service & 1)
+ generic_handle_irq(irq);
}
local_irq_enable();
ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
@@ -215,7 +207,7 @@ static void pcap_isr_work(struct work_struct *work)
static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
{
- struct pcap_chip *pcap = get_irq_data(irq);
+ struct pcap_chip *pcap = irq_get_handler_data(irq);
desc->irq_data.chip->irq_ack(&desc->irq_data);
queue_work(pcap->workqueue, &pcap->isr_work);
@@ -419,7 +411,7 @@ static int __devexit ezx_pcap_remove(struct spi_device *spi)
/* cleanup irqchip */
for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
- set_irq_chip_and_handler(i, NULL, NULL);
+ irq_set_chip_and_handler(i, NULL, NULL);
destroy_workqueue(pcap->workqueue);
@@ -476,12 +468,12 @@ static int __devinit ezx_pcap_probe(struct spi_device *spi)
/* setup irq chip */
for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
- set_irq_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
- set_irq_chip_data(i, pcap);
+ irq_set_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
+ irq_set_chip_data(i, pcap);
#ifdef CONFIG_ARM
set_irq_flags(i, IRQF_VALID);
#else
- set_irq_noprobe(i);
+ irq_set_noprobe(i);
#endif
}
@@ -490,10 +482,10 @@ static int __devinit ezx_pcap_probe(struct spi_device *spi)
ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER);
pcap->msr = PCAP_MASK_ALL_INTERRUPT;
- set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
- set_irq_data(spi->irq, pcap);
- set_irq_chained_handler(spi->irq, pcap_irq_handler);
- set_irq_wake(spi->irq, 1);
+ irq_set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
+ irq_set_handler_data(spi->irq, pcap);
+ irq_set_chained_handler(spi->irq, pcap_irq_handler);
+ irq_set_irq_wake(spi->irq, 1);
/* ADC */
adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
@@ -522,7 +514,7 @@ remove_subdevs:
free_irq(adc_irq, pcap);
free_irqchip:
for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
- set_irq_chip_and_handler(i, NULL, NULL);
+ irq_set_chip_and_handler(i, NULL, NULL);
/* destroy_workqueue: */
destroy_workqueue(pcap->workqueue);
free_pcap:
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index d00b6d1a69e5..bbaec0ccba8f 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -100,7 +100,7 @@ static struct irq_chip egpio_muxed_chip = {
static void egpio_handler(unsigned int irq, struct irq_desc *desc)
{
- struct egpio_info *ei = get_irq_data(irq);
+ struct egpio_info *ei = irq_desc_get_handler_data(desc);
int irqpin;
/* Read current pins. */
@@ -113,9 +113,7 @@ static void egpio_handler(unsigned int irq, struct irq_desc *desc)
for_each_set_bit(irqpin, &readval, ei->nirqs) {
/* Run irq handler */
pr_debug("got IRQ %d\n", irqpin);
- irq = ei->irq_start + irqpin;
- desc = irq_to_desc(irq);
- desc->handle_irq(irq, desc);
+ generic_handle_irq(ei->irq_start + irqpin);
}
}
@@ -346,14 +344,14 @@ static int __init egpio_probe(struct platform_device *pdev)
ei->ack_write = 0;
irq_end = ei->irq_start + ei->nirqs;
for (irq = ei->irq_start; irq < irq_end; irq++) {
- set_irq_chip(irq, &egpio_muxed_chip);
- set_irq_chip_data(irq, ei);
- set_irq_handler(irq, handle_simple_irq);
+ irq_set_chip_and_handler(irq, &egpio_muxed_chip,
+ handle_simple_irq);
+ irq_set_chip_data(irq, ei);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
- set_irq_type(ei->chained_irq, IRQ_TYPE_EDGE_RISING);
- set_irq_data(ei->chained_irq, ei);
- set_irq_chained_handler(ei->chained_irq, egpio_handler);
+ irq_set_irq_type(ei->chained_irq, IRQ_TYPE_EDGE_RISING);
+ irq_set_handler_data(ei->chained_irq, ei);
+ irq_set_chained_handler(ei->chained_irq, egpio_handler);
ack_irqs(ei);
device_init_wakeup(&pdev->dev, 1);
@@ -375,11 +373,10 @@ static int __exit egpio_remove(struct platform_device *pdev)
if (ei->chained_irq) {
irq_end = ei->irq_start + ei->nirqs;
for (irq = ei->irq_start; irq < irq_end; irq++) {
- set_irq_chip(irq, NULL);
- set_irq_handler(irq, NULL);
+ irq_set_chip_and_handler(irq, NULL, NULL);
set_irq_flags(irq, 0);
}
- set_irq_chained_handler(ei->chained_irq, NULL);
+ irq_set_chained_handler(ei->chained_irq, NULL);
device_init_wakeup(&pdev->dev, 0);
}
iounmap(ei->base_addr);
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index 296ad1562f69..d55065cc324c 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -58,6 +58,7 @@ struct htcpld_chip {
uint irq_start;
int nirqs;
+ unsigned int flow_type;
/*
* Work structure to allow for setting values outside of any
* possible interrupt context
@@ -97,12 +98,7 @@ static void htcpld_unmask(struct irq_data *data)
static int htcpld_set_type(struct irq_data *data, unsigned int flags)
{
- struct irq_desc *d = irq_to_desc(data->irq);
-
- if (!d) {
- pr_err("HTCPLD invalid IRQ: %d\n", data->irq);
- return -EINVAL;
- }
+ struct htcpld_chip *chip = irq_data_get_irq_chip_data(data);
if (flags & ~IRQ_TYPE_SENSE_MASK)
return -EINVAL;
@@ -111,9 +107,7 @@ static int htcpld_set_type(struct irq_data *data, unsigned int flags)
if (flags & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH))
return -EINVAL;
- d->status &= ~IRQ_TYPE_SENSE_MASK;
- d->status |= flags;
-
+ chip->flow_type = flags;
return 0;
}
@@ -135,7 +129,6 @@ static irqreturn_t htcpld_handler(int irq, void *dev)
unsigned int i;
unsigned long flags;
int irqpin;
- struct irq_desc *desc;
if (!htcpld) {
pr_debug("htcpld is null in ISR\n");
@@ -195,23 +188,19 @@ static irqreturn_t htcpld_handler(int irq, void *dev)
* associated interrupts.
*/
for (irqpin = 0; irqpin < chip->nirqs; irqpin++) {
- unsigned oldb, newb;
- int flags;
+ unsigned oldb, newb, type = chip->flow_type;
irq = chip->irq_start + irqpin;
- desc = irq_to_desc(irq);
- flags = desc->status;
/* Run the IRQ handler, but only if the bit value
* changed, and the proper flags are set */
oldb = (old_val >> irqpin) & 1;
newb = (uval >> irqpin) & 1;
- if ((!oldb && newb && (flags & IRQ_TYPE_EDGE_RISING)) ||
- (oldb && !newb &&
- (flags & IRQ_TYPE_EDGE_FALLING))) {
+ if ((!oldb && newb && (type & IRQ_TYPE_EDGE_RISING)) ||
+ (oldb && !newb && (type & IRQ_TYPE_EDGE_FALLING))) {
pr_debug("fire IRQ %d\n", irqpin);
- desc->handle_irq(irq, desc);
+ generic_handle_irq(irq);
}
}
}
@@ -359,13 +348,13 @@ static int __devinit htcpld_setup_chip_irq(
/* Setup irq handlers */
irq_end = chip->irq_start + chip->nirqs;
for (irq = chip->irq_start; irq < irq_end; irq++) {
- set_irq_chip(irq, &htcpld_muxed_chip);
- set_irq_chip_data(irq, chip);
- set_irq_handler(irq, handle_simple_irq);
+ irq_set_chip_and_handler(irq, &htcpld_muxed_chip,
+ handle_simple_irq);
+ irq_set_chip_data(irq, chip);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
#else
- set_irq_probe(irq);
+ irq_set_probe(irq);
#endif
}
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index aa518b9beaf5..a0bd0cf05af3 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -112,7 +112,7 @@ static struct irq_chip jz4740_adc_irq_chip = {
static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc)
{
- struct jz4740_adc *adc = get_irq_desc_data(desc);
+ struct jz4740_adc *adc = irq_desc_get_handler_data(desc);
uint8_t status;
unsigned int i;
@@ -310,13 +310,13 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, adc);
for (irq = adc->irq_base; irq < adc->irq_base + 5; ++irq) {
- set_irq_chip_data(irq, adc);
- set_irq_chip_and_handler(irq, &jz4740_adc_irq_chip,
- handle_level_irq);
+ irq_set_chip_data(irq, adc);
+ irq_set_chip_and_handler(irq, &jz4740_adc_irq_chip,
+ handle_level_irq);
}
- set_irq_data(adc->irq, adc);
- set_irq_chained_handler(adc->irq, jz4740_adc_irq_demux);
+ irq_set_handler_data(adc->irq, adc);
+ irq_set_chained_handler(adc->irq, jz4740_adc_irq_demux);
writeb(0x00, adc->base + JZ_REG_ADC_ENABLE);
writeb(0xff, adc->base + JZ_REG_ADC_CTRL);
@@ -347,8 +347,8 @@ static int __devexit jz4740_adc_remove(struct platform_device *pdev)
mfd_remove_devices(&pdev->dev);
- set_irq_data(adc->irq, NULL);
- set_irq_chained_handler(adc->irq, NULL);
+ irq_set_handler_data(adc->irq, NULL);
+ irq_set_chained_handler(adc->irq, NULL);
iounmap(adc->base);
release_mem_region(adc->mem->start, resource_size(adc->mem));
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 0e998dc4e7d8..58cc5fdde016 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -517,7 +517,6 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
struct max8925_platform_data *pdata)
{
unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
- struct irq_desc *desc;
int i, ret;
int __irq;
@@ -544,19 +543,18 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
mutex_init(&chip->irq_lock);
chip->core_irq = irq;
chip->irq_base = pdata->irq_base;
- desc = irq_to_desc(chip->core_irq);
/* register with genirq */
for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) {
__irq = i + chip->irq_base;
- set_irq_chip_data(__irq, chip);
- set_irq_chip_and_handler(__irq, &max8925_irq_chip,
+ irq_set_chip_data(__irq, chip);
+ irq_set_chip_and_handler(__irq, &max8925_irq_chip,
handle_edge_irq);
- set_irq_nested_thread(__irq, 1);
+ irq_set_nested_thread(__irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(__irq, IRQF_VALID);
#else
- set_irq_noprobe(__irq);
+ irq_set_noprobe(__irq);
#endif
}
if (!irq) {
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
new file mode 100644
index 000000000000..638bf7e4d3b3
--- /dev/null
+++ b/drivers/mfd/max8997-irq.c
@@ -0,0 +1,377 @@
+/*
+ * max8997-irq.c - Interrupt controller support for MAX8997
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8998-irq.c
+ */
+
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max8997.h>
+#include <linux/mfd/max8997-private.h>
+
+static const u8 max8997_mask_reg[] = {
+ [PMIC_INT1] = MAX8997_REG_INT1MSK,
+ [PMIC_INT2] = MAX8997_REG_INT2MSK,
+ [PMIC_INT3] = MAX8997_REG_INT3MSK,
+ [PMIC_INT4] = MAX8997_REG_INT4MSK,
+ [FUEL_GAUGE] = MAX8997_REG_INVALID,
+ [MUIC_INT1] = MAX8997_MUIC_REG_INTMASK1,
+ [MUIC_INT2] = MAX8997_MUIC_REG_INTMASK2,
+ [MUIC_INT3] = MAX8997_MUIC_REG_INTMASK3,
+ [GPIO_LOW] = MAX8997_REG_INVALID,
+ [GPIO_HI] = MAX8997_REG_INVALID,
+ [FLASH_STATUS] = MAX8997_REG_INVALID,
+};
+
+static struct i2c_client *get_i2c(struct max8997_dev *max8997,
+ enum max8997_irq_source src)
+{
+ switch (src) {
+ case PMIC_INT1 ... PMIC_INT4:
+ return max8997->i2c;
+ case FUEL_GAUGE:
+ return NULL;
+ case MUIC_INT1 ... MUIC_INT3:
+ return max8997->muic;
+ case GPIO_LOW ... GPIO_HI:
+ return max8997->i2c;
+ case FLASH_STATUS:
+ return max8997->i2c;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+struct max8997_irq_data {
+ int mask;
+ enum max8997_irq_source group;
+};
+
+#define DECLARE_IRQ(idx, _group, _mask) \
+ [(idx)] = { .group = (_group), .mask = (_mask) }
+static const struct max8997_irq_data max8997_irqs[] = {
+ DECLARE_IRQ(MAX8997_PMICIRQ_PWRONR, PMIC_INT1, 1 << 0),
+ DECLARE_IRQ(MAX8997_PMICIRQ_PWRONF, PMIC_INT1, 1 << 1),
+ DECLARE_IRQ(MAX8997_PMICIRQ_PWRON1SEC, PMIC_INT1, 1 << 3),
+ DECLARE_IRQ(MAX8997_PMICIRQ_JIGONR, PMIC_INT1, 1 << 4),
+ DECLARE_IRQ(MAX8997_PMICIRQ_JIGONF, PMIC_INT1, 1 << 5),
+ DECLARE_IRQ(MAX8997_PMICIRQ_LOWBAT2, PMIC_INT1, 1 << 6),
+ DECLARE_IRQ(MAX8997_PMICIRQ_LOWBAT1, PMIC_INT1, 1 << 7),
+
+ DECLARE_IRQ(MAX8997_PMICIRQ_JIGR, PMIC_INT2, 1 << 0),
+ DECLARE_IRQ(MAX8997_PMICIRQ_JIGF, PMIC_INT2, 1 << 1),
+ DECLARE_IRQ(MAX8997_PMICIRQ_MR, PMIC_INT2, 1 << 2),
+ DECLARE_IRQ(MAX8997_PMICIRQ_DVS1OK, PMIC_INT2, 1 << 3),
+ DECLARE_IRQ(MAX8997_PMICIRQ_DVS2OK, PMIC_INT2, 1 << 4),
+ DECLARE_IRQ(MAX8997_PMICIRQ_DVS3OK, PMIC_INT2, 1 << 5),
+ DECLARE_IRQ(MAX8997_PMICIRQ_DVS4OK, PMIC_INT2, 1 << 6),
+
+ DECLARE_IRQ(MAX8997_PMICIRQ_CHGINS, PMIC_INT3, 1 << 0),
+ DECLARE_IRQ(MAX8997_PMICIRQ_CHGRM, PMIC_INT3, 1 << 1),
+ DECLARE_IRQ(MAX8997_PMICIRQ_DCINOVP, PMIC_INT3, 1 << 2),
+ DECLARE_IRQ(MAX8997_PMICIRQ_TOPOFFR, PMIC_INT3, 1 << 3),
+ DECLARE_IRQ(MAX8997_PMICIRQ_CHGRSTF, PMIC_INT3, 1 << 5),
+ DECLARE_IRQ(MAX8997_PMICIRQ_MBCHGTMEXPD, PMIC_INT3, 1 << 7),
+
+ DECLARE_IRQ(MAX8997_PMICIRQ_RTC60S, PMIC_INT4, 1 << 0),
+ DECLARE_IRQ(MAX8997_PMICIRQ_RTCA1, PMIC_INT4, 1 << 1),
+ DECLARE_IRQ(MAX8997_PMICIRQ_RTCA2, PMIC_INT4, 1 << 2),
+ DECLARE_IRQ(MAX8997_PMICIRQ_SMPL_INT, PMIC_INT4, 1 << 3),
+ DECLARE_IRQ(MAX8997_PMICIRQ_RTC1S, PMIC_INT4, 1 << 4),
+ DECLARE_IRQ(MAX8997_PMICIRQ_WTSR, PMIC_INT4, 1 << 5),
+
+ DECLARE_IRQ(MAX8997_MUICIRQ_ADCError, MUIC_INT1, 1 << 2),
+ DECLARE_IRQ(MAX8997_MUICIRQ_ADCLow, MUIC_INT1, 1 << 1),
+ DECLARE_IRQ(MAX8997_MUICIRQ_ADC, MUIC_INT1, 1 << 0),
+
+ DECLARE_IRQ(MAX8997_MUICIRQ_VBVolt, MUIC_INT2, 1 << 4),
+ DECLARE_IRQ(MAX8997_MUICIRQ_DBChg, MUIC_INT2, 1 << 3),
+ DECLARE_IRQ(MAX8997_MUICIRQ_DCDTmr, MUIC_INT2, 1 << 2),
+ DECLARE_IRQ(MAX8997_MUICIRQ_ChgDetRun, MUIC_INT2, 1 << 1),
+ DECLARE_IRQ(MAX8997_MUICIRQ_ChgTyp, MUIC_INT2, 1 << 0),
+
+ DECLARE_IRQ(MAX8997_MUICIRQ_OVP, MUIC_INT3, 1 << 2),
+};
+
+static void max8997_irq_lock(struct irq_data *data)
+{
+ struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+
+ mutex_lock(&max8997->irqlock);
+}
+
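+/* The irq_mask/irq_unmask callbacks only update the cached masks; the actual I2C register writes happen here, when the bus lock is released */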
+static void max8997_irq_sync_unlock(struct irq_data *data)
+{
+ struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+ int i;
+
+ for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++) {
+ u8 mask_reg = max8997_mask_reg[i];
+ struct i2c_client *i2c = get_i2c(max8997, i);
+
+ if (mask_reg == MAX8997_REG_INVALID ||
+ IS_ERR_OR_NULL(i2c))
+ continue;
+ max8997->irq_masks_cache[i] = max8997->irq_masks_cur[i];
+
+ max8997_write_reg(i2c, max8997_mask_reg[i],
+ max8997->irq_masks_cur[i]);
+ }
+
+ mutex_unlock(&max8997->irqlock);
+}
+
+static inline const struct max8997_irq_data *
+irq_to_max8997_irq(struct max8997_dev *max8997, int irq)
+{
+ return &max8997_irqs[irq - max8997->irq_base];
+}
+
+static void max8997_irq_mask(struct irq_data *data)
+{
+ struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+ const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997,
+ data->irq);
+
+ max8997->irq_masks_cur[irq_data->group] |= irq_data->mask;
+}
+
+static void max8997_irq_unmask(struct irq_data *data)
+{
+ struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+ const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997,
+ data->irq);
+
+ max8997->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
+}
+
+static struct irq_chip max8997_irq_chip = {
+ .name = "max8997",
+ .irq_bus_lock = max8997_irq_lock,
+ .irq_bus_sync_unlock = max8997_irq_sync_unlock,
+ .irq_mask = max8997_irq_mask,
+ .irq_unmask = max8997_irq_unmask,
+};
+
+#define MAX8997_IRQSRC_PMIC (1 << 1)
+#define MAX8997_IRQSRC_FUELGAUGE (1 << 2)
+#define MAX8997_IRQSRC_MUIC (1 << 3)
+#define MAX8997_IRQSRC_GPIO (1 << 4)
+#define MAX8997_IRQSRC_FLASH (1 << 5)
+static irqreturn_t max8997_irq_thread(int irq, void *data)
+{
+ struct max8997_dev *max8997 = data;
+ u8 irq_reg[MAX8997_IRQ_GROUP_NR] = {};
+ u8 irq_src;
+ int ret;
+ int i;
+
+ ret = max8997_read_reg(max8997->i2c, MAX8997_REG_INTSRC, &irq_src);
+ if (ret < 0) {
+ dev_err(max8997->dev, "Failed to read interrupt source: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ if (irq_src & MAX8997_IRQSRC_PMIC) {
+ /* PMIC INT1 ~ INT4 */
+ max8997_bulk_read(max8997->i2c, MAX8997_REG_INT1, 4,
+ &irq_reg[PMIC_INT1]);
+ }
+ if (irq_src & MAX8997_IRQSRC_FUELGAUGE) {
+ /*
+ * TODO: FUEL GAUGE
+ *
+ * This is to be supported by the Max17042 driver. When
+ * an interrupt occurs here, it should be relayed to a
+ * connected Max17042 device (probably via platform
+ * data). However, the Max17042 driver does not handle
+ * interrupts yet. The Max17042 IRQ driver should be
+ * usable both as a stand-alone device and as a
+ * Max8997-dependent device. Because it is not yet ready
+ * on the Max17042 side and is not critical to operating
+ * the Max8997, we do not implement this in the initial releases.
+ */
+ irq_reg[FUEL_GAUGE] = 0;
+ }
+ if (irq_src & MAX8997_IRQSRC_MUIC) {
+ /* MUIC INT1 ~ INT3 */
+ max8997_bulk_read(max8997->muic, MAX8997_MUIC_REG_INT1, 3,
+ &irq_reg[MUIC_INT1]);
+ }
+ if (irq_src & MAX8997_IRQSRC_GPIO) {
+ /* GPIO Interrupt */
+ u8 gpio_info[MAX8997_NUM_GPIO];
+
+ irq_reg[GPIO_LOW] = 0;
+ irq_reg[GPIO_HI] = 0;
+
+ max8997_bulk_read(max8997->i2c, MAX8997_REG_GPIOCNTL1,
+ MAX8997_NUM_GPIO, gpio_info);
+ for (i = 0; i < MAX8997_NUM_GPIO; i++) {
+ bool interrupt = false;
+
+ switch (gpio_info[i] & MAX8997_GPIO_INT_MASK) {
+ case MAX8997_GPIO_INT_BOTH:
+ if (max8997->gpio_status[i] != gpio_info[i])
+ interrupt = true;
+ break;
+ case MAX8997_GPIO_INT_RISE:
+ if ((max8997->gpio_status[i] != gpio_info[i]) &&
+ (gpio_info[i] & MAX8997_GPIO_DATA_MASK))
+ interrupt = true;
+ break;
+ case MAX8997_GPIO_INT_FALL:
+ if ((max8997->gpio_status[i] != gpio_info[i]) &&
+ !(gpio_info[i] & MAX8997_GPIO_DATA_MASK))
+ interrupt = true;
+ break;
+ default:
+ break;
+ }
+
+ if (interrupt) {
+ if (i < 8)
+ irq_reg[GPIO_LOW] |= (1 << i);
+ else
+ irq_reg[GPIO_HI] |= (1 << (i - 8));
+ }
+
+ }
+ }
+ if (irq_src & MAX8997_IRQSRC_FLASH) {
+ /* Flash Status Interrupt */
+ ret = max8997_read_reg(max8997->i2c, MAX8997_REG_FLASHSTATUS,
+ &irq_reg[FLASH_STATUS]);
+ }
+
+ /* Apply masking */
+ for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++)
+ irq_reg[i] &= ~max8997->irq_masks_cur[i];
+
+ /* Report */
+ for (i = 0; i < MAX8997_IRQ_NR; i++) {
+ if (irq_reg[max8997_irqs[i].group] & max8997_irqs[i].mask)
+ handle_nested_irq(max8997->irq_base + i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int max8997_irq_resume(struct max8997_dev *max8997)
+{
+ if (max8997->irq && max8997->irq_base)
+ max8997_irq_thread(max8997->irq_base, max8997);
+ return 0;
+}
+
+int max8997_irq_init(struct max8997_dev *max8997)
+{
+ int i;
+ int cur_irq;
+ int ret;
+ u8 val;
+
+ if (!max8997->irq) {
+ dev_warn(max8997->dev, "No interrupt specified.\n");
+ max8997->irq_base = 0;
+ return 0;
+ }
+
+ if (!max8997->irq_base) {
+ dev_err(max8997->dev, "No interrupt base specified.\n");
+ return 0;
+ }
+
+ mutex_init(&max8997->irqlock);
+
+ /* Mask individual interrupt sources */
+ for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++) {
+ struct i2c_client *i2c;
+
+ max8997->irq_masks_cur[i] = 0xff;
+ max8997->irq_masks_cache[i] = 0xff;
+ i2c = get_i2c(max8997, i);
+
+ if (IS_ERR_OR_NULL(i2c))
+ continue;
+ if (max8997_mask_reg[i] == MAX8997_REG_INVALID)
+ continue;
+
+ max8997_write_reg(i2c, max8997_mask_reg[i], 0xff);
+ }
+
+ for (i = 0; i < MAX8997_NUM_GPIO; i++) {
+ max8997_read_reg(max8997->i2c,
+ MAX8997_REG_GPIOCNTL1 + i, &val);
+ max8997->gpio_status[i] =
+ (val & MAX8997_GPIO_DATA_MASK) ? true : false;
+ }
+
+ /* Register with genirq */
+ for (i = 0; i < MAX8997_IRQ_NR; i++) {
+ cur_irq = i + max8997->irq_base;
+ irq_set_chip_data(cur_irq, max8997);
+ irq_set_chip_and_handler(cur_irq, &max8997_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(cur_irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ irq_set_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(max8997->irq, NULL, max8997_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "max8997-irq", max8997);
+
+ if (ret) {
+ dev_err(max8997->dev, "Failed to request IRQ %d: %d\n",
+ max8997->irq, ret);
+ return ret;
+ }
+
+ if (!max8997->ono)
+ return 0;
+
+ ret = request_threaded_irq(max8997->ono, NULL, max8997_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT, "max8997-ono", max8997);
+
+ if (ret)
+ dev_err(max8997->dev, "Failed to request ono-IRQ %d: %d\n",
+ max8997->ono, ret);
+
+ return 0;
+}
+
+void max8997_irq_exit(struct max8997_dev *max8997)
+{
+ if (max8997->ono)
+ free_irq(max8997->ono, max8997);
+
+ if (max8997->irq)
+ free_irq(max8997->irq, max8997);
+}
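For context, the max8997-irq core above exposes each interrupt source as a nested, threaded IRQ starting at max8997->irq_base. Below is a minimal sketch of how a dependent driver could consume one of those lines; the handler body, the "max8997-pwron" name and the choice of MAX8997_PMICIRQ_PWRONR as the example source are illustrative assumptions (not part of this patch), and the header names assume the MAX8997 MFD headers from this series.

#include <linux/interrupt.h>
#include <linux/mfd/max8997-private.h>

static irqreturn_t example_pwron_handler(int irq, void *dev_id)
{
	/* Runs in thread context; dispatched via handle_nested_irq(). */
	return IRQ_HANDLED;
}

static int example_attach_pwron(struct max8997_dev *max8997)
{
	int virq = max8997->irq_base + MAX8997_PMICIRQ_PWRONR;

	/* Nested IRQ: no hard handler, only a threaded one. */
	return request_threaded_irq(virq, NULL, example_pwron_handler,
				    IRQF_ONESHOT, "max8997-pwron", max8997);
}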
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c
index 3903e1fbb334..5919710dc9ed 100644
--- a/drivers/mfd/max8998-irq.c
+++ b/drivers/mfd/max8998-irq.c
@@ -224,14 +224,14 @@ int max8998_irq_init(struct max8998_dev *max8998)
/* register with genirq */
for (i = 0; i < MAX8998_IRQ_NR; i++) {
cur_irq = i + max8998->irq_base;
- set_irq_chip_data(cur_irq, max8998);
- set_irq_chip_and_handler(cur_irq, &max8998_irq_chip,
+ irq_set_chip_data(cur_irq, max8998);
+ irq_set_chip_and_handler(cur_irq, &max8998_irq_chip,
handle_edge_irq);
- set_irq_nested_thread(cur_irq, 1);
+ irq_set_nested_thread(cur_irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(cur_irq, IRQF_VALID);
#else
- set_irq_noprobe(cur_irq);
+ irq_set_noprobe(cur_irq);
#endif
}
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index c00214257da2..9ec7570f5b81 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -209,7 +209,7 @@ static int max8998_suspend(struct device *dev)
struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
if (max8998->wakeup)
- set_irq_wake(max8998->irq, 1);
+ irq_set_irq_wake(max8998->irq, 1);
return 0;
}
@@ -219,7 +219,7 @@ static int max8998_resume(struct device *dev)
struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
if (max8998->wakeup)
- set_irq_wake(max8998->irq, 0);
+ irq_set_irq_wake(max8998->irq, 0);
/*
* In LP3974, if IRQ registers are not "read & clear"
* when it's set during sleep, the interrupt becomes
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 79eda0264fb2..d01574d98870 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -184,16 +184,12 @@ void mfd_remove_devices(struct device *parent)
}
EXPORT_SYMBOL(mfd_remove_devices);
-static int add_shared_platform_device(const char *cell, const char *name)
+int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
{
struct mfd_cell cell_entry;
struct device *dev;
struct platform_device *pdev;
- int err;
-
- /* check if we've already registered a device (don't fail if we have) */
- if (bus_find_device_by_name(&platform_bus_type, NULL, name))
- return 0;
+ int i;
/* fetch the parent cell's device (should already be registered!) */
dev = bus_find_device_by_name(&platform_bus_type, NULL, cell);
@@ -206,44 +202,17 @@ static int add_shared_platform_device(const char *cell, const char *name)
WARN_ON(!cell_entry.enable);
- cell_entry.name = name;
- err = mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0);
- if (err)
- dev_err(dev, "MFD add devices failed: %d\n", err);
- return err;
-}
-
-int mfd_shared_platform_driver_register(struct platform_driver *drv,
- const char *cellname)
-{
- int err;
-
- err = add_shared_platform_device(cellname, drv->driver.name);
- if (err)
- printk(KERN_ERR "failed to add platform device %s\n",
- drv->driver.name);
-
- err = platform_driver_register(drv);
- if (err)
- printk(KERN_ERR "failed to add platform driver %s\n",
- drv->driver.name);
-
- return err;
-}
-EXPORT_SYMBOL(mfd_shared_platform_driver_register);
-
-void mfd_shared_platform_driver_unregister(struct platform_driver *drv)
-{
- struct device *dev;
-
- dev = bus_find_device_by_name(&platform_bus_type, NULL,
- drv->driver.name);
- if (dev)
- platform_device_unregister(to_platform_device(dev));
+ for (i = 0; i < n_clones; i++) {
+ cell_entry.name = clones[i];
+ /* don't give up if a single call fails; just report error */
+ if (mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0))
+ dev_err(dev, "failed to create platform device '%s'\n",
+ clones[i]);
+ }
- platform_driver_unregister(drv);
+ return 0;
}
-EXPORT_SYMBOL(mfd_shared_platform_driver_unregister);
+EXPORT_SYMBOL(mfd_clone_cell);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index c1306ed43e3c..c7687f6a78a0 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -356,7 +356,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id pcf50633_id_table[] = {
+static const struct i2c_device_id pcf50633_id_table[] = {
{"pcf50633", 0x73},
{/* end of list */}
};
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
index 193c940225b5..10dbe6374a89 100644
--- a/drivers/mfd/rdc321x-southbridge.c
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -97,6 +97,7 @@ static DEFINE_PCI_DEVICE_TABLE(rdc321x_sb_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030) },
{}
};
+MODULE_DEVICE_TABLE(pci, rdc321x_sb_table);
static struct pci_driver rdc321x_sb_driver = {
.name = "RDC321x Southbridge",
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 3e5732b58c49..7ab7746631d4 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -762,14 +762,14 @@ static int __devinit stmpe_irq_init(struct stmpe *stmpe)
int irq;
for (irq = base; irq < base + num_irqs; irq++) {
- set_irq_chip_data(irq, stmpe);
- set_irq_chip_and_handler(irq, &stmpe_irq_chip,
+ irq_set_chip_data(irq, stmpe);
+ irq_set_chip_and_handler(irq, &stmpe_irq_chip,
handle_edge_irq);
- set_irq_nested_thread(irq, 1);
+ irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
@@ -786,8 +786,8 @@ static void stmpe_irq_remove(struct stmpe *stmpe)
#ifdef CONFIG_ARM
set_irq_flags(irq, 0);
#endif
- set_irq_chip_and_handler(irq, NULL, NULL);
- set_irq_chip_data(irq, NULL);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
}
}
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index af57fc706a4c..42830e692964 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -186,7 +186,7 @@ static struct mfd_cell t7l66xb_cells[] = {
/* Handle the T7L66XB interrupt mux */
static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc)
{
- struct t7l66xb *t7l66xb = get_irq_data(irq);
+ struct t7l66xb *t7l66xb = irq_get_handler_data(irq);
unsigned int isr;
unsigned int i, irq_base;
@@ -243,17 +243,16 @@ static void t7l66xb_attach_irq(struct platform_device *dev)
irq_base = t7l66xb->irq_base;
for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) {
- set_irq_chip(irq, &t7l66xb_chip);
- set_irq_chip_data(irq, t7l66xb);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &t7l66xb_chip, handle_level_irq);
+ irq_set_chip_data(irq, t7l66xb);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
#endif
}
- set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING);
- set_irq_data(t7l66xb->irq, t7l66xb);
- set_irq_chained_handler(t7l66xb->irq, t7l66xb_irq);
+ irq_set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING);
+ irq_set_handler_data(t7l66xb->irq, t7l66xb);
+ irq_set_chained_handler(t7l66xb->irq, t7l66xb_irq);
}
static void t7l66xb_detach_irq(struct platform_device *dev)
@@ -263,15 +262,15 @@ static void t7l66xb_detach_irq(struct platform_device *dev)
irq_base = t7l66xb->irq_base;
- set_irq_chained_handler(t7l66xb->irq, NULL);
- set_irq_data(t7l66xb->irq, NULL);
+ irq_set_chained_handler(t7l66xb->irq, NULL);
+ irq_set_handler_data(t7l66xb->irq, NULL);
for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) {
#ifdef CONFIG_ARM
set_irq_flags(irq, 0);
#endif
- set_irq_chip(irq, NULL);
- set_irq_chip_data(irq, NULL);
+ irq_set_chip(irq, NULL);
+ irq_set_chip_data(irq, NULL);
}
}
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 729dbeed2ce0..c27e515b0722 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -192,14 +192,14 @@ static int tc3589x_irq_init(struct tc3589x *tc3589x)
int irq;
for (irq = base; irq < base + TC3589x_NR_INTERNAL_IRQS; irq++) {
- set_irq_chip_data(irq, tc3589x);
- set_irq_chip_and_handler(irq, &dummy_irq_chip,
+ irq_set_chip_data(irq, tc3589x);
+ irq_set_chip_and_handler(irq, &dummy_irq_chip,
handle_edge_irq);
- set_irq_nested_thread(irq, 1);
+ irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
@@ -215,8 +215,8 @@ static void tc3589x_irq_remove(struct tc3589x *tc3589x)
#ifdef CONFIG_ARM
set_irq_flags(irq, 0);
#endif
- set_irq_chip_and_handler(irq, NULL, NULL);
- set_irq_chip_data(irq, NULL);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
}
}
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 3d62ded86a8f..fc53ce287601 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -513,7 +513,7 @@ static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base)
static void
tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
{
- struct tc6393xb *tc6393xb = get_irq_data(irq);
+ struct tc6393xb *tc6393xb = irq_get_handler_data(irq);
unsigned int isr;
unsigned int i, irq_base;
@@ -572,15 +572,14 @@ static void tc6393xb_attach_irq(struct platform_device *dev)
irq_base = tc6393xb->irq_base;
for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
- set_irq_chip(irq, &tc6393xb_chip);
- set_irq_chip_data(irq, tc6393xb);
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &tc6393xb_chip, handle_edge_irq);
+ irq_set_chip_data(irq, tc6393xb);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
- set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING);
- set_irq_data(tc6393xb->irq, tc6393xb);
- set_irq_chained_handler(tc6393xb->irq, tc6393xb_irq);
+ irq_set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING);
+ irq_set_handler_data(tc6393xb->irq, tc6393xb);
+ irq_set_chained_handler(tc6393xb->irq, tc6393xb_irq);
}
static void tc6393xb_detach_irq(struct platform_device *dev)
@@ -588,15 +587,15 @@ static void tc6393xb_detach_irq(struct platform_device *dev)
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
unsigned int irq, irq_base;
- set_irq_chained_handler(tc6393xb->irq, NULL);
- set_irq_data(tc6393xb->irq, NULL);
+ irq_set_chained_handler(tc6393xb->irq, NULL);
+ irq_set_handler_data(tc6393xb->irq, NULL);
irq_base = tc6393xb->irq_base;
for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
set_irq_flags(irq, 0);
- set_irq_chip(irq, NULL);
- set_irq_chip_data(irq, NULL);
+ irq_set_chip(irq, NULL);
+ irq_set_chip_data(irq, NULL);
}
}
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 0aa9186aec19..b600808690c1 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -422,10 +422,10 @@ static int __devinit tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) {
int __irq = i + tps6586x->irq_base;
- set_irq_chip_data(__irq, tps6586x);
- set_irq_chip_and_handler(__irq, &tps6586x->irq_chip,
+ irq_set_chip_data(__irq, tps6586x);
+ irq_set_chip_and_handler(__irq, &tps6586x->irq_chip,
handle_simple_irq);
- set_irq_nested_thread(__irq, 1);
+ irq_set_nested_thread(__irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(__irq, IRQF_VALID);
#endif
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 63a30e88908f..8a7ee3139b86 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -320,24 +320,8 @@ static int twl4030_irq_thread(void *data)
for (module_irq = twl4030_irq_base;
pih_isr;
pih_isr >>= 1, module_irq++) {
- if (pih_isr & 0x1) {
- struct irq_desc *d = irq_to_desc(module_irq);
-
- if (!d) {
- pr_err("twl4030: Invalid SIH IRQ: %d\n",
- module_irq);
- return -EINVAL;
- }
-
- /* These can't be masked ... always warn
- * if we get any surprises.
- */
- if (d->status & IRQ_DISABLED)
- note_interrupt(module_irq, d,
- IRQ_NONE);
- else
- d->handle_irq(module_irq, d);
- }
+ if (pih_isr & 0x1)
+ generic_handle_irq(module_irq);
}
local_irq_enable();
@@ -470,7 +454,7 @@ static inline void activate_irq(int irq)
set_irq_flags(irq, IRQF_VALID);
#else
/* same effect on other architectures */
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
@@ -560,24 +544,18 @@ static void twl4030_sih_do_edge(struct work_struct *work)
/* Modify only the bits we know must change */
while (edge_change) {
int i = fls(edge_change) - 1;
- struct irq_desc *d = irq_to_desc(i + agent->irq_base);
+ struct irq_data *idata = irq_get_irq_data(i + agent->irq_base);
int byte = 1 + (i >> 2);
int off = (i & 0x3) * 2;
-
- if (!d) {
- pr_err("twl4030: Invalid IRQ: %d\n",
- i + agent->irq_base);
- return;
- }
+ unsigned int type;
bytes[byte] &= ~(0x03 << off);
- raw_spin_lock_irq(&d->lock);
- if (d->status & IRQ_TYPE_EDGE_RISING)
+ type = irqd_get_trigger_type(idata);
+ if (type & IRQ_TYPE_EDGE_RISING)
bytes[byte] |= BIT(off + 1);
- if (d->status & IRQ_TYPE_EDGE_FALLING)
+ if (type & IRQ_TYPE_EDGE_FALLING)
bytes[byte] |= BIT(off + 0);
- raw_spin_unlock_irq(&d->lock);
edge_change &= ~BIT(i);
}
@@ -626,21 +604,13 @@ static void twl4030_sih_unmask(struct irq_data *data)
static int twl4030_sih_set_type(struct irq_data *data, unsigned trigger)
{
struct sih_agent *sih = irq_data_get_irq_chip_data(data);
- struct irq_desc *desc = irq_to_desc(data->irq);
unsigned long flags;
- if (!desc) {
- pr_err("twl4030: Invalid IRQ: %d\n", data->irq);
- return -EINVAL;
- }
-
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
return -EINVAL;
spin_lock_irqsave(&sih_agent_lock, flags);
- if ((desc->status & IRQ_TYPE_SENSE_MASK) != trigger) {
- desc->status &= ~IRQ_TYPE_SENSE_MASK;
- desc->status |= trigger;
+ if (irqd_get_trigger_type(data) != trigger) {
sih->edge_change |= BIT(data->irq - sih->irq_base);
queue_work(wq, &sih->edge_work);
}
@@ -680,7 +650,7 @@ static inline int sih_read_isr(const struct sih *sih)
*/
static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
{
- struct sih_agent *agent = get_irq_data(irq);
+ struct sih_agent *agent = irq_get_handler_data(irq);
const struct sih *sih = agent->sih;
int isr;
@@ -754,9 +724,9 @@ int twl4030_sih_setup(int module)
for (i = 0; i < sih->bits; i++) {
irq = irq_base + i;
- set_irq_chip_and_handler(irq, &twl4030_sih_irq_chip,
- handle_edge_irq);
- set_irq_chip_data(irq, agent);
+ irq_set_chip_and_handler(irq, &twl4030_sih_irq_chip,
+ handle_edge_irq);
+ irq_set_chip_data(irq, agent);
activate_irq(irq);
}
@@ -765,8 +735,8 @@ int twl4030_sih_setup(int module)
/* replace generic PIH handler (handle_simple_irq) */
irq = sih_mod + twl4030_irq_base;
- set_irq_data(irq, agent);
- set_irq_chained_handler(irq, handle_twl4030_sih);
+ irq_set_handler_data(irq, agent);
+ irq_set_chained_handler(irq, handle_twl4030_sih);
pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
irq, irq_base, twl4030_irq_next - 1);
@@ -815,8 +785,8 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
for (i = irq_base; i < irq_end; i++) {
- set_irq_chip_and_handler(i, &twl4030_irq_chip,
- handle_simple_irq);
+ irq_set_chip_and_handler(i, &twl4030_irq_chip,
+ handle_simple_irq);
activate_irq(i);
}
twl4030_irq_next = i;
@@ -856,7 +826,7 @@ fail_rqirq:
/* clean up twl4030_sih_setup */
fail:
for (i = irq_base; i < irq_end; i++)
- set_irq_chip_and_handler(i, NULL, NULL);
+ irq_set_chip_and_handler(i, NULL, NULL);
destroy_workqueue(wq);
wq = NULL;
return status;
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 4082ed73613f..fa937052fbab 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -140,22 +140,7 @@ static int twl6030_irq_thread(void *data)
if (sts.int_sts & 0x1) {
int module_irq = twl6030_irq_base +
twl6030_interrupt_mapping[i];
- struct irq_desc *d = irq_to_desc(module_irq);
-
- if (!d) {
- pr_err("twl6030: Invalid SIH IRQ: %d\n",
- module_irq);
- return -EINVAL;
- }
-
- /* These can't be masked ... always warn
- * if we get any surprises.
- */
- if (d->status & IRQ_DISABLED)
- note_interrupt(module_irq, d,
- IRQ_NONE);
- else
- d->handle_irq(module_irq, d);
+ generic_handle_irq(module_irq);
}
local_irq_enable();
@@ -198,7 +183,7 @@ static inline void activate_irq(int irq)
set_irq_flags(irq, IRQF_VALID);
#else
/* same effect on other architectures */
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
@@ -335,8 +320,8 @@ int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
twl6030_irq_chip.irq_set_type = NULL;
for (i = irq_base; i < irq_end; i++) {
- set_irq_chip_and_handler(i, &twl6030_irq_chip,
- handle_simple_irq);
+ irq_set_chip_and_handler(i, &twl6030_irq_chip,
+ handle_simple_irq);
activate_irq(i);
}
@@ -365,7 +350,7 @@ fail_irq:
fail_kthread:
for (i = irq_base; i < irq_end; i++)
- set_irq_chip_and_handler(i, NULL, NULL);
+ irq_set_chip_and_handler(i, NULL, NULL);
return status;
}
diff --git a/drivers/mfd/wl1273-core.c b/drivers/mfd/wl1273-core.c
index f76f6c798046..04914f2836c0 100644
--- a/drivers/mfd/wl1273-core.c
+++ b/drivers/mfd/wl1273-core.c
@@ -25,7 +25,7 @@
#define DRIVER_DESC "WL1273 FM Radio Core"
-static struct i2c_device_id wl1273_driver_id_table[] = {
+static const struct i2c_device_id wl1273_driver_id_table[] = {
{ WL1273_FM_DRIVER_NAME, 0 },
{ }
};
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index a5cd17e18d09..23e66af89dea 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -553,17 +553,17 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
for (cur_irq = wm831x->irq_base;
cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base;
cur_irq++) {
- set_irq_chip_data(cur_irq, wm831x);
- set_irq_chip_and_handler(cur_irq, &wm831x_irq_chip,
+ irq_set_chip_data(cur_irq, wm831x);
+ irq_set_chip_and_handler(cur_irq, &wm831x_irq_chip,
handle_edge_irq);
- set_irq_nested_thread(cur_irq, 1);
+ irq_set_nested_thread(cur_irq, 1);
/* ARM needs us to explicitly flag the IRQ as valid
* and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
set_irq_flags(cur_irq, IRQF_VALID);
#else
- set_irq_noprobe(cur_irq);
+ irq_set_noprobe(cur_irq);
#endif
}
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index 5839966ebd85..ed4b22a167b3 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -518,17 +518,17 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq,
for (cur_irq = wm8350->irq_base;
cur_irq < ARRAY_SIZE(wm8350_irqs) + wm8350->irq_base;
cur_irq++) {
- set_irq_chip_data(cur_irq, wm8350);
- set_irq_chip_and_handler(cur_irq, &wm8350_irq_chip,
+ irq_set_chip_data(cur_irq, wm8350);
+ irq_set_chip_and_handler(cur_irq, &wm8350_irq_chip,
handle_edge_irq);
- set_irq_nested_thread(cur_irq, 1);
+ irq_set_nested_thread(cur_irq, 1);
/* ARM needs us to explicitly flag the IRQ as valid
* and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
set_irq_flags(cur_irq, IRQF_VALID);
#else
- set_irq_noprobe(cur_irq);
+ irq_set_noprobe(cur_irq);
#endif
}
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 1e3bf4a2ff8e..71c6e8f9aedb 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -278,17 +278,17 @@ int wm8994_irq_init(struct wm8994 *wm8994)
for (cur_irq = wm8994->irq_base;
cur_irq < ARRAY_SIZE(wm8994_irqs) + wm8994->irq_base;
cur_irq++) {
- set_irq_chip_data(cur_irq, wm8994);
- set_irq_chip_and_handler(cur_irq, &wm8994_irq_chip,
+ irq_set_chip_data(cur_irq, wm8994);
+ irq_set_chip_and_handler(cur_irq, &wm8994_irq_chip,
handle_edge_irq);
- set_irq_nested_thread(cur_irq, 1);
+ irq_set_nested_thread(cur_irq, 1);
/* ARM needs us to explicitly flag the IRQ as valid
* and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
set_irq_flags(cur_irq, IRQF_VALID);
#else
- set_irq_noprobe(cur_irq);
+ irq_set_noprobe(cur_irq);
#endif
}
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 59c118c19a91..27dc463097f3 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -988,7 +988,7 @@ static void kgdbts_run_tests(void)
static int kgdbts_option_setup(char *opt)
{
- if (strlen(opt) > MAX_CONFIG_LEN) {
+ if (strlen(opt) >= MAX_CONFIG_LEN) {
printk(KERN_ERR "kgdbts: config string too long\n");
return -ENOSPC;
}
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 77414702cb00..b4567c35a322 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -33,14 +33,6 @@ config MTD_TESTS
should normally be compiled as kernel modules. The modules perform
various checks and verifications when loaded.
-config MTD_CONCAT
- tristate "MTD concatenating support"
- help
- Support for concatenating several MTD devices into a single
- (virtual) one. This allows you to have -for example- a JFFS(2)
- file system spanning multiple physical flash chips. If unsure,
- say 'Y'.
-
config MTD_PARTITIONS
bool "MTD partitioning support"
help
@@ -333,6 +325,16 @@ config MTD_OOPS
To use, add console=ttyMTDx to the kernel command line,
where x is the MTD device number to use.
+config MTD_SWAP
+ tristate "Swap on MTD device support"
+ depends on MTD && SWAP
+ select MTD_BLKDEVS
+ help
+ Provides a volatile block device driver on top of an MTD
+ partition suitable for swapping. The mapping of written blocks
+ is not saved. The driver provides wear leveling by storing the
+ erase counter in the OOB area.
+
source "drivers/mtd/chips/Kconfig"
source "drivers/mtd/maps/Kconfig"
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index d4e7f25b1ebb..d578095fb255 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -4,11 +4,10 @@
# Core functionality.
obj-$(CONFIG_MTD) += mtd.o
-mtd-y := mtdcore.o mtdsuper.o
+mtd-y := mtdcore.o mtdsuper.o mtdconcat.o
mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o
-obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
@@ -26,6 +25,7 @@ obj-$(CONFIG_RFD_FTL) += rfd_ftl.o
obj-$(CONFIG_SSFDC) += ssfdc.o
obj-$(CONFIG_SM_FTL) += sm_ftl.o
obj-$(CONFIG_MTD_OOPS) += mtdoops.o
+obj-$(CONFIG_MTD_SWAP) += mtdswap.o
nftl-objs := nftlcore.o nftlmount.o
inftl-objs := inftlcore.o inftlmount.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 4aaa88f8ab5f..092aef11120c 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -455,7 +455,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
mtd->writesize = 1;
- mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
+ mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index f072fcfde04e..f9a5331e9445 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -349,6 +349,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
+ { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
@@ -440,7 +441,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
mtd->writesize = 1;
- mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
+ mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n",
__func__, mtd->writebufsize);
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index c04b7658abe9..ed56ad3884fb 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -238,7 +238,7 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
mtd->resume = cfi_staa_resume;
mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
- mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
+ mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
map->fldrv = &cfi_staa_chipdrv;
__module_get(THIS_MODULE);
mtd->name = map->name;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index e4eba6cc1b2e..3fb981d4bb51 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -655,7 +655,8 @@ static const struct spi_device_id m25p_ids[] = {
{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
{ "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
- /* EON -- en25pxx */
+ /* EON -- en25xxx */
+ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
{ "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
@@ -728,6 +729,8 @@ static const struct spi_device_id m25p_ids[] = {
{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
+
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
{ "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 26a6e809013d..1483e18971ce 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -121,6 +121,7 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
mtd->flags = MTD_CAP_RAM;
mtd->size = size;
mtd->writesize = 1;
+ mtd->writebufsize = 64; /* Mimic CFI NOR flashes */
mtd->erasesize = MTDRAM_ERASE_SIZE;
mtd->priv = mapped_address;
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 52393282eaf1..8d28fa02a5a2 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -117,6 +117,7 @@ static void unregister_devices(void)
list_for_each_entry_safe(this, safe, &phram_list, list) {
del_mtd_device(&this->mtd);
iounmap(this->mtd.priv);
+ kfree(this->mtd.name);
kfree(this);
}
}
@@ -275,6 +276,8 @@ static int phram_setup(const char *val, struct kernel_param *kp)
ret = register_device(name, start, len);
if (!ret)
pr_info("%s device: %#x at %#x\n", name, len, start);
+ else
+ kfree(name);
return ret;
}
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 5d37d315fa98..44b1f46458ca 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -114,7 +114,7 @@ config MTD_SUN_UFLASH
config MTD_SC520CDP
tristate "CFI Flash device mapped on AMD SC520 CDP"
- depends on X86 && MTD_CFI && MTD_CONCAT
+ depends on X86 && MTD_CFI
help
The SC520 CDP board has two banks of CFI-compliant chips and one
Dual-in-line JEDEC chip. This 'mapping' driver supports that
@@ -262,7 +262,7 @@ config MTD_BCM963XX
config MTD_DILNETPC
tristate "CFI Flash device mapped on DIL/Net PC"
- depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
+ depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
help
MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
@@ -552,4 +552,13 @@ config MTD_PISMO
When built as a module, it will be called pismo.ko
+config MTD_LATCH_ADDR
+ tristate "Latch-assisted Flash Chip Support"
+ depends on MTD_COMPLEX_MAPPINGS
+ help
+ Map driver which allows flashes to be partially physically addressed
+ and to have their upper address lines set by board-specific code.
+
+ If compiled as a module, it will be called latch-addr-flash.
+
endmenu
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index c7869c7a6b18..08533bd5cba7 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -59,3 +59,4 @@ obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
obj-$(CONFIG_MTD_VMU) += vmu-flash.o
obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o
+obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
index c09f4f57093e..e5f645b775ad 100644
--- a/drivers/mtd/maps/ceiva.c
+++ b/drivers/mtd/maps/ceiva.c
@@ -194,16 +194,10 @@ static int __init clps_setup_mtd(struct clps_info *clps, int nr, struct mtd_info
* We detected multiple devices. Concatenate
* them together.
*/
-#ifdef CONFIG_MTD_CONCAT
*rmtd = mtd_concat_create(subdev, found,
"clps flash");
if (*rmtd == NULL)
ret = -ENXIO;
-#else
- printk(KERN_ERR "clps flash: multiple devices "
- "found but MTD concat support disabled.\n");
- ret = -ENXIO;
-#endif
}
}
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index 2aac41bde8b3..e22ff5adbbf4 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -202,7 +202,6 @@ static int armflash_probe(struct platform_device *dev)
if (info->nr_subdev == 1)
info->mtd = info->subdev[0].mtd;
else if (info->nr_subdev > 1) {
-#ifdef CONFIG_MTD_CONCAT
struct mtd_info *cdev[info->nr_subdev];
/*
@@ -215,11 +214,6 @@ static int armflash_probe(struct platform_device *dev)
dev_name(&dev->dev));
if (info->mtd == NULL)
err = -ENXIO;
-#else
- printk(KERN_ERR "armflash: multiple devices found but "
- "MTD concat support disabled.\n");
- err = -ENXIO;
-#endif
}
if (err < 0)
@@ -244,10 +238,8 @@ static int armflash_probe(struct platform_device *dev)
cleanup:
if (info->mtd) {
del_mtd_partitions(info->mtd);
-#ifdef CONFIG_MTD_CONCAT
if (info->mtd != info->subdev[0].mtd)
mtd_concat_destroy(info->mtd);
-#endif
}
kfree(info->parts);
subdev_err:
@@ -272,10 +264,8 @@ static int armflash_remove(struct platform_device *dev)
if (info) {
if (info->mtd) {
del_mtd_partitions(info->mtd);
-#ifdef CONFIG_MTD_CONCAT
if (info->mtd != info->subdev[0].mtd)
mtd_concat_destroy(info->mtd);
-#endif
}
kfree(info->parts);
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
new file mode 100644
index 000000000000..ee2548085334
--- /dev/null
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -0,0 +1,272 @@
+/*
+ * Interface for NOR flash driver whose high address lines are latched
+ *
+ * Copyright © 2000 Nicolas Pitre <nico@cam.org>
+ * Copyright © 2005-2008 Analog Devices Inc.
+ * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/latch-addr-flash.h>
+#include <linux/slab.h>
+
+#define DRIVER_NAME "latch-addr-flash"
+
+struct latch_addr_flash_info {
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct resource *res;
+
+ void (*set_window)(unsigned long offset, void *data);
+ void *data;
+
+ /* cached window mask; could be derived from res */
+ unsigned long win_mask;
+
+ int nr_parts;
+ struct mtd_partition *parts;
+
+ spinlock_t lock;
+};
+
+static map_word lf_read(struct map_info *map, unsigned long ofs)
+{
+ struct latch_addr_flash_info *info;
+ map_word datum;
+
+ info = (struct latch_addr_flash_info *)map->map_priv_1;
+
+ spin_lock(&info->lock);
+
+ info->set_window(ofs, info->data);
+ datum = inline_map_read(map, info->win_mask & ofs);
+
+ spin_unlock(&info->lock);
+
+ return datum;
+}
+
+static void lf_write(struct map_info *map, map_word datum, unsigned long ofs)
+{
+ struct latch_addr_flash_info *info;
+
+ info = (struct latch_addr_flash_info *)map->map_priv_1;
+
+ spin_lock(&info->lock);
+
+ info->set_window(ofs, info->data);
+ inline_map_write(map, datum, info->win_mask & ofs);
+
+ spin_unlock(&info->lock);
+}
+
+static void lf_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ struct latch_addr_flash_info *info =
+ (struct latch_addr_flash_info *) map->map_priv_1;
+ unsigned n;
+
+ while (len > 0) {
+ n = info->win_mask + 1 - (from & info->win_mask);
+ if (n > len)
+ n = len;
+
+ spin_lock(&info->lock);
+
+ info->set_window(from, info->data);
+ memcpy_fromio(to, map->virt + (from & info->win_mask), n);
+
+ spin_unlock(&info->lock);
+
+ to += n;
+ from += n;
+ len -= n;
+ }
+}
+
+static char *rom_probe_types[] = { "cfi_probe", NULL };
+
+static char *part_probe_types[] = { "cmdlinepart", NULL };
+
+static int latch_addr_flash_remove(struct platform_device *dev)
+{
+ struct latch_addr_flash_info *info;
+ struct latch_addr_flash_data *latch_addr_data;
+
+ info = platform_get_drvdata(dev);
+ if (info == NULL)
+ return 0;
+ platform_set_drvdata(dev, NULL);
+
+ latch_addr_data = dev->dev.platform_data;
+
+ if (info->mtd != NULL) {
+ if (mtd_has_partitions()) {
+ if (info->nr_parts) {
+ del_mtd_partitions(info->mtd);
+ kfree(info->parts);
+ } else if (latch_addr_data->nr_parts) {
+ del_mtd_partitions(info->mtd);
+ } else {
+ del_mtd_device(info->mtd);
+ }
+ } else {
+ del_mtd_device(info->mtd);
+ }
+ map_destroy(info->mtd);
+ }
+
+ if (info->map.virt != NULL)
+ iounmap(info->map.virt);
+
+ if (info->res != NULL)
+ release_mem_region(info->res->start, resource_size(info->res));
+
+ kfree(info);
+
+ if (latch_addr_data->done)
+ latch_addr_data->done(latch_addr_data->data);
+
+ return 0;
+}
+
+static int __devinit latch_addr_flash_probe(struct platform_device *dev)
+{
+ struct latch_addr_flash_data *latch_addr_data;
+ struct latch_addr_flash_info *info;
+ resource_size_t win_base = dev->resource->start;
+ resource_size_t win_size = resource_size(dev->resource);
+ char **probe_type;
+ int chipsel;
+ int err;
+
+ latch_addr_data = dev->dev.platform_data;
+ if (latch_addr_data == NULL)
+ return -ENODEV;
+
+ pr_notice("latch-addr platform flash device: %#llx byte "
+ "window at %#.8llx\n",
+ (unsigned long long)win_size, (unsigned long long)win_base);
+
+ chipsel = dev->id;
+
+ if (latch_addr_data->init) {
+ err = latch_addr_data->init(latch_addr_data->data, chipsel);
+ if (err != 0)
+ return err;
+ }
+
+ info = kzalloc(sizeof(struct latch_addr_flash_info), GFP_KERNEL);
+ if (info == NULL) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ platform_set_drvdata(dev, info);
+
+ info->res = request_mem_region(win_base, win_size, DRIVER_NAME);
+ if (info->res == NULL) {
+ dev_err(&dev->dev, "Could not reserve memory region\n");
+ err = -EBUSY;
+ goto free_info;
+ }
+
+ info->map.name = DRIVER_NAME;
+ info->map.size = latch_addr_data->size;
+ info->map.bankwidth = latch_addr_data->width;
+
+ info->map.phys = NO_XIP;
+ info->map.virt = ioremap(win_base, win_size);
+ if (!info->map.virt) {
+ err = -ENOMEM;
+ goto free_res;
+ }
+
+ info->map.map_priv_1 = (unsigned long)info;
+
+ info->map.read = lf_read;
+ info->map.copy_from = lf_copy_from;
+ info->map.write = lf_write;
+ info->set_window = latch_addr_data->set_window;
+ info->data = latch_addr_data->data;
+ info->win_mask = win_size - 1;
+
+ spin_lock_init(&info->lock);
+
+ for (probe_type = rom_probe_types; !info->mtd && *probe_type;
+ probe_type++)
+ info->mtd = do_map_probe(*probe_type, &info->map);
+
+ if (info->mtd == NULL) {
+ dev_err(&dev->dev, "map_probe failed\n");
+ err = -ENODEV;
+ goto iounmap;
+ }
+ info->mtd->owner = THIS_MODULE;
+
+ if (mtd_has_partitions()) {
+
+ err = parse_mtd_partitions(info->mtd,
+ (const char **)part_probe_types,
+ &info->parts, 0);
+ if (err > 0) {
+ add_mtd_partitions(info->mtd, info->parts, err);
+ return 0;
+ }
+ if (latch_addr_data->nr_parts) {
+ pr_notice("Using latch-addr-flash partition information\n");
+ add_mtd_partitions(info->mtd, latch_addr_data->parts,
+ latch_addr_data->nr_parts);
+ return 0;
+ }
+ }
+ add_mtd_device(info->mtd);
+ return 0;
+
+iounmap:
+ iounmap(info->map.virt);
+free_res:
+ release_mem_region(info->res->start, resource_size(info->res));
+free_info:
+ kfree(info);
+done:
+ if (latch_addr_data->done)
+ latch_addr_data->done(latch_addr_data->data);
+ return err;
+}
+
+static struct platform_driver latch_addr_flash_driver = {
+ .probe = latch_addr_flash_probe,
+ .remove = __devexit_p(latch_addr_flash_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init latch_addr_flash_init(void)
+{
+ return platform_driver_register(&latch_addr_flash_driver);
+}
+module_init(latch_addr_flash_init);
+
+static void __exit latch_addr_flash_exit(void)
+{
+ platform_driver_unregister(&latch_addr_flash_driver);
+}
+module_exit(latch_addr_flash_exit);
+
+MODULE_AUTHOR("David Griego <dgriego@mvista.com>");
+MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with upper "
+ "address lines being set board specifically");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 4c18b98a3110..7522df4f71f1 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -59,10 +59,8 @@ static int physmap_flash_remove(struct platform_device *dev)
#else
del_mtd_device(info->cmtd);
#endif
-#ifdef CONFIG_MTD_CONCAT
if (info->cmtd != info->mtd[0])
mtd_concat_destroy(info->cmtd);
-#endif
}
for (i = 0; i < MAX_RESOURCES; i++) {
@@ -159,15 +157,9 @@ static int physmap_flash_probe(struct platform_device *dev)
/*
* We detected multiple devices. Concatenate them together.
*/
-#ifdef CONFIG_MTD_CONCAT
info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev));
if (info->cmtd == NULL)
err = -ENXIO;
-#else
- printk(KERN_ERR "physmap-flash: multiple devices "
- "found but MTD concat support disabled.\n");
- err = -ENXIO;
-#endif
}
if (err)
goto err_out;
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 3db0cb083d31..bd483f0c57e1 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -104,12 +104,10 @@ static int of_flash_remove(struct platform_device *dev)
return 0;
dev_set_drvdata(&dev->dev, NULL);
-#ifdef CONFIG_MTD_CONCAT
if (info->cmtd != info->list[0].mtd) {
del_mtd_device(info->cmtd);
mtd_concat_destroy(info->cmtd);
}
-#endif
if (info->cmtd) {
if (OF_FLASH_PARTS(info)) {
@@ -337,16 +335,10 @@ static int __devinit of_flash_probe(struct platform_device *dev)
/*
* We detected multiple devices. Concatenate them together.
*/
-#ifdef CONFIG_MTD_CONCAT
info->cmtd = mtd_concat_create(mtd_list, info->list_size,
dev_name(&dev->dev));
if (info->cmtd == NULL)
err = -ENXIO;
-#else
- printk(KERN_ERR "physmap_of: multiple devices "
- "found but MTD concat support disabled.\n");
- err = -ENXIO;
-#endif
}
if (err)
goto err_out;
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index f3af87e08ecd..da875908ea8e 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -232,10 +232,8 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
else
del_mtd_partitions(info->mtd);
#endif
-#ifdef CONFIG_MTD_CONCAT
if (info->mtd != info->subdev[0].mtd)
mtd_concat_destroy(info->mtd);
-#endif
}
kfree(info->parts);
@@ -321,7 +319,6 @@ sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
info->mtd = info->subdev[0].mtd;
ret = 0;
} else if (info->num_subdev > 1) {
-#ifdef CONFIG_MTD_CONCAT
struct mtd_info *cdev[nr];
/*
* We detected multiple devices. Concatenate them together.
@@ -333,11 +330,6 @@ sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
plat->name);
if (info->mtd == NULL)
ret = -ENXIO;
-#else
- printk(KERN_ERR "SA1100 flash: multiple devices "
- "found but MTD concat support disabled.\n");
- ret = -ENXIO;
-#endif
}
if (ret == 0)
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index e2147bf11c88..e02dfa9d4ddd 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -94,7 +94,6 @@ static int __init init_ts5500_map(void)
return 0;
err1:
- map_destroy(mymtd);
iounmap(ts5500_map.virt);
err2:
return rc;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index e0a2373bf0e2..a534e1f0c348 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -40,7 +40,7 @@
static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);
-void blktrans_dev_release(struct kref *kref)
+static void blktrans_dev_release(struct kref *kref)
{
struct mtd_blktrans_dev *dev =
container_of(kref, struct mtd_blktrans_dev, ref);
@@ -67,7 +67,7 @@ unlock:
return dev;
}
-void blktrans_dev_put(struct mtd_blktrans_dev *dev)
+static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
mutex_lock(&blktrans_ref_mutex);
kref_put(&dev->ref, blktrans_dev_release);
@@ -119,18 +119,43 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
}
}
+int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
+{
+ if (kthread_should_stop())
+ return 1;
+
+ return dev->bg_stop;
+}
+EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
+
static int mtd_blktrans_thread(void *arg)
{
struct mtd_blktrans_dev *dev = arg;
+ struct mtd_blktrans_ops *tr = dev->tr;
struct request_queue *rq = dev->rq;
struct request *req = NULL;
+ int background_done = 0;
spin_lock_irq(rq->queue_lock);
while (!kthread_should_stop()) {
int res;
+ dev->bg_stop = false;
if (!req && !(req = blk_fetch_request(rq))) {
+ if (tr->background && !background_done) {
+ spin_unlock_irq(rq->queue_lock);
+ mutex_lock(&dev->lock);
+ tr->background(dev);
+ mutex_unlock(&dev->lock);
+ spin_lock_irq(rq->queue_lock);
+ /*
+ * Do background processing just once per idle
+ * period.
+ */
+ background_done = !dev->bg_stop;
+ continue;
+ }
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop())
@@ -152,6 +177,8 @@ static int mtd_blktrans_thread(void *arg)
if (!__blk_end_request_cur(req, res))
req = NULL;
+
+ background_done = 0;
}
if (req)
@@ -172,8 +199,10 @@ static void mtd_blktrans_request(struct request_queue *rq)
if (!dev)
while ((req = blk_fetch_request(rq)) != NULL)
__blk_end_request_all(req, -ENODEV);
- else
+ else {
+ dev->bg_stop = true;
wake_up_process(dev->thread);
+ }
}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
@@ -379,9 +408,10 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
new->rq->queuedata = new;
blk_queue_logical_block_size(new->rq, tr->blksize);
- if (tr->discard)
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
- new->rq);
+ if (tr->discard) {
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
+ new->rq->limits.max_discard_sectors = UINT_MAX;
+ }
gd->queue = new->rq;
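The mtd_blkdevs changes above add an optional background() hook that the blktrans thread calls, with dev->lock held, whenever the request queue goes idle, plus mtd_blktrans_cease_background() for the callee to poll. A minimal sketch of a conforming callback follows; example_one_gc_pass() is a hypothetical work unit standing in for whatever housekeeping a driver such as mtdswap performs.

#include <linux/mtd/blktrans.h>

/* Hypothetical work unit: returns >0 while more housekeeping remains. */
static int example_one_gc_pass(struct mtd_blktrans_dev *dev)
{
	return 0;
}

static void example_background(struct mtd_blktrans_dev *dev)
{
	/* Called with dev->lock held whenever the queue goes idle. */
	while (!mtd_blktrans_cease_background(dev)) {
		if (example_one_gc_pass(dev) <= 0)
			break;		/* nothing left this idle period */
	}
}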
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 5f5777bd3f75..5060e608ea5d 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -750,6 +750,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
struct mtd_concat *concat;
uint32_t max_erasesize, curr_erasesize;
int num_erase_region;
+ int max_writebufsize = 0;
printk(KERN_NOTICE "Concatenating MTD devices:\n");
for (i = 0; i < num_devs; i++)
@@ -776,7 +777,12 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd.size = subdev[0]->size;
concat->mtd.erasesize = subdev[0]->erasesize;
concat->mtd.writesize = subdev[0]->writesize;
- concat->mtd.writebufsize = subdev[0]->writebufsize;
+
+ for (i = 0; i < num_devs; i++)
+ if (max_writebufsize < subdev[i]->writebufsize)
+ max_writebufsize = subdev[i]->writebufsize;
+ concat->mtd.writebufsize = max_writebufsize;
+
concat->mtd.subpage_sft = subdev[0]->subpage_sft;
concat->mtd.oobsize = subdev[0]->oobsize;
concat->mtd.oobavail = subdev[0]->oobavail;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 527cebf58da4..da69bc8a5a7d 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -43,7 +43,7 @@
* backing device capabilities for non-mappable devices (such as NAND flash)
* - permits private mappings, copies are taken of the data
*/
-struct backing_dev_info mtd_bdi_unmappable = {
+static struct backing_dev_info mtd_bdi_unmappable = {
.capabilities = BDI_CAP_MAP_COPY,
};
@@ -52,7 +52,7 @@ struct backing_dev_info mtd_bdi_unmappable = {
* - permits private mappings, copies are taken of the data
* - permits non-writable shared mappings
*/
-struct backing_dev_info mtd_bdi_ro_mappable = {
+static struct backing_dev_info mtd_bdi_ro_mappable = {
.capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
};
@@ -62,7 +62,7 @@ struct backing_dev_info mtd_bdi_ro_mappable = {
* - permits private mappings, copies are taken of the data
* - permits non-writable shared mappings
*/
-struct backing_dev_info mtd_bdi_rw_mappable = {
+static struct backing_dev_info mtd_bdi_rw_mappable = {
.capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
BDI_CAP_WRITE_MAP),
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
new file mode 100644
index 000000000000..237913c5c92c
--- /dev/null
+++ b/drivers/mtd/mtdswap.c
@@ -0,0 +1,1587 @@
+/*
+ * Swap block device support for MTDs
+ * Turns an MTD device into a swap device with block wear leveling
+ *
+ * Copyright © 2007,2011 Nokia Corporation. All rights reserved.
+ *
+ * Authors: Jarkko Lavinen <jarkko.lavinen@nokia.com>
+ *
+ * Based on Richard Purdie's earlier implementation in 2007. Background
+ * support and lock-less operation written by Adrian Hunter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/genhd.h>
+#include <linux/swap.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/device.h>
+#include <linux/math64.h>
+
+#define MTDSWAP_PREFIX "mtdswap"
+
+/*
+ * The number of free eraseblocks when GC should stop
+ */
+#define CLEAN_BLOCK_THRESHOLD 20
+
+/*
+ * Number of free eraseblocks below which GC can also collect low frag
+ * blocks.
+ */
+#define LOW_FRAG_GC_TRESHOLD 5
+
+/*
+ * Wear level cost amortization. We want to do wear leveling in the background
+ * without disturbing GC too much. This is done by defining a maximum GC
+ * frequency. A frequency value of 6 means that 1/6 of the GC passes will pick
+ * an erase block based on the biggest wear difference rather than the biggest
+ * dirtiness.
+ *
+ * The lower frequency (freq2) should be chosen so that the maximum erase
+ * difference still decreases even if a malicious application deliberately
+ * tries to make the erase differences large.
+ */
+#define MAX_ERASE_DIFF 4000
+#define COLLECT_NONDIRTY_BASE MAX_ERASE_DIFF
+#define COLLECT_NONDIRTY_FREQ1 6
+#define COLLECT_NONDIRTY_FREQ2 4
+
+#define PAGE_UNDEF UINT_MAX
+#define BLOCK_UNDEF UINT_MAX
+#define BLOCK_ERROR (UINT_MAX - 1)
+#define BLOCK_MAX (UINT_MAX - 2)
+
+#define EBLOCK_BAD (1 << 0)
+#define EBLOCK_NOMAGIC (1 << 1)
+#define EBLOCK_BITFLIP (1 << 2)
+#define EBLOCK_FAILED (1 << 3)
+#define EBLOCK_READERR (1 << 4)
+#define EBLOCK_IDX_SHIFT 5
+
+struct swap_eb {
+ struct rb_node rb;
+ struct rb_root *root;
+
+ unsigned int flags;
+ unsigned int active_count;
+ unsigned int erase_count;
+ unsigned int pad; /* speeds up pointer decrement */
+};
+
+#define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
+ rb)->erase_count)
+#define MTDSWAP_ECNT_MAX(rbroot) (rb_entry(rb_last(rbroot), struct swap_eb, \
+ rb)->erase_count)
+
+struct mtdswap_tree {
+ struct rb_root root;
+ unsigned int count;
+};
+
+enum {
+ MTDSWAP_CLEAN,
+ MTDSWAP_USED,
+ MTDSWAP_LOWFRAG,
+ MTDSWAP_HIFRAG,
+ MTDSWAP_DIRTY,
+ MTDSWAP_BITFLIP,
+ MTDSWAP_FAILING,
+ MTDSWAP_TREE_CNT,
+};
+
+struct mtdswap_dev {
+ struct mtd_blktrans_dev *mbd_dev;
+ struct mtd_info *mtd;
+ struct device *dev;
+
+ unsigned int *page_data;
+ unsigned int *revmap;
+
+ unsigned int eblks;
+ unsigned int spare_eblks;
+ unsigned int pages_per_eblk;
+ unsigned int max_erase_count;
+ struct swap_eb *eb_data;
+
+ struct mtdswap_tree trees[MTDSWAP_TREE_CNT];
+
+ unsigned long long sect_read_count;
+ unsigned long long sect_write_count;
+ unsigned long long mtd_write_count;
+ unsigned long long mtd_read_count;
+ unsigned long long discard_count;
+ unsigned long long discard_page_count;
+
+ unsigned int curr_write_pos;
+ struct swap_eb *curr_write;
+
+ char *page_buf;
+ char *oob_buf;
+
+ struct dentry *debugfs_root;
+};
+
+struct mtdswap_oobdata {
+ __le16 magic;
+ __le32 count;
+} __attribute__((packed));
+
+#define MTDSWAP_MAGIC_CLEAN 0x2095
+#define MTDSWAP_MAGIC_DIRTY (MTDSWAP_MAGIC_CLEAN + 1)
+#define MTDSWAP_TYPE_CLEAN 0
+#define MTDSWAP_TYPE_DIRTY 1
+#define MTDSWAP_OOBSIZE sizeof(struct mtdswap_oobdata)
+
+#define MTDSWAP_ERASE_RETRIES 3 /* Before marking erase block bad */
+#define MTDSWAP_IO_RETRIES 3
+
+enum {
+ MTDSWAP_SCANNED_CLEAN,
+ MTDSWAP_SCANNED_DIRTY,
+ MTDSWAP_SCANNED_BITFLIP,
+ MTDSWAP_SCANNED_BAD,
+};
+
+/*
+ * In the worst case mtdswap_writesect() has allocated the last clean
+ * page from the current block and is then pre-empted by the GC
+ * thread. The thread can consume a full erase block when moving a
+ * block.
+ */
+#define MIN_SPARE_EBLOCKS 2
+#define MIN_ERASE_BLOCKS (MIN_SPARE_EBLOCKS + 1)
+
+#define TREE_ROOT(d, name) (&d->trees[MTDSWAP_ ## name].root)
+#define TREE_EMPTY(d, name) (TREE_ROOT(d, name)->rb_node == NULL)
+#define TREE_NONEMPTY(d, name) (!TREE_EMPTY(d, name))
+#define TREE_COUNT(d, name) (d->trees[MTDSWAP_ ## name].count)
+
+#define MTDSWAP_MBD_TO_MTDSWAP(dev) ((struct mtdswap_dev *)dev->priv)
+
+static char partitions[128] = "";
+module_param_string(partitions, partitions, sizeof(partitions), 0444);
+MODULE_PARM_DESC(partitions, "MTD partition numbers to use as swap "
+ "partitions, e.g. partitions=\"1,3,5\"");
+
+static unsigned int spare_eblocks = 10;
+module_param(spare_eblocks, uint, 0444);
+MODULE_PARM_DESC(spare_eblocks, "Percentage of spare erase blocks for "
+ "garbage collection (default 10%)");
+
+static bool header; /* false */
+module_param(header, bool, 0444);
+MODULE_PARM_DESC(header,
+ "Include builtin swap header (default 0, without header)");
+
+static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background);
+
+static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ return (loff_t)(eb - d->eb_data) * d->mtd->erasesize;
+}
+
+static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ unsigned int oldidx;
+ struct mtdswap_tree *tp;
+
+ if (eb->root) {
+ tp = container_of(eb->root, struct mtdswap_tree, root);
+ oldidx = tp - &d->trees[0];
+
+ d->trees[oldidx].count--;
+ rb_erase(&eb->rb, eb->root);
+ }
+}
+
+static void __mtdswap_rb_add(struct rb_root *root, struct swap_eb *eb)
+{
+ struct rb_node **p, *parent = NULL;
+ struct swap_eb *cur;
+
+ p = &root->rb_node;
+ while (*p) {
+ parent = *p;
+ cur = rb_entry(parent, struct swap_eb, rb);
+ if (eb->erase_count > cur->erase_count)
+ p = &(*p)->rb_right;
+ else
+ p = &(*p)->rb_left;
+ }
+
+ rb_link_node(&eb->rb, parent, p);
+ rb_insert_color(&eb->rb, root);
+}
+
+static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx)
+{
+ struct rb_root *root;
+
+ if (eb->root == &d->trees[idx].root)
+ return;
+
+ mtdswap_eb_detach(d, eb);
+ root = &d->trees[idx].root;
+ __mtdswap_rb_add(root, eb);
+ eb->root = root;
+ d->trees[idx].count++;
+}
+
+static struct rb_node *mtdswap_rb_index(struct rb_root *root, unsigned int idx)
+{
+ struct rb_node *p;
+ unsigned int i;
+
+ p = rb_first(root);
+ i = 0;
+ while (i < idx && p) {
+ p = rb_next(p);
+ i++;
+ }
+
+ return p;
+}
+
+static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ int ret;
+ loff_t offset;
+
+ d->spare_eblks--;
+ eb->flags |= EBLOCK_BAD;
+ mtdswap_eb_detach(d, eb);
+ eb->root = NULL;
+
+ /* badblocks not supported */
+ if (!d->mtd->block_markbad)
+ return 1;
+
+ offset = mtdswap_eb_offset(d, eb);
+ dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
+ ret = d->mtd->block_markbad(d->mtd, offset);
+
+ if (ret) {
+ dev_warn(d->dev, "Mark block bad failed for block at %08llx "
+ "error %d\n", offset, ret);
+ return ret;
+ }
+
+ return 1;
+
+}
+
+static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ unsigned int marked = eb->flags & EBLOCK_FAILED;
+ struct swap_eb *curr_write = d->curr_write;
+
+ eb->flags |= EBLOCK_FAILED;
+ if (curr_write == eb) {
+ d->curr_write = NULL;
+
+ if (!marked && d->curr_write_pos != 0) {
+ mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
+ return 0;
+ }
+ }
+
+ return mtdswap_handle_badblock(d, eb);
+}
+
+static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int ret = d->mtd->read_oob(d->mtd, from, ops);
+
+ if (ret == -EUCLEAN)
+ return ret;
+
+ if (ret) {
+ dev_warn(d->dev, "Read OOB failed %d for block at %08llx\n",
+ ret, from);
+ return ret;
+ }
+
+ if (ops->oobretlen < ops->ooblen) {
+ dev_warn(d->dev, "Read OOB return short read (%zd bytes not "
+ "%zd) for block at %08llx\n",
+ ops->oobretlen, ops->ooblen, from);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ struct mtdswap_oobdata *data, *data2;
+ int ret;
+ loff_t offset;
+ struct mtd_oob_ops ops;
+
+ offset = mtdswap_eb_offset(d, eb);
+
+ /* Check first if the block is bad. */
+ if (d->mtd->block_isbad && d->mtd->block_isbad(d->mtd, offset))
+ return MTDSWAP_SCANNED_BAD;
+
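+ /*
+ * Read the OOB of the first two pages in one operation: the clean
+ * marker (magic + erase count) lives in the OOB of page 0 and the
+ * dirty marker, if any, in the OOB of page 1 (see
+ * mtdswap_write_marker()).
+ */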
+ ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
+ ops.oobbuf = d->oob_buf;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_AUTO;
+
+ ret = mtdswap_read_oob(d, offset, &ops);
+
+ if (ret && ret != -EUCLEAN)
+ return ret;
+
+ data = (struct mtdswap_oobdata *)d->oob_buf;
+ data2 = (struct mtdswap_oobdata *)
+ (d->oob_buf + d->mtd->ecclayout->oobavail);
+
+ if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
+ eb->erase_count = le32_to_cpu(data->count);
+ if (ret == -EUCLEAN)
+ ret = MTDSWAP_SCANNED_BITFLIP;
+ else {
+ if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY)
+ ret = MTDSWAP_SCANNED_DIRTY;
+ else
+ ret = MTDSWAP_SCANNED_CLEAN;
+ }
+ } else {
+ eb->flags |= EBLOCK_NOMAGIC;
+ ret = MTDSWAP_SCANNED_DIRTY;
+ }
+
+ return ret;
+}
+
+static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
+ u16 marker)
+{
+ struct mtdswap_oobdata n;
+ int ret;
+ loff_t offset;
+ struct mtd_oob_ops ops;
+
+ ops.ooboffs = 0;
+ ops.oobbuf = (uint8_t *)&n;
+ ops.mode = MTD_OOB_AUTO;
+ ops.datbuf = NULL;
+
+ if (marker == MTDSWAP_TYPE_CLEAN) {
+ n.magic = cpu_to_le16(MTDSWAP_MAGIC_CLEAN);
+ n.count = cpu_to_le32(eb->erase_count);
+ ops.ooblen = MTDSWAP_OOBSIZE;
+ offset = mtdswap_eb_offset(d, eb);
+ } else {
+ n.magic = cpu_to_le16(MTDSWAP_MAGIC_DIRTY);
+ ops.ooblen = sizeof(n.magic);
+ offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
+ }
+
+ ret = d->mtd->write_oob(d->mtd, offset , &ops);
+
+ if (ret) {
+ dev_warn(d->dev, "Write OOB failed for block at %08llx "
+ "error %d\n", offset, ret);
+ if (ret == -EIO || ret == -EBADMSG)
+ mtdswap_handle_write_error(d, eb);
+ return ret;
+ }
+
+ if (ops.oobretlen != ops.ooblen) {
+ dev_warn(d->dev, "Short OOB write for block at %08llx: "
+ "%zd not %zd\n",
+ offset, ops.oobretlen, ops.ooblen);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Are there any erase blocks without a MAGIC_CLEAN header, presumably
+ * because power was cut off after erase but before the header write?
+ * If so, estimate their erase counts from the median count of the
+ * blocks whose headers are intact.
+ */
+static void mtdswap_check_counts(struct mtdswap_dev *d)
+{
+ struct rb_root hist_root = RB_ROOT;
+ struct rb_node *medrb;
+ struct swap_eb *eb;
+ unsigned int i, cnt, median;
+
+ cnt = 0;
+ for (i = 0; i < d->eblks; i++) {
+ eb = d->eb_data + i;
+
+ if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
+ continue;
+
+ __mtdswap_rb_add(&hist_root, eb);
+ cnt++;
+ }
+
+ if (cnt == 0)
+ return;
+
+ medrb = mtdswap_rb_index(&hist_root, cnt / 2);
+ median = rb_entry(medrb, struct swap_eb, rb)->erase_count;
+
+ d->max_erase_count = MTDSWAP_ECNT_MAX(&hist_root);
+
+ for (i = 0; i < d->eblks; i++) {
+ eb = d->eb_data + i;
+
+ if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_READERR))
+ eb->erase_count = median;
+
+ if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
+ continue;
+
+ rb_erase(&eb->rb, &hist_root);
+ }
+}
+
+static void mtdswap_scan_eblks(struct mtdswap_dev *d)
+{
+ int status;
+ unsigned int i, idx;
+ struct swap_eb *eb;
+
+ for (i = 0; i < d->eblks; i++) {
+ eb = d->eb_data + i;
+
+ status = mtdswap_read_markers(d, eb);
+ if (status < 0)
+ eb->flags |= EBLOCK_READERR;
+ else if (status == MTDSWAP_SCANNED_BAD) {
+ eb->flags |= EBLOCK_BAD;
+ continue;
+ }
+
+ switch (status) {
+ case MTDSWAP_SCANNED_CLEAN:
+ idx = MTDSWAP_CLEAN;
+ break;
+ case MTDSWAP_SCANNED_DIRTY:
+ case MTDSWAP_SCANNED_BITFLIP:
+ idx = MTDSWAP_DIRTY;
+ break;
+ default:
+ idx = MTDSWAP_FAILING;
+ }
+
+ eb->flags |= (idx << EBLOCK_IDX_SHIFT);
+ }
+
+ mtdswap_check_counts(d);
+
+ for (i = 0; i < d->eblks; i++) {
+ eb = d->eb_data + i;
+
+ if (eb->flags & EBLOCK_BAD)
+ continue;
+
+ idx = eb->flags >> EBLOCK_IDX_SHIFT;
+ mtdswap_rb_add(d, eb, idx);
+ }
+}
+
+/*
+ * Place the erase block into the tree corresponding to the number of
+ * active pages it contains.
+ */
+static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ unsigned int weight = eb->active_count;
+ unsigned int maxweight = d->pages_per_eblk;
+
+ if (eb == d->curr_write)
+ return;
+
+ if (eb->flags & EBLOCK_BITFLIP)
+ mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
+ else if (eb->flags & (EBLOCK_READERR | EBLOCK_FAILED))
+ mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
+ if (weight == maxweight)
+ mtdswap_rb_add(d, eb, MTDSWAP_USED);
+ else if (weight == 0)
+ mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
+ else if (weight > (maxweight/2))
+ mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG);
+ else
+ mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG);
+}
+
+
+static void mtdswap_erase_callback(struct erase_info *done)
+{
+ wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
+ wake_up(wait_q);
+}
+
+static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ struct mtd_info *mtd = d->mtd;
+ struct erase_info erase;
+ wait_queue_head_t wq;
+ unsigned int retries = 0;
+ int ret;
+
+ eb->erase_count++;
+ if (eb->erase_count > d->max_erase_count)
+ d->max_erase_count = eb->erase_count;
+
+retry:
+ init_waitqueue_head(&wq);
+ memset(&erase, 0, sizeof(struct erase_info));
+
+ erase.mtd = mtd;
+ erase.callback = mtdswap_erase_callback;
+ erase.addr = mtdswap_eb_offset(d, eb);
+ erase.len = mtd->erasesize;
+ erase.priv = (u_long)&wq;
+
+ ret = mtd->erase(mtd, &erase);
+ if (ret) {
+ if (retries++ < MTDSWAP_ERASE_RETRIES) {
+ dev_warn(d->dev,
+ "erase of erase block %#llx on %s failed",
+ erase.addr, mtd->name);
+ yield();
+ goto retry;
+ }
+
+ dev_err(d->dev, "Cannot erase erase block %#llx on %s\n",
+ erase.addr, mtd->name);
+
+ mtdswap_handle_badblock(d, eb);
+ return -EIO;
+ }
+
+ ret = wait_event_interruptible(wq, erase.state == MTD_ERASE_DONE ||
+ erase.state == MTD_ERASE_FAILED);
+ if (ret) {
+ dev_err(d->dev, "Interrupted erase block %#llx erassure on %s",
+ erase.addr, mtd->name);
+ return -EINTR;
+ }
+
+ if (erase.state == MTD_ERASE_FAILED) {
+ if (retries++ < MTDSWAP_ERASE_RETRIES) {
+ dev_warn(d->dev,
+ "erase of erase block %#llx on %s failed",
+ erase.addr, mtd->name);
+ yield();
+ goto retry;
+ }
+
+ mtdswap_handle_badblock(d, eb);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
+ unsigned int *block)
+{
+ int ret;
+ struct swap_eb *old_eb = d->curr_write;
+ struct rb_root *clean_root;
+ struct swap_eb *eb;
+
+ if (old_eb == NULL || d->curr_write_pos >= d->pages_per_eblk) {
+ do {
+ if (TREE_EMPTY(d, CLEAN))
+ return -ENOSPC;
+
+ clean_root = TREE_ROOT(d, CLEAN);
+ eb = rb_entry(rb_first(clean_root), struct swap_eb, rb);
+ rb_erase(&eb->rb, clean_root);
+ eb->root = NULL;
+ TREE_COUNT(d, CLEAN)--;
+
+ ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
+ } while (ret == -EIO || ret == -EBADMSG);
+
+ if (ret)
+ return ret;
+
+ d->curr_write_pos = 0;
+ d->curr_write = eb;
+ if (old_eb)
+ mtdswap_store_eb(d, old_eb);
+ }
+
+ *block = (d->curr_write - d->eb_data) * d->pages_per_eblk +
+ d->curr_write_pos;
+
+ d->curr_write->active_count++;
+ d->revmap[*block] = page;
+ d->curr_write_pos++;
+
+ return 0;
+}
+
+static unsigned int mtdswap_free_page_cnt(struct mtdswap_dev *d)
+{
+ return TREE_COUNT(d, CLEAN) * d->pages_per_eblk +
+ d->pages_per_eblk - d->curr_write_pos;
+}
+
+static unsigned int mtdswap_enough_free_pages(struct mtdswap_dev *d)
+{
+ return mtdswap_free_page_cnt(d) > d->pages_per_eblk;
+}
+
+static int mtdswap_write_block(struct mtdswap_dev *d, char *buf,
+ unsigned int page, unsigned int *bp, int gc_context)
+{
+ struct mtd_info *mtd = d->mtd;
+ struct swap_eb *eb;
+ size_t retlen;
+ loff_t writepos;
+ int ret;
+
+retry:
+ if (!gc_context)
+ while (!mtdswap_enough_free_pages(d))
+ if (mtdswap_gc(d, 0) > 0)
+ return -ENOSPC;
+
+ ret = mtdswap_map_free_block(d, page, bp);
+ eb = d->eb_data + (*bp / d->pages_per_eblk);
+
+ if (ret == -EIO || ret == -EBADMSG) {
+ d->curr_write = NULL;
+ eb->active_count--;
+ d->revmap[*bp] = PAGE_UNDEF;
+ goto retry;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ writepos = (loff_t)*bp << PAGE_SHIFT;
+ ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf);
+ if (ret == -EIO || ret == -EBADMSG) {
+ d->curr_write_pos--;
+ eb->active_count--;
+ d->revmap[*bp] = PAGE_UNDEF;
+ mtdswap_handle_write_error(d, eb);
+ goto retry;
+ }
+
+ if (ret < 0) {
+ dev_err(d->dev, "Write to MTD device failed: %d (%zd written)",
+ ret, retlen);
+ goto err;
+ }
+
+ if (retlen != PAGE_SIZE) {
+ dev_err(d->dev, "Short write to MTD device: %zd written",
+ retlen);
+ ret = -EIO;
+ goto err;
+ }
+
+ return ret;
+
+err:
+ d->curr_write_pos--;
+ eb->active_count--;
+ d->revmap[*bp] = PAGE_UNDEF;
+
+ return ret;
+}
+
+static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
+ unsigned int *newblock)
+{
+ struct mtd_info *mtd = d->mtd;
+ struct swap_eb *eb, *oldeb;
+ int ret;
+ size_t retlen;
+ unsigned int page, retries;
+ loff_t readpos;
+
+ page = d->revmap[oldblock];
+ readpos = (loff_t) oldblock << PAGE_SHIFT;
+ retries = 0;
+
+retry:
+ ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
+
+ if (ret < 0 && ret != -EUCLEAN) {
+ oldeb = d->eb_data + oldblock / d->pages_per_eblk;
+ oldeb->flags |= EBLOCK_READERR;
+
+ dev_err(d->dev, "Read Error: %d (block %u)\n", ret,
+ oldblock);
+ retries++;
+ if (retries < MTDSWAP_IO_RETRIES)
+ goto retry;
+
+ goto read_error;
+ }
+
+ if (retlen != PAGE_SIZE) {
+ dev_err(d->dev, "Short read: %zd (block %u)\n", retlen,
+ oldblock);
+ ret = -EIO;
+ goto read_error;
+ }
+
+ ret = mtdswap_write_block(d, d->page_buf, page, newblock, 1);
+ if (ret < 0) {
+ d->page_data[page] = BLOCK_ERROR;
+ dev_err(d->dev, "Write error: %d\n", ret);
+ return ret;
+ }
+
+ eb = d->eb_data + *newblock / d->pages_per_eblk;
+ d->page_data[page] = *newblock;
+ d->revmap[oldblock] = PAGE_UNDEF;
+ eb = d->eb_data + oldblock / d->pages_per_eblk;
+ eb->active_count--;
+
+ return 0;
+
+read_error:
+ d->page_data[page] = BLOCK_ERROR;
+ d->revmap[oldblock] = PAGE_UNDEF;
+ return ret;
+}
+
+static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ unsigned int i, block, eblk_base, newblock;
+ int ret, errcode;
+
+ errcode = 0;
+ eblk_base = (eb - d->eb_data) * d->pages_per_eblk;
+
+ for (i = 0; i < d->pages_per_eblk; i++) {
+ if (d->spare_eblks < MIN_SPARE_EBLOCKS)
+ return -ENOSPC;
+
+ block = eblk_base + i;
+ if (d->revmap[block] == PAGE_UNDEF)
+ continue;
+
+ ret = mtdswap_move_block(d, block, &newblock);
+ if (ret < 0 && !errcode)
+ errcode = ret;
+ }
+
+ return errcode;
+}
+
+static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d)
+{
+ int idx, stopat;
+
+ if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD)
+ stopat = MTDSWAP_LOWFRAG;
+ else
+ stopat = MTDSWAP_HIFRAG;
+
+ for (idx = MTDSWAP_BITFLIP; idx >= stopat; idx--)
+ if (d->trees[idx].root.rb_node != NULL)
+ return idx;
+
+ return -1;
+}
+
+static int mtdswap_wlfreq(unsigned int maxdiff)
+{
+ unsigned int h, x, y, dist, base;
+
+ /*
+ * Calculate linear ramp down from f1 to f2 when maxdiff goes from
+ * MAX_ERASE_DIFF to MAX_ERASE_DIFF + COLLECT_NONDIRTY_BASE. Similar
+ * to a triangle with height f1 - f2 and width COLLECT_NONDIRTY_BASE.
+ */
+
+ dist = maxdiff - MAX_ERASE_DIFF;
+ if (dist > COLLECT_NONDIRTY_BASE)
+ dist = COLLECT_NONDIRTY_BASE;
+
+ /*
+ * Modelling the slope as a right-angled triangle with base
+ * COLLECT_NONDIRTY_BASE and height freq1 - freq2. The ratio y/x is
+ * equal to the ratio h/base.
+ */
+ h = COLLECT_NONDIRTY_FREQ1 - COLLECT_NONDIRTY_FREQ2;
+ base = COLLECT_NONDIRTY_BASE;
+
+ x = dist - base;
+ y = (x * h + base / 2) / base;
+
+ return COLLECT_NONDIRTY_FREQ2 + y;
+}
+
+static int mtdswap_choose_wl_tree(struct mtdswap_dev *d)
+{
+ static unsigned int pick_cnt;
+ unsigned int i, idx = -1, wear, max;
+ struct rb_root *root;
+
+ max = 0;
+ for (i = 0; i <= MTDSWAP_DIRTY; i++) {
+ root = &d->trees[i].root;
+ if (root->rb_node == NULL)
+ continue;
+
+ wear = d->max_erase_count - MTDSWAP_ECNT_MIN(root);
+ if (wear > max) {
+ max = wear;
+ idx = i;
+ }
+ }
+
+ if (max > MAX_ERASE_DIFF && pick_cnt >= mtdswap_wlfreq(max) - 1) {
+ pick_cnt = 0;
+ return idx;
+ }
+
+ pick_cnt++;
+ return -1;
+}
+
+static int mtdswap_choose_gc_tree(struct mtdswap_dev *d,
+ unsigned int background)
+{
+ int idx;
+
+ if (TREE_NONEMPTY(d, FAILING) &&
+ (background || (TREE_EMPTY(d, CLEAN) && TREE_EMPTY(d, DIRTY))))
+ return MTDSWAP_FAILING;
+
+ idx = mtdswap_choose_wl_tree(d);
+ if (idx >= MTDSWAP_CLEAN)
+ return idx;
+
+ return __mtdswap_choose_gc_tree(d);
+}
+
+static struct swap_eb *mtdswap_pick_gc_eblk(struct mtdswap_dev *d,
+ unsigned int background)
+{
+ struct rb_root *rp = NULL;
+ struct swap_eb *eb = NULL;
+ int idx;
+
+ if (background && TREE_COUNT(d, CLEAN) > CLEAN_BLOCK_THRESHOLD &&
+ TREE_EMPTY(d, DIRTY) && TREE_EMPTY(d, FAILING))
+ return NULL;
+
+ idx = mtdswap_choose_gc_tree(d, background);
+ if (idx < 0)
+ return NULL;
+
+ rp = &d->trees[idx].root;
+ eb = rb_entry(rb_first(rp), struct swap_eb, rb);
+
+ rb_erase(&eb->rb, rp);
+ eb->root = NULL;
+ d->trees[idx].count--;
+ return eb;
+}
+
+static unsigned int mtdswap_test_patt(unsigned int i)
+{
+ return i % 2 ? 0x55555555 : 0xAAAAAAAA;
+}
+
+static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
+ struct swap_eb *eb)
+{
+ struct mtd_info *mtd = d->mtd;
+ unsigned int test, i, j, patt, mtd_pages;
+ loff_t base, pos;
+ unsigned int *p1 = (unsigned int *)d->page_buf;
+ unsigned char *p2 = (unsigned char *)d->oob_buf;
+ struct mtd_oob_ops ops;
+ int ret;
+
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = mtd->writesize;
+ ops.ooblen = mtd->ecclayout->oobavail;
+ ops.ooboffs = 0;
+ ops.datbuf = d->page_buf;
+ ops.oobbuf = d->oob_buf;
+ base = mtdswap_eb_offset(d, eb);
+ mtd_pages = d->pages_per_eblk * PAGE_SIZE / mtd->writesize;
+
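+ /*
+ * Torture test: write alternating 0x55/0xAA patterns to the data and
+ * OOB of every page, read them back and verify, erasing the block
+ * after each pass. A block that passes is trusted again.
+ */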
+ for (test = 0; test < 2; test++) {
+ pos = base;
+ for (i = 0; i < mtd_pages; i++) {
+ patt = mtdswap_test_patt(test + i);
+ memset(d->page_buf, patt, mtd->writesize);
+ memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
+ ret = mtd->write_oob(mtd, pos, &ops);
+ if (ret)
+ goto error;
+
+ pos += mtd->writesize;
+ }
+
+ pos = base;
+ for (i = 0; i < mtd_pages; i++) {
+ ret = mtd->read_oob(mtd, pos, &ops);
+ if (ret)
+ goto error;
+
+ patt = mtdswap_test_patt(test + i);
+ for (j = 0; j < mtd->writesize/sizeof(int); j++)
+ if (p1[j] != patt)
+ goto error;
+
+ for (j = 0; j < mtd->ecclayout->oobavail; j++)
+ if (p2[j] != (unsigned char)patt)
+ goto error;
+
+ pos += mtd->writesize;
+ }
+
+ ret = mtdswap_erase_block(d, eb);
+ if (ret)
+ goto error;
+ }
+
+ eb->flags &= ~EBLOCK_READERR;
+ return 1;
+
+error:
+ mtdswap_handle_badblock(d, eb);
+ return 0;
+}
+
+static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)
+{
+ struct swap_eb *eb;
+ int ret;
+
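+ /*
+ * One GC pass: pick a victim block, migrate its remaining active
+ * pages, erase it and return it to the clean tree, or retire it if
+ * it keeps failing.
+ */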
+ if (d->spare_eblks < MIN_SPARE_EBLOCKS)
+ return 1;
+
+ eb = mtdswap_pick_gc_eblk(d, background);
+ if (!eb)
+ return 1;
+
+ ret = mtdswap_gc_eblock(d, eb);
+ if (ret == -ENOSPC)
+ return 1;
+
+ if (eb->flags & EBLOCK_FAILED) {
+ mtdswap_handle_badblock(d, eb);
+ return 0;
+ }
+
+ eb->flags &= ~EBLOCK_BITFLIP;
+ ret = mtdswap_erase_block(d, eb);
+ if ((eb->flags & EBLOCK_READERR) &&
+ (ret || !mtdswap_eblk_passes(d, eb)))
+ return 0;
+
+ if (ret == 0)
+ ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN);
+
+ if (ret == 0)
+ mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
+ else if (ret != -EIO && ret != -EBADMSG)
+ mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
+
+ return 0;
+}
+
+static void mtdswap_background(struct mtd_blktrans_dev *dev)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+ int ret;
+
+ while (1) {
+ ret = mtdswap_gc(d, 1);
+ if (ret || mtd_blktrans_cease_background(dev))
+ return;
+ }
+}
+
+static void mtdswap_cleanup(struct mtdswap_dev *d)
+{
+ vfree(d->eb_data);
+ vfree(d->revmap);
+ vfree(d->page_data);
+ kfree(d->oob_buf);
+ kfree(d->page_buf);
+}
+
+static int mtdswap_flush(struct mtd_blktrans_dev *dev)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+
+ if (d->mtd->sync)
+ d->mtd->sync(d->mtd);
+ return 0;
+}
+
+static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size)
+{
+ loff_t offset;
+ unsigned int badcnt;
+
+ badcnt = 0;
+
+ if (mtd->block_isbad)
+ for (offset = 0; offset < size; offset += mtd->erasesize)
+ if (mtd->block_isbad(mtd, offset))
+ badcnt++;
+
+ return badcnt;
+}
+
+static int mtdswap_writesect(struct mtd_blktrans_dev *dev,
+ unsigned long page, char *buf)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+ unsigned int newblock, mapped;
+ struct swap_eb *eb;
+ int ret;
+
+ d->sect_write_count++;
+
+ if (d->spare_eblks < MIN_SPARE_EBLOCKS)
+ return -ENOSPC;
+
+ if (header) {
+ /* Ignore writes to the header page */
+ if (unlikely(page == 0))
+ return 0;
+
+ page--;
+ }
+
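+ /* Release the block this page was previously mapped to, if any. */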
+ mapped = d->page_data[page];
+ if (mapped <= BLOCK_MAX) {
+ eb = d->eb_data + (mapped / d->pages_per_eblk);
+ eb->active_count--;
+ mtdswap_store_eb(d, eb);
+ d->page_data[page] = BLOCK_UNDEF;
+ d->revmap[mapped] = PAGE_UNDEF;
+ }
+
+ ret = mtdswap_write_block(d, buf, page, &newblock, 0);
+ d->mtd_write_count++;
+
+ if (ret < 0)
+ return ret;
+
+ eb = d->eb_data + (newblock / d->pages_per_eblk);
+ d->page_data[page] = newblock;
+
+ return 0;
+}
+
+/* Provide a dummy swap header for the kernel */
+static int mtdswap_auto_header(struct mtdswap_dev *d, char *buf)
+{
+ union swap_header *hd = (union swap_header *)(buf);
+
+ memset(buf, 0, PAGE_SIZE - 10);
+
+ hd->info.version = 1;
+ hd->info.last_page = d->mbd_dev->size - 1;
+ hd->info.nr_badpages = 0;
+
+ memcpy(buf + PAGE_SIZE - 10, "SWAPSPACE2", 10);
+
+ return 0;
+}
+
+static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
+ unsigned long page, char *buf)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+ struct mtd_info *mtd = d->mtd;
+ unsigned int realblock, retries;
+ loff_t readpos;
+ struct swap_eb *eb;
+ size_t retlen;
+ int ret;
+
+ d->sect_read_count++;
+
+ if (header) {
+ if (unlikely(page == 0))
+ return mtdswap_auto_header(d, buf);
+
+ page--;
+ }
+
+ realblock = d->page_data[page];
+ if (realblock > BLOCK_MAX) {
+ memset(buf, 0x0, PAGE_SIZE);
+ if (realblock == BLOCK_UNDEF)
+ return 0;
+ else
+ return -EIO;
+ }
+
+ eb = d->eb_data + (realblock / d->pages_per_eblk);
+ BUG_ON(d->revmap[realblock] == PAGE_UNDEF);
+
+ readpos = (loff_t)realblock << PAGE_SHIFT;
+ retries = 0;
+
+retry:
+ ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf);
+
+ d->mtd_read_count++;
+ if (ret == -EUCLEAN) {
+ eb->flags |= EBLOCK_BITFLIP;
+ mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
+ ret = 0;
+ }
+
+ if (ret < 0) {
+ dev_err(d->dev, "Read error %d\n", ret);
+ eb->flags |= EBLOCK_READERR;
+ mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
+ retries++;
+ if (retries < MTDSWAP_IO_RETRIES)
+ goto retry;
+
+ return ret;
+ }
+
+ if (retlen != PAGE_SIZE) {
+ dev_err(d->dev, "Short read %zd\n", retlen);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtdswap_discard(struct mtd_blktrans_dev *dev, unsigned long first,
+ unsigned nr_pages)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+ unsigned long page;
+ struct swap_eb *eb;
+ unsigned int mapped;
+
+ d->discard_count++;
+
+ for (page = first; page < first + nr_pages; page++) {
+ mapped = d->page_data[page];
+ if (mapped <= BLOCK_MAX) {
+ eb = d->eb_data + (mapped / d->pages_per_eblk);
+ eb->active_count--;
+ mtdswap_store_eb(d, eb);
+ d->page_data[page] = BLOCK_UNDEF;
+ d->revmap[mapped] = PAGE_UNDEF;
+ d->discard_page_count++;
+ } else if (mapped == BLOCK_ERROR) {
+ d->page_data[page] = BLOCK_UNDEF;
+ d->discard_page_count++;
+ }
+ }
+
+ return 0;
+}
+
+static int mtdswap_show(struct seq_file *s, void *data)
+{
+ struct mtdswap_dev *d = (struct mtdswap_dev *) s->private;
+ unsigned long sum;
+ unsigned int count[MTDSWAP_TREE_CNT];
+ unsigned int min[MTDSWAP_TREE_CNT];
+ unsigned int max[MTDSWAP_TREE_CNT];
+ unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
+ uint64_t use_size;
+ char *name[] = {"clean", "used", "low", "high", "dirty", "bitflip",
+ "failing"};
+
+ mutex_lock(&d->mbd_dev->lock);
+
+ for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
+ struct rb_root *root = &d->trees[i].root;
+
+ if (root->rb_node) {
+ count[i] = d->trees[i].count;
+ min[i] = rb_entry(rb_first(root), struct swap_eb,
+ rb)->erase_count;
+ max[i] = rb_entry(rb_last(root), struct swap_eb,
+ rb)->erase_count;
+ } else
+ count[i] = 0;
+ }
+
+ if (d->curr_write) {
+ cw = 1;
+ cwp = d->curr_write_pos;
+ cwecount = d->curr_write->erase_count;
+ }
+
+ sum = 0;
+ for (i = 0; i < d->eblks; i++)
+ sum += d->eb_data[i].erase_count;
+
+ use_size = (uint64_t)d->eblks * d->mtd->erasesize;
+ bb_cnt = mtdswap_badblocks(d->mtd, use_size);
+
+ mapped = 0;
+ pages = d->mbd_dev->size;
+ for (i = 0; i < pages; i++)
+ if (d->page_data[i] != BLOCK_UNDEF)
+ mapped++;
+
+ mutex_unlock(&d->mbd_dev->lock);
+
+ for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
+ if (!count[i])
+ continue;
+
+ if (min[i] != max[i])
+ seq_printf(s, "%s:\t%5d erase blocks, erased min %d, "
+ "max %d times\n",
+ name[i], count[i], min[i], max[i]);
+ else
+ seq_printf(s, "%s:\t%5d erase blocks, all erased %d "
+ "times\n", name[i], count[i], min[i]);
+ }
+
+ if (bb_cnt)
+ seq_printf(s, "bad:\t%5u erase blocks\n", bb_cnt);
+
+ if (cw)
+ seq_printf(s, "current erase block: %u pages used, %u free, "
+ "erased %u times\n",
+ cwp, d->pages_per_eblk - cwp, cwecount);
+
+ seq_printf(s, "total erasures: %lu\n", sum);
+
+ seq_printf(s, "\n");
+
+ seq_printf(s, "mtdswap_readsect count: %llu\n", d->sect_read_count);
+ seq_printf(s, "mtdswap_writesect count: %llu\n", d->sect_write_count);
+ seq_printf(s, "mtdswap_discard count: %llu\n", d->discard_count);
+ seq_printf(s, "mtd read count: %llu\n", d->mtd_read_count);
+ seq_printf(s, "mtd write count: %llu\n", d->mtd_write_count);
+ seq_printf(s, "discarded pages count: %llu\n", d->discard_page_count);
+
+ seq_printf(s, "\n");
+ seq_printf(s, "total pages: %u\n", pages);
+ seq_printf(s, "pages mapped: %u\n", mapped);
+
+ return 0;
+}
+
+static int mtdswap_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtdswap_show, inode->i_private);
+}
+
+static const struct file_operations mtdswap_fops = {
+ .open = mtdswap_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mtdswap_add_debugfs(struct mtdswap_dev *d)
+{
+ struct gendisk *gd = d->mbd_dev->disk;
+ struct device *dev = disk_to_dev(gd);
+
+ struct dentry *root;
+ struct dentry *dent;
+
+ root = debugfs_create_dir(gd->disk_name, NULL);
+ if (IS_ERR(root))
+ return 0;
+
+ if (!root) {
+ dev_err(dev, "failed to initialize debugfs\n");
+ return -1;
+ }
+
+ d->debugfs_root = root;
+
+ dent = debugfs_create_file("stats", S_IRUSR, root, d,
+ &mtdswap_fops);
+ if (!dent) {
+ dev_err(d->dev, "debugfs_create_file failed\n");
+ debugfs_remove_recursive(root);
+ d->debugfs_root = NULL;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
+ unsigned int spare_cnt)
+{
+ struct mtd_info *mtd = d->mbd_dev->mtd;
+ unsigned int i, eblk_bytes, pages, blocks;
+ int ret = -ENOMEM;
+
+ d->mtd = mtd;
+ d->eblks = eblocks;
+ d->spare_eblks = spare_cnt;
+ d->pages_per_eblk = mtd->erasesize >> PAGE_SHIFT;
+
+ pages = d->mbd_dev->size;
+ blocks = eblocks * d->pages_per_eblk;
+
+ for (i = 0; i < MTDSWAP_TREE_CNT; i++)
+ d->trees[i].root = RB_ROOT;
+
+ d->page_data = vmalloc(sizeof(int)*pages);
+ if (!d->page_data)
+ goto page_data_fail;
+
+ d->revmap = vmalloc(sizeof(int)*blocks);
+ if (!d->revmap)
+ goto revmap_fail;
+
+ eblk_bytes = sizeof(struct swap_eb)*d->eblks;
+ d->eb_data = vmalloc(eblk_bytes);
+ if (!d->eb_data)
+ goto eb_data_fail;
+
+ memset(d->eb_data, 0, eblk_bytes);
+ for (i = 0; i < pages; i++)
+ d->page_data[i] = BLOCK_UNDEF;
+
+ for (i = 0; i < blocks; i++)
+ d->revmap[i] = PAGE_UNDEF;
+
+ d->page_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!d->page_buf)
+ goto page_buf_fail;
+
+ d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL);
+ if (!d->oob_buf)
+ goto oob_buf_fail;
+
+ mtdswap_scan_eblks(d);
+
+ return 0;
+
+oob_buf_fail:
+ kfree(d->page_buf);
+page_buf_fail:
+ vfree(d->eb_data);
+eb_data_fail:
+ vfree(d->revmap);
+revmap_fail:
+ vfree(d->page_data);
+page_data_fail:
+ printk(KERN_ERR "%s: init failed (%d)\n", MTDSWAP_PREFIX, ret);
+ return ret;
+}
+
+static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct mtdswap_dev *d;
+ struct mtd_blktrans_dev *mbd_dev;
+ char *parts;
+ char *this_opt;
+ unsigned long part;
+ unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
+ uint64_t swap_size, use_size, size_limit;
+ struct nand_ecclayout *oinfo;
+ int ret;
+
+ parts = &partitions[0];
+ if (!*parts)
+ return;
+
+ while ((this_opt = strsep(&parts, ",")) != NULL) {
+ if (strict_strtoul(this_opt, 0, &part) < 0)
+ return;
+
+ if (mtd->index == part)
+ break;
+ }
+
+ if (mtd->index != part)
+ return;
+
+ if (mtd->erasesize < PAGE_SIZE || mtd->erasesize % PAGE_SIZE) {
+ printk(KERN_ERR "%s: Erase size %u not multiple of PAGE_SIZE "
+ "%lu\n", MTDSWAP_PREFIX, mtd->erasesize, PAGE_SIZE);
+ return;
+ }
+
+ if (PAGE_SIZE % mtd->writesize || mtd->writesize > PAGE_SIZE) {
+ printk(KERN_ERR "%s: PAGE_SIZE %lu not multiple of write size"
+ " %u\n", MTDSWAP_PREFIX, PAGE_SIZE, mtd->writesize);
+ return;
+ }
+
+ oinfo = mtd->ecclayout;
+ if (!mtd->oobsize || !oinfo || oinfo->oobavail < MTDSWAP_OOBSIZE) {
+ printk(KERN_ERR "%s: Not enough free bytes in OOB, "
+ "%d available, %lu needed.\n",
+ MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
+ return;
+ }
+
+ if (spare_eblocks > 100)
+ spare_eblocks = 100;
+
+ use_size = mtd->size;
+ size_limit = (uint64_t) BLOCK_MAX * PAGE_SIZE;
+
+ if (mtd->size > size_limit) {
+ printk(KERN_WARNING "%s: Device too large. Limiting size to "
+ "%llu bytes\n", MTDSWAP_PREFIX, size_limit);
+ use_size = size_limit;
+ }
+
+ eblocks = mtd_div_by_eb(use_size, mtd);
+ use_size = eblocks * mtd->erasesize;
+ bad_blocks = mtdswap_badblocks(mtd, use_size);
+ eavailable = eblocks - bad_blocks;
+
+ if (eavailable < MIN_ERASE_BLOCKS) {
+ printk(KERN_ERR "%s: Not enough erase blocks. %u available, "
+ "%d needed\n", MTDSWAP_PREFIX, eavailable,
+ MIN_ERASE_BLOCKS);
+ return;
+ }
+
+ spare_cnt = div_u64((uint64_t)eavailable * spare_eblocks, 100);
+
+ if (spare_cnt < MIN_SPARE_EBLOCKS)
+ spare_cnt = MIN_SPARE_EBLOCKS;
+
+ if (spare_cnt > eavailable - 1)
+ spare_cnt = eavailable - 1;
+
+ swap_size = (uint64_t)(eavailable - spare_cnt) * mtd->erasesize +
+ (header ? PAGE_SIZE : 0);
+
+ printk(KERN_INFO "%s: Enabling MTD swap on device %lu, size %llu KB, "
+ "%u spare, %u bad blocks\n",
+ MTDSWAP_PREFIX, part, swap_size / 1024, spare_cnt, bad_blocks);
+
+ d = kzalloc(sizeof(struct mtdswap_dev), GFP_KERNEL);
+ if (!d)
+ return;
+
+ mbd_dev = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
+ if (!mbd_dev) {
+ kfree(d);
+ return;
+ }
+
+ d->mbd_dev = mbd_dev;
+ mbd_dev->priv = d;
+
+ mbd_dev->mtd = mtd;
+ mbd_dev->devnum = mtd->index;
+ mbd_dev->size = swap_size >> PAGE_SHIFT;
+ mbd_dev->tr = tr;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ mbd_dev->readonly = 1;
+
+ if (mtdswap_init(d, eblocks, spare_cnt) < 0)
+ goto init_failed;
+
+ if (add_mtd_blktrans_dev(mbd_dev) < 0)
+ goto cleanup;
+
+ d->dev = disk_to_dev(mbd_dev->disk);
+
+ ret = mtdswap_add_debugfs(d);
+ if (ret < 0)
+ goto debugfs_failed;
+
+ return;
+
+debugfs_failed:
+ del_mtd_blktrans_dev(mbd_dev);
+
+cleanup:
+ mtdswap_cleanup(d);
+
+init_failed:
+ kfree(mbd_dev);
+ kfree(d);
+}
+
+static void mtdswap_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+
+ debugfs_remove_recursive(d->debugfs_root);
+ del_mtd_blktrans_dev(dev);
+ mtdswap_cleanup(d);
+ kfree(d);
+}
+
+static struct mtd_blktrans_ops mtdswap_ops = {
+ .name = "mtdswap",
+ .major = 0,
+ .part_bits = 0,
+ .blksize = PAGE_SIZE,
+ .flush = mtdswap_flush,
+ .readsect = mtdswap_readsect,
+ .writesect = mtdswap_writesect,
+ .discard = mtdswap_discard,
+ .background = mtdswap_background,
+ .add_mtd = mtdswap_add_mtd,
+ .remove_dev = mtdswap_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+static int __init mtdswap_modinit(void)
+{
+ return register_mtd_blktrans(&mtdswap_ops);
+}
+
+static void __exit mtdswap_modexit(void)
+{
+ deregister_mtd_blktrans(&mtdswap_ops);
+}
+
+module_init(mtdswap_modinit);
+module_exit(mtdswap_modexit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
+MODULE_DESCRIPTION("Block device access to an MTD suitable for using as "
+ "swap space");
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 4f6c06f16328..a92054e945e1 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -31,6 +31,21 @@ config MTD_NAND_VERIFY_WRITE
device thinks the write was successful, a bit could have been
flipped accidentally due to device wear or something else.
+config MTD_NAND_BCH
+ tristate
+ select BCH
+ depends on MTD_NAND_ECC_BCH
+ default MTD_NAND
+
+config MTD_NAND_ECC_BCH
+ bool "Support software BCH ECC"
+ default n
+ help
+ This enables support for software BCH error correction. Binary BCH
+ codes are more powerful and CPU-intensive than traditional Hamming
+ ECC codes. They are used with NAND devices requiring more than 1 bit
+ of error correction.
+
config MTD_SM_COMMON
tristate
default n
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 8ad6faec72cb..5745d831168e 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_MTD_NAND) += nand.o
obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
+obj-$(CONFIG_MTD_NAND_BCH) += nand_bch.o
obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index ccce0f03b5dc..6fae04b3fc6d 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -48,6 +48,9 @@
#define no_ecc 0
#endif
+static int use_dma = 1;
+module_param(use_dma, int, 0);
+
static int on_flash_bbt = 0;
module_param(on_flash_bbt, int, 0);
@@ -89,11 +92,20 @@ struct atmel_nand_host {
struct nand_chip nand_chip;
struct mtd_info mtd;
void __iomem *io_base;
+ dma_addr_t io_phys;
struct atmel_nand_data *board;
struct device *dev;
void __iomem *ecc;
+
+ struct completion comp;
+ struct dma_chan *dma_chan;
};
+static int cpu_has_dma(void)
+{
+ return cpu_is_at91sam9rl() || cpu_is_at91sam9g45();
+}
+
/*
* Enable NAND.
*/
@@ -150,7 +162,7 @@ static int atmel_nand_device_ready(struct mtd_info *mtd)
/*
* Minimal-overhead PIO for data access.
*/
-static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
{
struct nand_chip *nand_chip = mtd->priv;
@@ -164,7 +176,7 @@ static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
__raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
}
-static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
{
struct nand_chip *nand_chip = mtd->priv;
@@ -178,6 +190,121 @@ static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
__raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
}
+static void dma_complete_func(void *completion)
+{
+ complete(completion);
+}
+
+static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
+ int is_read)
+{
+ struct dma_device *dma_dev;
+ enum dma_ctrl_flags flags;
+ dma_addr_t dma_src_addr, dma_dst_addr, phys_addr;
+ struct dma_async_tx_descriptor *tx = NULL;
+ dma_cookie_t cookie;
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+ void *p = buf;
+ int err = -EIO;
+ enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
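+ /*
+ * Buffers above high_memory are vmalloc'ed and not part of the
+ * kernel linear mapping; translate to the backing page so the
+ * address can be DMA-mapped. Only buffers contained in a single
+ * page are handled here.
+ */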
+ if (buf >= high_memory) {
+ struct page *pg;
+
+ if (((size_t)buf & PAGE_MASK) !=
+ ((size_t)(buf + len - 1) & PAGE_MASK)) {
+ dev_warn(host->dev, "Buffer not fit in one page\n");
+ goto err_buf;
+ }
+
+ pg = vmalloc_to_page(buf);
+ if (pg == 0) {
+ dev_err(host->dev, "Failed to vmalloc_to_page\n");
+ goto err_buf;
+ }
+ p = page_address(pg) + ((size_t)buf & ~PAGE_MASK);
+ }
+
+ dma_dev = host->dma_chan->device;
+
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP;
+
+ phys_addr = dma_map_single(dma_dev->dev, p, len, dir);
+ if (dma_mapping_error(dma_dev->dev, phys_addr)) {
+ dev_err(host->dev, "Failed to dma_map_single\n");
+ goto err_buf;
+ }
+
+ if (is_read) {
+ dma_src_addr = host->io_phys;
+ dma_dst_addr = phys_addr;
+ } else {
+ dma_src_addr = phys_addr;
+ dma_dst_addr = host->io_phys;
+ }
+
+ tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr,
+ dma_src_addr, len, flags);
+ if (!tx) {
+ dev_err(host->dev, "Failed to prepare DMA memcpy\n");
+ goto err_dma;
+ }
+
+ init_completion(&host->comp);
+ tx->callback = dma_complete_func;
+ tx->callback_param = &host->comp;
+
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(host->dev, "Failed to do DMA tx_submit\n");
+ goto err_dma;
+ }
+
+ dma_async_issue_pending(host->dma_chan);
+ wait_for_completion(&host->comp);
+
+ err = 0;
+
+err_dma:
+ dma_unmap_single(dma_dev->dev, phys_addr, len, dir);
+err_buf:
+ if (err != 0)
+ dev_warn(host->dev, "Fall back to CPU I/O\n");
+ return err;
+}
+
+static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+
+ if (use_dma && len >= mtd->oobsize)
+ if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
+ return;
+
+ if (host->board->bus_width_16)
+ atmel_read_buf16(mtd, buf, len);
+ else
+ atmel_read_buf8(mtd, buf, len);
+}
+
+static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+
+ if (use_dma && len >= mtd->oobsize)
+ if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
+ return;
+
+ if (host->board->bus_width_16)
+ atmel_write_buf16(mtd, buf, len);
+ else
+ atmel_write_buf8(mtd, buf, len);
+}
+
/*
* Calculate HW ECC
*
@@ -398,6 +525,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ host->io_phys = (dma_addr_t)mem->start;
+
host->io_base = ioremap(mem->start, mem->end - mem->start + 1);
if (host->io_base == NULL) {
printk(KERN_ERR "atmel_nand: ioremap failed\n");
@@ -448,14 +577,11 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
nand_chip->chip_delay = 20; /* 20us command delay time */
- if (host->board->bus_width_16) { /* 16-bit bus width */
+ if (host->board->bus_width_16) /* 16-bit bus width */
nand_chip->options |= NAND_BUSWIDTH_16;
- nand_chip->read_buf = atmel_read_buf16;
- nand_chip->write_buf = atmel_write_buf16;
- } else {
- nand_chip->read_buf = atmel_read_buf;
- nand_chip->write_buf = atmel_write_buf;
- }
+
+ nand_chip->read_buf = atmel_read_buf;
+ nand_chip->write_buf = atmel_write_buf;
platform_set_drvdata(pdev, host);
atmel_nand_enable(host);
@@ -473,6 +599,22 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
nand_chip->options |= NAND_USE_FLASH_BBT;
}
+ if (cpu_has_dma() && use_dma) {
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+ host->dma_chan = dma_request_channel(mask, 0, NULL);
+ if (!host->dma_chan) {
+ dev_err(host->dev, "Failed to request DMA channel\n");
+ use_dma = 0;
+ }
+ }
+ if (use_dma)
+ dev_info(host->dev, "Using DMA for NAND access.\n");
+ else
+ dev_info(host->dev, "No DMA support for NAND access.\n");
+
/* first scan to find the device and get the page size */
if (nand_scan_ident(mtd, 1, NULL)) {
res = -ENXIO;
@@ -555,6 +697,8 @@ err_scan_ident:
err_no_card:
atmel_nand_disable(host);
platform_set_drvdata(pdev, NULL);
+ if (host->dma_chan)
+ dma_release_channel(host->dma_chan);
if (host->ecc)
iounmap(host->ecc);
err_ecc_ioremap:
@@ -578,6 +722,10 @@ static int __exit atmel_nand_remove(struct platform_device *pdev)
if (host->ecc)
iounmap(host->ecc);
+
+ if (host->dma_chan)
+ dma_release_channel(host->dma_chan);
+
iounmap(host->io_base);
kfree(host);
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index a90fde3ede28..aff3468867ac 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -37,9 +37,6 @@
#include <mach/nand.h>
#include <mach/aemif.h>
-#include <asm/mach-types.h>
-
-
/*
* This is a device driver for the NAND flash controller found on the
* various DaVinci family chips. It handles up to four SoC chipselects,
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index c2f95437e5e9..0b81b5b499d1 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -29,6 +29,7 @@
#include <linux/clk.h>
#include <linux/gfp.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -757,9 +758,9 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
/* Enable NFC clock */
prv->clk = clk_get(dev, "nfc_clk");
- if (!prv->clk) {
+ if (IS_ERR(prv->clk)) {
dev_err(dev, "Unable to acquire NFC clock!\n");
- retval = -ENODEV;
+ retval = PTR_ERR(prv->clk);
goto error;
}
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 5ae1d9ee2cf1..42a95fb41504 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -211,6 +211,31 @@ static struct nand_ecclayout nandv2_hw_eccoob_largepage = {
}
};
+/* OOB description for 4096 byte pages with 128 byte OOB */
+static struct nand_ecclayout nandv2_hw_eccoob_4k = {
+ .eccbytes = 8 * 9,
+ .eccpos = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ },
+ .oobfree = {
+ {.offset = 2, .length = 4},
+ {.offset = 16, .length = 7},
+ {.offset = 32, .length = 7},
+ {.offset = 48, .length = 7},
+ {.offset = 64, .length = 7},
+ {.offset = 80, .length = 7},
+ {.offset = 96, .length = 7},
+ {.offset = 112, .length = 7},
+ }
+};
+
#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
#endif
@@ -641,9 +666,9 @@ static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
n = min(n, len);
- memcpy(buf, host->data_buf + col, len);
+ memcpy(buf, host->data_buf + col, n);
- host->buf_start += len;
+ host->buf_start += n;
}
/* Used by the upper layer to verify the data in NAND Flash
@@ -1185,6 +1210,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
if (mtd->writesize == 2048)
this->ecc.layout = oob_largepage;
+ if (nfc_is_v21() && mtd->writesize == 4096)
+ this->ecc.layout = &nandv2_hw_eccoob_4k;
/* second phase scan */
if (nand_scan_tail(mtd)) {
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index a9c6ce745767..85cfc061d41c 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -42,6 +42,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/nand_bch.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/leds.h>
@@ -2377,7 +2378,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- /* Do not allow reads past end of device */
+ /* Do not allow write past end of device */
if (unlikely(to >= mtd->size ||
ops->ooboffs + ops->ooblen >
((mtd->size >> chip->page_shift) -
@@ -3248,7 +3249,7 @@ int nand_scan_tail(struct mtd_info *mtd)
/*
* If no default placement scheme is given, select an appropriate one
*/
- if (!chip->ecc.layout) {
+ if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) {
switch (mtd->oobsize) {
case 8:
chip->ecc.layout = &nand_oob_8;
@@ -3351,6 +3352,40 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->ecc.bytes = 3;
break;
+ case NAND_ECC_SOFT_BCH:
+ if (!mtd_nand_has_bch()) {
+ printk(KERN_WARNING "CONFIG_MTD_ECC_BCH not enabled\n");
+ BUG();
+ }
+ chip->ecc.calculate = nand_bch_calculate_ecc;
+ chip->ecc.correct = nand_bch_correct_data;
+ chip->ecc.read_page = nand_read_page_swecc;
+ chip->ecc.read_subpage = nand_read_subpage;
+ chip->ecc.write_page = nand_write_page_swecc;
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ chip->ecc.read_oob = nand_read_oob_std;
+ chip->ecc.write_oob = nand_write_oob_std;
+ /*
+ * Board driver should supply ecc.size and ecc.bytes values to
+ * select how many bits are correctable; see nand_bch_init()
+ * for details.
+ * Otherwise, default to 4 bits for large page devices
+ */
+ if (!chip->ecc.size && (mtd->oobsize >= 64)) {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 7;
+ }
+ chip->ecc.priv = nand_bch_init(mtd,
+ chip->ecc.size,
+ chip->ecc.bytes,
+ &chip->ecc.layout);
+ if (!chip->ecc.priv) {
+ printk(KERN_WARNING "BCH ECC initialization failed!\n");
+ BUG();
+ }
+ break;
+
case NAND_ECC_NONE:
printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. "
"This is not recommended !!\n");
@@ -3501,6 +3536,9 @@ void nand_release(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
+ if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
+ nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
+
#ifdef CONFIG_MTD_PARTITIONS
/* Deregister partitions */
del_mtd_partitions(mtd);
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 6ebd869993aa..a1e8b30078d9 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1101,12 +1101,16 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct nand_chip *this = mtd->priv;
- u32 pattern_len = bd->len;
- u32 bits = bd->options & NAND_BBT_NRBITS_MSK;
+ u32 pattern_len;
+ u32 bits;
u32 table_size;
if (!bd)
return;
+
+ pattern_len = bd->len;
+ bits = bd->options & NAND_BBT_NRBITS_MSK;
+
BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) &&
!(this->options & NAND_USE_FLASH_BBT));
BUG_ON(!bits);
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
new file mode 100644
index 000000000000..0f931e757116
--- /dev/null
+++ b/drivers/mtd/nand/nand_bch.c
@@ -0,0 +1,243 @@
+/*
+ * This file provides ECC correction for more than 1 bit per block of data,
+ * using binary BCH codes. It relies on the generic BCH library lib/bch.c.
+ *
+ * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this file; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/bch.h>
+
+/**
+ * struct nand_bch_control - private NAND BCH control structure
+ * @bch: BCH control structure
+ * @ecclayout: private ecc layout for this BCH configuration
+ * @errloc: error location array
+ * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
+ */
+struct nand_bch_control {
+ struct bch_control *bch;
+ struct nand_ecclayout ecclayout;
+ unsigned int *errloc;
+ unsigned char *eccmask;
+};
+
+/**
+ * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block
+ * @mtd: MTD block structure
+ * @buf: input buffer with raw data
+ * @code: output buffer with ECC
+ */
+int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
+ unsigned char *code)
+{
+ const struct nand_chip *chip = mtd->priv;
+ struct nand_bch_control *nbc = chip->ecc.priv;
+ unsigned int i;
+
+ memset(code, 0, chip->ecc.bytes);
+ encode_bch(nbc->bch, buf, chip->ecc.size, code);
+
+ /* apply mask so that an erased page is a valid codeword */
+ for (i = 0; i < chip->ecc.bytes; i++)
+ code[i] ^= nbc->eccmask[i];
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_bch_calculate_ecc);
+
+/**
+ * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @mtd: MTD block structure
+ * @buf: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ *
+ * Detect and correct bit errors for a data byte block
+ */
+int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ const struct nand_chip *chip = mtd->priv;
+ struct nand_bch_control *nbc = chip->ecc.priv;
+ unsigned int *errloc = nbc->errloc;
+ int i, count;
+
+ count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
+ NULL, errloc);
+ if (count > 0) {
+ for (i = 0; i < count; i++) {
+ if (errloc[i] < (chip->ecc.size*8))
+ /* error is located in data, correct it */
+ buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
+ /* else error in ecc, no action needed */
+
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: corrected bitflip %u\n",
+ __func__, errloc[i]);
+ }
+ } else if (count < 0) {
+ printk(KERN_ERR "ecc unrecoverable error\n");
+ count = -1;
+ }
+ return count;
+}
+EXPORT_SYMBOL(nand_bch_correct_data);
+
+/**
+ * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
+ * @mtd: MTD block structure
+ * @eccsize: ecc block size in bytes
+ * @eccbytes: ecc length in bytes
+ * @ecclayout: output default layout
+ *
+ * Returns:
+ * a pointer to a new NAND BCH control structure, or NULL upon failure
+ *
+ * Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes
+ * are used to compute BCH parameters m (Galois field order) and t (error
+ * correction capability). @eccbytes should be equal to the number of bytes
+ * required to store m*t bits, where m is such that 2^m-1 > @eccsize*8.
+ *
+ * Example: to configure 4 bit correction per 512 bytes, you should pass
+ * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
+ * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
+ */
+struct nand_bch_control *
+nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
+ struct nand_ecclayout **ecclayout)
+{
+ unsigned int m, t, eccsteps, i;
+ struct nand_ecclayout *layout;
+ struct nand_bch_control *nbc = NULL;
+ unsigned char *erased_page;
+
+ if (!eccsize || !eccbytes) {
+ printk(KERN_WARNING "ecc parameters not supplied\n");
+ goto fail;
+ }
+
+ m = fls(1+8*eccsize);
+ t = (eccbytes*8)/m;
+
+ nbc = kzalloc(sizeof(*nbc), GFP_KERNEL);
+ if (!nbc)
+ goto fail;
+
+ nbc->bch = init_bch(m, t, 0);
+ if (!nbc->bch)
+ goto fail;
+
+ /* verify that eccbytes has the expected value */
+ if (nbc->bch->ecc_bytes != eccbytes) {
+ printk(KERN_WARNING "invalid eccbytes %u, should be %u\n",
+ eccbytes, nbc->bch->ecc_bytes);
+ goto fail;
+ }
+
+ eccsteps = mtd->writesize/eccsize;
+
+ /* if no ecc placement scheme was provided, build one */
+ if (!*ecclayout) {
+
+ /* handle large page devices only */
+ if (mtd->oobsize < 64) {
+ printk(KERN_WARNING "must provide an oob scheme for "
+ "oobsize %d\n", mtd->oobsize);
+ goto fail;
+ }
+
+ layout = &nbc->ecclayout;
+ layout->eccbytes = eccsteps*eccbytes;
+
+ /* reserve 2 bytes for bad block marker */
+ if (layout->eccbytes+2 > mtd->oobsize) {
+ printk(KERN_WARNING "no suitable oob scheme available "
+ "for oobsize %d eccbytes %u\n", mtd->oobsize,
+ eccbytes);
+ goto fail;
+ }
+ /* put ecc bytes at oob tail */
+ for (i = 0; i < layout->eccbytes; i++)
+ layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
+
+ layout->oobfree[0].offset = 2;
+ layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
+
+ *ecclayout = layout;
+ }
+
+ /* sanity checks */
+ if (8*(eccsize+eccbytes) >= (1 << m)) {
+ printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
+ goto fail;
+ }
+ if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) {
+ printk(KERN_WARNING "invalid ecc layout\n");
+ goto fail;
+ }
+
+ nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL);
+ nbc->errloc = kmalloc(t*sizeof(*nbc->errloc), GFP_KERNEL);
+ if (!nbc->eccmask || !nbc->errloc)
+ goto fail;
+ /*
+ * compute and store the inverted ecc of an erased ecc block
+ */
+ erased_page = kmalloc(eccsize, GFP_KERNEL);
+ if (!erased_page)
+ goto fail;
+
+ memset(erased_page, 0xff, eccsize);
+ memset(nbc->eccmask, 0, eccbytes);
+ encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
+ kfree(erased_page);
+
+ for (i = 0; i < eccbytes; i++)
+ nbc->eccmask[i] ^= 0xff;
+
+ return nbc;
+fail:
+ nand_bch_free(nbc);
+ return NULL;
+}
+EXPORT_SYMBOL(nand_bch_init);
+
+/**
+ * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources
+ * @nbc: NAND BCH control structure
+ */
+void nand_bch_free(struct nand_bch_control *nbc)
+{
+ if (nbc) {
+ free_bch(nbc->bch);
+ kfree(nbc->errloc);
+ kfree(nbc->eccmask);
+ kfree(nbc);
+ }
+}
+EXPORT_SYMBOL(nand_bch_free);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
+MODULE_DESCRIPTION("NAND software BCH ECC support");
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index a5aa99f014ba..213181be0d9a 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -34,6 +34,7 @@
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_bch.h>
#include <linux/mtd/partitions.h>
#include <linux/delay.h>
#include <linux/list.h>
@@ -108,6 +109,7 @@ static unsigned int rptwear = 0;
static unsigned int overridesize = 0;
static char *cache_file = NULL;
static unsigned int bbt;
+static unsigned int bch;
module_param(first_id_byte, uint, 0400);
module_param(second_id_byte, uint, 0400);
@@ -132,6 +134,7 @@ module_param(rptwear, uint, 0400);
module_param(overridesize, uint, 0400);
module_param(cache_file, charp, 0400);
module_param(bbt, uint, 0400);
+module_param(bch, uint, 0400);
MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
@@ -165,6 +168,8 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I
" e.g. 5 means a size of 32 erase blocks");
MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
+MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
+ "be correctable in 512-byte blocks");
/* The largest possible page size */
#define NS_LARGEST_PAGE_SIZE 4096
@@ -2309,7 +2314,43 @@ static int __init ns_init_module(void)
if ((retval = parse_gravepages()) != 0)
goto error;
- if ((retval = nand_scan(nsmtd, 1)) != 0) {
+ retval = nand_scan_ident(nsmtd, 1, NULL);
+ if (retval) {
+ NS_ERR("cannot scan NAND Simulator device\n");
+ if (retval > 0)
+ retval = -ENXIO;
+ goto error;
+ }
+
+ if (bch) {
+ unsigned int eccsteps, eccbytes;
+ if (!mtd_nand_has_bch()) {
+ NS_ERR("BCH ECC support is disabled\n");
+ retval = -EINVAL;
+ goto error;
+ }
+ /* use 512-byte ecc blocks */
+ eccsteps = nsmtd->writesize/512;
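+ /*
+ * m = 13 for 512-byte steps (2^13-1 > 4096 data bits), so each
+ * correctable bit costs 13 ECC bits, rounded up to whole bytes.
+ */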
+ eccbytes = (bch*13+7)/8;
+ /* do not bother supporting small page devices */
+ if ((nsmtd->oobsize < 64) || !eccsteps) {
+ NS_ERR("bch not available on small page devices\n");
+ retval = -EINVAL;
+ goto error;
+ }
+ if ((eccbytes*eccsteps+2) > nsmtd->oobsize) {
+ NS_ERR("invalid bch value %u\n", bch);
+ retval = -EINVAL;
+ goto error;
+ }
+ chip->ecc.mode = NAND_ECC_SOFT_BCH;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = eccbytes;
+ NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
+ }
+
+ retval = nand_scan_tail(nsmtd);
+ if (retval) {
NS_ERR("can't register NAND Simulator\n");
if (retval > 0)
retval = -ENXIO;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 7b8f1fffc528..da9a351c9d79 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -668,6 +668,8 @@ static void gen_true_ecc(u8 *ecc_buf)
*
* This function compares two ECC's and indicates if there is an error.
* If the error can be corrected it will be corrected to the buffer.
+ * If there is no error, %0 is returned. If there is an error but it
+ * was corrected, %1 is returned. Otherwise, %-1 is returned.
*/
static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
u8 *ecc_data2, /* read from register */
@@ -773,7 +775,7 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
page_data[find_byte] ^= (1 << find_bit);
- return 0;
+ return 1;
default:
if (isEccFF) {
if (ecc_data2[0] == 0 &&
@@ -794,8 +796,11 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
* @calc_ecc: ecc read from HW ECC registers
*
* Compares the ecc read from nand spare area with ECC registers values
- * and if ECC's mismached, it will call 'omap_compare_ecc' for error detection
- * and correction.
+ * and if ECC's mismatched, it will call 'omap_compare_ecc' for error
+ * detection and correction. If there are no errors, %0 is returned. If
+ * there were errors and all of the errors were corrected, the number of
+ * corrected errors is returned. If uncorrectable errors exist, %-1 is
+ * returned.
*/
static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
u_char *read_ecc, u_char *calc_ecc)
@@ -803,6 +808,7 @@ static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
int blockCnt = 0, i = 0, ret = 0;
+ int stat = 0;
/* Ex NAND_ECC_HW12_2048 */
if ((info->nand.ecc.mode == NAND_ECC_HW) &&
@@ -816,12 +822,14 @@ static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
if (ret < 0)
return ret;
+ /* keep track of the number of corrected errors */
+ stat += ret;
}
read_ecc += 3;
calc_ecc += 3;
dat += 512;
}
- return 0;
+ return stat;
}
/**
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index ea2c288df3f6..ab7f4c33ced6 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -27,6 +27,8 @@
#include <plat/pxa3xx_nand.h>
#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
+#define NAND_STOP_DELAY (2 * HZ/50)
+#define PAGE_CHUNK_SIZE (2048)
/* registers and bit definitions */
#define NDCR (0x00) /* Control register */
@@ -52,16 +54,18 @@
#define NDCR_ND_MODE (0x3 << 21)
#define NDCR_NAND_MODE (0x0)
#define NDCR_CLR_PG_CNT (0x1 << 20)
-#define NDCR_CLR_ECC (0x1 << 19)
+#define NDCR_STOP_ON_UNCOR (0x1 << 19)
#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
#define NDCR_RA_START (0x1 << 15)
#define NDCR_PG_PER_BLK (0x1 << 14)
#define NDCR_ND_ARB_EN (0x1 << 12)
+#define NDCR_INT_MASK (0xFFF)
#define NDSR_MASK (0xfff)
-#define NDSR_RDY (0x1 << 11)
+#define NDSR_RDY (0x1 << 12)
+#define NDSR_FLASH_RDY (0x1 << 11)
#define NDSR_CS0_PAGED (0x1 << 10)
#define NDSR_CS1_PAGED (0x1 << 9)
#define NDSR_CS0_CMDD (0x1 << 8)
@@ -74,6 +78,7 @@
#define NDSR_RDDREQ (0x1 << 1)
#define NDSR_WRCMDREQ (0x1)
+#define NDCB0_ST_ROW_EN (0x1 << 26)
#define NDCB0_AUTO_RS (0x1 << 25)
#define NDCB0_CSEL (0x1 << 24)
#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
@@ -104,18 +109,21 @@ enum {
};
enum {
- STATE_READY = 0,
+ STATE_IDLE = 0,
STATE_CMD_HANDLE,
STATE_DMA_READING,
STATE_DMA_WRITING,
STATE_DMA_DONE,
STATE_PIO_READING,
STATE_PIO_WRITING,
+ STATE_CMD_DONE,
+ STATE_READY,
};
struct pxa3xx_nand_info {
struct nand_chip nand_chip;
+ struct nand_hw_control controller;
struct platform_device *pdev;
struct pxa3xx_nand_cmdset *cmdset;
@@ -126,6 +134,7 @@ struct pxa3xx_nand_info {
unsigned int buf_start;
unsigned int buf_count;
+ struct mtd_info *mtd;
/* DMA information */
int drcmr_dat;
int drcmr_cmd;
@@ -149,6 +158,7 @@ struct pxa3xx_nand_info {
int use_ecc; /* use HW ECC ? */
int use_dma; /* use DMA ? */
+ int is_ready;
unsigned int page_size; /* page size of attached chip */
unsigned int data_size; /* data size in FIFO */
@@ -201,20 +211,22 @@ static struct pxa3xx_nand_timing timing[] = {
};
static struct pxa3xx_nand_flash builtin_flash_types[] = {
- { 0, 0, 2048, 8, 8, 0, &default_cmdset, &timing[0] },
- { 0x46ec, 32, 512, 16, 16, 4096, &default_cmdset, &timing[1] },
- { 0xdaec, 64, 2048, 8, 8, 2048, &default_cmdset, &timing[1] },
- { 0xd7ec, 128, 4096, 8, 8, 8192, &default_cmdset, &timing[1] },
- { 0xa12c, 64, 2048, 8, 8, 1024, &default_cmdset, &timing[2] },
- { 0xb12c, 64, 2048, 16, 16, 1024, &default_cmdset, &timing[2] },
- { 0xdc2c, 64, 2048, 8, 8, 4096, &default_cmdset, &timing[2] },
- { 0xcc2c, 64, 2048, 16, 16, 4096, &default_cmdset, &timing[2] },
- { 0xba20, 64, 2048, 16, 16, 2048, &default_cmdset, &timing[3] },
+{ "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
+{ "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
+{ "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
+{ "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
+{ "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
+{ "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
+{ "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
+{ "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
+{ "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
};
/* Define a default flash type setting serve as flash detecting only */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
+const char *mtd_names[] = {"pxa3xx_nand-0", NULL};
+
#define NDTR0_tCH(c) (min((c), 7) << 19)
#define NDTR0_tCS(c) (min((c), 7) << 16)
#define NDTR0_tWH(c) (min((c), 7) << 11)
@@ -252,25 +264,6 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
nand_writel(info, NDTR1CS0, ndtr1);
}
-#define WAIT_EVENT_TIMEOUT 10
-
-static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event)
-{
- int timeout = WAIT_EVENT_TIMEOUT;
- uint32_t ndsr;
-
- while (timeout--) {
- ndsr = nand_readl(info, NDSR) & NDSR_MASK;
- if (ndsr & event) {
- nand_writel(info, NDSR, ndsr);
- return 0;
- }
- udelay(10);
- }
-
- return -ETIMEDOUT;
-}
-
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
{
int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
@@ -291,69 +284,45 @@ static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
}
}
-static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
- uint16_t cmd, int column, int page_addr)
+/**
+ * NOTE: ND_RUN must be set first and the command buffer written
+ * afterwards, otherwise the controller does not start.
+ * We enable all the interrupts at the same time and
+ * let pxa3xx_nand_irq handle all the logic.
+ */
+static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
- const struct pxa3xx_nand_cmdset *cmdset = info->cmdset;
- pxa3xx_set_datasize(info);
-
- /* generate values for NDCBx registers */
- info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
- info->ndcb1 = 0;
- info->ndcb2 = 0;
- info->ndcb0 |= NDCB0_ADDR_CYC(info->row_addr_cycles + info->col_addr_cycles);
-
- if (info->col_addr_cycles == 2) {
- /* large block, 2 cycles for column address
- * row address starts from 3rd cycle
- */
- info->ndcb1 |= page_addr << 16;
- if (info->row_addr_cycles == 3)
- info->ndcb2 = (page_addr >> 16) & 0xff;
- } else
- /* small block, 1 cycles for column address
- * row address starts from 2nd cycle
- */
- info->ndcb1 = page_addr << 8;
-
- if (cmd == cmdset->program)
- info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS;
+ uint32_t ndcr;
- return 0;
-}
+ ndcr = info->reg_ndcr;
+ ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
+ ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
+ ndcr |= NDCR_ND_RUN;
-static int prepare_erase_cmd(struct pxa3xx_nand_info *info,
- uint16_t cmd, int page_addr)
-{
- info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
- info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3);
- info->ndcb1 = page_addr;
- info->ndcb2 = 0;
- return 0;
+ /* clear status bits and run */
+ nand_writel(info, NDCR, 0);
+ nand_writel(info, NDSR, NDSR_MASK);
+ nand_writel(info, NDCR, ndcr);
}
-static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd)
+static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
- const struct pxa3xx_nand_cmdset *cmdset = info->cmdset;
-
- info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
- info->ndcb1 = 0;
- info->ndcb2 = 0;
+ uint32_t ndcr;
+ int timeout = NAND_STOP_DELAY;
- info->oob_size = 0;
- if (cmd == cmdset->read_id) {
- info->ndcb0 |= NDCB0_CMD_TYPE(3);
- info->data_size = 8;
- } else if (cmd == cmdset->read_status) {
- info->ndcb0 |= NDCB0_CMD_TYPE(4);
- info->data_size = 8;
- } else if (cmd == cmdset->reset || cmd == cmdset->lock ||
- cmd == cmdset->unlock) {
- info->ndcb0 |= NDCB0_CMD_TYPE(5);
- } else
- return -EINVAL;
+	/* wait for the RUN bit in NDCR to become 0 */
+ ndcr = nand_readl(info, NDCR);
+ while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
+ ndcr = nand_readl(info, NDCR);
+ udelay(1);
+ }
- return 0;
+ if (timeout <= 0) {
+ ndcr &= ~NDCR_ND_RUN;
+ nand_writel(info, NDCR, ndcr);
+ }
+ /* clear status bits */
+ nand_writel(info, NDSR, NDSR_MASK);
}
static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
@@ -372,39 +341,8 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
nand_writel(info, NDCR, ndcr | int_mask);
}
-/* NOTE: it is a must to set ND_RUN firstly, then write command buffer
- * otherwise, it does not work
- */
-static int write_cmd(struct pxa3xx_nand_info *info)
+static void handle_data_pio(struct pxa3xx_nand_info *info)
{
- uint32_t ndcr;
-
- /* clear status bits and run */
- nand_writel(info, NDSR, NDSR_MASK);
-
- ndcr = info->reg_ndcr;
-
- ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
- ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
- ndcr |= NDCR_ND_RUN;
-
- nand_writel(info, NDCR, ndcr);
-
- if (wait_for_event(info, NDSR_WRCMDREQ)) {
- printk(KERN_ERR "timed out writing command\n");
- return -ETIMEDOUT;
- }
-
- nand_writel(info, NDCB0, info->ndcb0);
- nand_writel(info, NDCB0, info->ndcb1);
- nand_writel(info, NDCB0, info->ndcb2);
- return 0;
-}
-
-static int handle_data_pio(struct pxa3xx_nand_info *info)
-{
- int ret, timeout = CHIP_DELAY_TIMEOUT;
-
switch (info->state) {
case STATE_PIO_WRITING:
__raw_writesl(info->mmio_base + NDDB, info->data_buff,
@@ -412,14 +350,6 @@ static int handle_data_pio(struct pxa3xx_nand_info *info)
if (info->oob_size > 0)
__raw_writesl(info->mmio_base + NDDB, info->oob_buff,
DIV_ROUND_UP(info->oob_size, 4));
-
- enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
-
- ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
- if (!ret) {
- printk(KERN_ERR "program command time out\n");
- return -1;
- }
break;
case STATE_PIO_READING:
__raw_readsl(info->mmio_base + NDDB, info->data_buff,
@@ -431,14 +361,11 @@ static int handle_data_pio(struct pxa3xx_nand_info *info)
default:
printk(KERN_ERR "%s: invalid state %d\n", __func__,
info->state);
- return -EINVAL;
+ BUG();
}
-
- info->state = STATE_READY;
- return 0;
}
-static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out)
+static void start_data_dma(struct pxa3xx_nand_info *info)
{
struct pxa_dma_desc *desc = info->data_desc;
int dma_len = ALIGN(info->data_size + info->oob_size, 32);
@@ -446,14 +373,21 @@ static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out)
desc->ddadr = DDADR_STOP;
desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
- if (dir_out) {
+ switch (info->state) {
+ case STATE_DMA_WRITING:
desc->dsadr = info->data_buff_phys;
desc->dtadr = info->mmio_phys + NDDB;
desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
- } else {
+ break;
+ case STATE_DMA_READING:
desc->dtadr = info->data_buff_phys;
desc->dsadr = info->mmio_phys + NDDB;
desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
+ break;
+ default:
+ printk(KERN_ERR "%s: invalid state %d\n", __func__,
+ info->state);
+ BUG();
}
DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
@@ -471,93 +405,62 @@ static void pxa3xx_nand_data_dma_irq(int channel, void *data)
if (dcsr & DCSR_BUSERR) {
info->retcode = ERR_DMABUSERR;
- complete(&info->cmd_complete);
}
- if (info->state == STATE_DMA_WRITING) {
- info->state = STATE_DMA_DONE;
- enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
- } else {
- info->state = STATE_READY;
- complete(&info->cmd_complete);
- }
+ info->state = STATE_DMA_DONE;
+ enable_int(info, NDCR_INT_MASK);
+ nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
struct pxa3xx_nand_info *info = devid;
- unsigned int status;
+ unsigned int status, is_completed = 0;
status = nand_readl(info, NDSR);
- if (status & (NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR)) {
- if (status & NDSR_DBERR)
- info->retcode = ERR_DBERR;
- else if (status & NDSR_SBERR)
- info->retcode = ERR_SBERR;
-
- disable_int(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);
-
- if (info->use_dma) {
- info->state = STATE_DMA_READING;
- start_data_dma(info, 0);
- } else {
- info->state = STATE_PIO_READING;
- complete(&info->cmd_complete);
- }
- } else if (status & NDSR_WRDREQ) {
- disable_int(info, NDSR_WRDREQ);
+ if (status & NDSR_DBERR)
+ info->retcode = ERR_DBERR;
+ if (status & NDSR_SBERR)
+ info->retcode = ERR_SBERR;
+ if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
+		/* check whether DMA is used to transfer the data */
if (info->use_dma) {
- info->state = STATE_DMA_WRITING;
- start_data_dma(info, 1);
+ disable_int(info, NDCR_INT_MASK);
+ info->state = (status & NDSR_RDDREQ) ?
+ STATE_DMA_READING : STATE_DMA_WRITING;
+ start_data_dma(info);
+ goto NORMAL_IRQ_EXIT;
} else {
- info->state = STATE_PIO_WRITING;
- complete(&info->cmd_complete);
+ info->state = (status & NDSR_RDDREQ) ?
+ STATE_PIO_READING : STATE_PIO_WRITING;
+ handle_data_pio(info);
}
- } else if (status & (NDSR_CS0_BBD | NDSR_CS0_CMDD)) {
- if (status & NDSR_CS0_BBD)
- info->retcode = ERR_BBERR;
-
- disable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
- info->state = STATE_READY;
- complete(&info->cmd_complete);
}
- nand_writel(info, NDSR, status);
- return IRQ_HANDLED;
-}
-
-static int pxa3xx_nand_do_cmd(struct pxa3xx_nand_info *info, uint32_t event)
-{
- uint32_t ndcr;
- int ret, timeout = CHIP_DELAY_TIMEOUT;
-
- if (write_cmd(info)) {
- info->retcode = ERR_SENDCMD;
- goto fail_stop;
+ if (status & NDSR_CS0_CMDD) {
+ info->state = STATE_CMD_DONE;
+ is_completed = 1;
}
-
- info->state = STATE_CMD_HANDLE;
-
- enable_int(info, event);
-
- ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
- if (!ret) {
- printk(KERN_ERR "command execution timed out\n");
- info->retcode = ERR_SENDCMD;
- goto fail_stop;
+ if (status & NDSR_FLASH_RDY) {
+ info->is_ready = 1;
+ info->state = STATE_READY;
}
- if (info->use_dma == 0 && info->data_size > 0)
- if (handle_data_pio(info))
- goto fail_stop;
-
- return 0;
+ if (status & NDSR_WRCMDREQ) {
+ nand_writel(info, NDSR, NDSR_WRCMDREQ);
+ status &= ~NDSR_WRCMDREQ;
+ info->state = STATE_CMD_HANDLE;
+ nand_writel(info, NDCB0, info->ndcb0);
+ nand_writel(info, NDCB0, info->ndcb1);
+ nand_writel(info, NDCB0, info->ndcb2);
+ }
-fail_stop:
- ndcr = nand_readl(info, NDCR);
- nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
- udelay(10);
- return -ETIMEDOUT;
+ /* clear NDSR to let the controller exit the IRQ */
+ nand_writel(info, NDSR, status);
+ if (is_completed)
+ complete(&info->cmd_complete);
+NORMAL_IRQ_EXIT:
+ return IRQ_HANDLED;
}
static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
@@ -574,125 +477,218 @@ static inline int is_buf_blank(uint8_t *buf, size_t len)
return 1;
}
-static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
- int column, int page_addr)
+static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
+ uint16_t column, int page_addr)
{
- struct pxa3xx_nand_info *info = mtd->priv;
- const struct pxa3xx_nand_cmdset *cmdset = info->cmdset;
- int ret;
+ uint16_t cmd;
+ int addr_cycle, exec_cmd, ndcb0;
+ struct mtd_info *mtd = info->mtd;
+
+ ndcb0 = 0;
+ addr_cycle = 0;
+ exec_cmd = 1;
+
+	/* reset the data and oob column pointers before handling data */
+ info->buf_start = 0;
+ info->buf_count = 0;
+ info->oob_size = 0;
+ info->use_ecc = 0;
+ info->is_ready = 0;
+ info->retcode = ERR_NONE;
- info->use_dma = (use_dma) ? 1 : 0;
- info->use_ecc = 0;
- info->data_size = 0;
- info->state = STATE_READY;
+ switch (command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_PAGEPROG:
+ info->use_ecc = 1;
+ case NAND_CMD_READOOB:
+ pxa3xx_set_datasize(info);
+ break;
+ case NAND_CMD_SEQIN:
+ exec_cmd = 0;
+ break;
+ default:
+ info->ndcb1 = 0;
+ info->ndcb2 = 0;
+ break;
+ }
- init_completion(&info->cmd_complete);
+ info->ndcb0 = ndcb0;
+ addr_cycle = NDCB0_ADDR_CYC(info->row_addr_cycles
+ + info->col_addr_cycles);
switch (command) {
case NAND_CMD_READOOB:
- /* disable HW ECC to get all the OOB data */
- info->buf_count = mtd->writesize + mtd->oobsize;
- info->buf_start = mtd->writesize + column;
- memset(info->data_buff, 0xFF, info->buf_count);
+ case NAND_CMD_READ0:
+ cmd = info->cmdset->read1;
+ if (command == NAND_CMD_READOOB)
+ info->buf_start = mtd->writesize + column;
+ else
+ info->buf_start = column;
- if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
- break;
+ if (unlikely(info->page_size < PAGE_CHUNK_SIZE))
+ info->ndcb0 |= NDCB0_CMD_TYPE(0)
+ | addr_cycle
+ | (cmd & NDCB0_CMD1_MASK);
+ else
+ info->ndcb0 |= NDCB0_CMD_TYPE(0)
+ | NDCB0_DBC
+ | addr_cycle
+ | cmd;
- pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);
+ case NAND_CMD_SEQIN:
+ /* small page addr setting */
+ if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) {
+ info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
+ | (column & 0xFF);
- /* We only are OOB, so if the data has error, does not matter */
- if (info->retcode == ERR_DBERR)
- info->retcode = ERR_NONE;
- break;
+ info->ndcb2 = 0;
+ } else {
+ info->ndcb1 = ((page_addr & 0xFFFF) << 16)
+ | (column & 0xFFFF);
+
+ if (page_addr & 0xFF0000)
+ info->ndcb2 = (page_addr & 0xFF0000) >> 16;
+ else
+ info->ndcb2 = 0;
+ }
- case NAND_CMD_READ0:
- info->use_ecc = 1;
- info->retcode = ERR_NONE;
- info->buf_start = column;
info->buf_count = mtd->writesize + mtd->oobsize;
memset(info->data_buff, 0xFF, info->buf_count);
- if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ if (is_buf_blank(info->data_buff,
+ (mtd->writesize + mtd->oobsize))) {
+ exec_cmd = 0;
break;
+ }
- pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);
+ cmd = info->cmdset->program;
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_AUTO_RS
+ | NDCB0_ST_ROW_EN
+ | NDCB0_DBC
+ | cmd
+ | addr_cycle;
+ break;
- if (info->retcode == ERR_DBERR) {
- /* for blank page (all 0xff), HW will calculate its ECC as
- * 0, which is different from the ECC information within
- * OOB, ignore such double bit errors
- */
- if (is_buf_blank(info->data_buff, mtd->writesize))
- info->retcode = ERR_NONE;
- }
+ case NAND_CMD_READID:
+ cmd = info->cmdset->read_id;
+ info->buf_count = info->read_id_bytes;
+ info->ndcb0 |= NDCB0_CMD_TYPE(3)
+ | NDCB0_ADDR_CYC(1)
+ | cmd;
+
+ info->data_size = 8;
break;
- case NAND_CMD_SEQIN:
- info->buf_start = column;
- info->buf_count = mtd->writesize + mtd->oobsize;
- memset(info->data_buff, 0xff, info->buf_count);
+ case NAND_CMD_STATUS:
+ cmd = info->cmdset->read_status;
+ info->buf_count = 1;
+ info->ndcb0 |= NDCB0_CMD_TYPE(4)
+ | NDCB0_ADDR_CYC(1)
+ | cmd;
- /* save column/page_addr for next CMD_PAGEPROG */
- info->seqin_column = column;
- info->seqin_page_addr = page_addr;
+ info->data_size = 8;
break;
- case NAND_CMD_PAGEPROG:
- info->use_ecc = (info->seqin_column >= mtd->writesize) ? 0 : 1;
- if (prepare_read_prog_cmd(info, cmdset->program,
- info->seqin_column, info->seqin_page_addr))
- break;
+ case NAND_CMD_ERASE1:
+ cmd = info->cmdset->erase;
+ info->ndcb0 |= NDCB0_CMD_TYPE(2)
+ | NDCB0_AUTO_RS
+ | NDCB0_ADDR_CYC(3)
+ | NDCB0_DBC
+ | cmd;
+ info->ndcb1 = page_addr;
+ info->ndcb2 = 0;
- pxa3xx_nand_do_cmd(info, NDSR_WRDREQ);
break;
- case NAND_CMD_ERASE1:
- if (prepare_erase_cmd(info, cmdset->erase, page_addr))
- break;
+ case NAND_CMD_RESET:
+ cmd = info->cmdset->reset;
+ info->ndcb0 |= NDCB0_CMD_TYPE(5)
+ | cmd;
- pxa3xx_nand_do_cmd(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
break;
+
case NAND_CMD_ERASE2:
+ exec_cmd = 0;
break;
- case NAND_CMD_READID:
- case NAND_CMD_STATUS:
- info->use_dma = 0; /* force PIO read */
- info->buf_start = 0;
- info->buf_count = (command == NAND_CMD_READID) ?
- info->read_id_bytes : 1;
-
- if (prepare_other_cmd(info, (command == NAND_CMD_READID) ?
- cmdset->read_id : cmdset->read_status))
- break;
- pxa3xx_nand_do_cmd(info, NDSR_RDDREQ);
+ default:
+ exec_cmd = 0;
+ printk(KERN_ERR "pxa3xx-nand: non-supported"
+ " command %x\n", command);
break;
- case NAND_CMD_RESET:
- if (prepare_other_cmd(info, cmdset->reset))
- break;
+ }
- ret = pxa3xx_nand_do_cmd(info, NDSR_CS0_CMDD);
- if (ret == 0) {
- int timeout = 2;
- uint32_t ndcr;
+ return exec_cmd;
+}
- while (timeout--) {
- if (nand_readl(info, NDSR) & NDSR_RDY)
- break;
- msleep(10);
- }
+static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
+{
+ struct pxa3xx_nand_info *info = mtd->priv;
+ int ret, exec_cmd;
- ndcr = nand_readl(info, NDCR);
- nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
+ /*
+	 * if this is an x16 device, then convert the input
+ * "byte" address into a "word" address appropriate
+ * for indexing a word-oriented device
+ */
+ if (info->reg_ndcr & NDCR_DWIDTH_M)
+ column /= 2;
+
+ exec_cmd = prepare_command_pool(info, command, column, page_addr);
+ if (exec_cmd) {
+ init_completion(&info->cmd_complete);
+ pxa3xx_nand_start(info);
+
+ ret = wait_for_completion_timeout(&info->cmd_complete,
+ CHIP_DELAY_TIMEOUT);
+ if (!ret) {
+ printk(KERN_ERR "Wait time out!!!\n");
+ /* Stop State Machine for next command cycle */
+ pxa3xx_nand_stop(info);
}
- break;
- default:
- printk(KERN_ERR "non-supported command.\n");
- break;
+ info->state = STATE_IDLE;
}
+}
+
+static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf)
+{
+ chip->write_buf(mtd, buf, mtd->writesize);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+}
- if (info->retcode == ERR_DBERR) {
- printk(KERN_ERR "double bit error @ page %08x\n", page_addr);
- info->retcode = ERR_NONE;
+static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int page)
+{
+ struct pxa3xx_nand_info *info = mtd->priv;
+
+ chip->read_buf(mtd, buf, mtd->writesize);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (info->retcode == ERR_SBERR) {
+ switch (info->use_ecc) {
+ case 1:
+ mtd->ecc_stats.corrected++;
+ break;
+ case 0:
+ default:
+ break;
+ }
+ } else if (info->retcode == ERR_DBERR) {
+ /*
+ * for blank page (all 0xff), HW will calculate its ECC as
+ * 0, which is different from the ECC information within
+ * OOB, ignore such double bit errors
+ */
+ if (is_buf_blank(buf, mtd->writesize))
+ mtd->ecc_stats.failed++;
}
+
+ return 0;
}
static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
@@ -769,73 +765,12 @@ static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
return 0;
}
-static void pxa3xx_nand_ecc_hwctl(struct mtd_info *mtd, int mode)
-{
- return;
-}
-
-static int pxa3xx_nand_ecc_calculate(struct mtd_info *mtd,
- const uint8_t *dat, uint8_t *ecc_code)
-{
- return 0;
-}
-
-static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd,
- uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc)
-{
- struct pxa3xx_nand_info *info = mtd->priv;
- /*
- * Any error include ERR_SEND_CMD, ERR_DBERR, ERR_BUSERR, we
- * consider it as a ecc error which will tell the caller the
- * read fail We have distinguish all the errors, but the
- * nand_read_ecc only check this function return value
- *
- * Corrected (single-bit) errors must also be noted.
- */
- if (info->retcode == ERR_SBERR)
- return 1;
- else if (info->retcode != ERR_NONE)
- return -1;
-
- return 0;
-}
-
-static int __readid(struct pxa3xx_nand_info *info, uint32_t *id)
-{
- const struct pxa3xx_nand_cmdset *cmdset = info->cmdset;
- uint32_t ndcr;
- uint8_t id_buff[8];
-
- if (prepare_other_cmd(info, cmdset->read_id)) {
- printk(KERN_ERR "failed to prepare command\n");
- return -EINVAL;
- }
-
- /* Send command */
- if (write_cmd(info))
- goto fail_timeout;
-
- /* Wait for CMDDM(command done successfully) */
- if (wait_for_event(info, NDSR_RDDREQ))
- goto fail_timeout;
-
- __raw_readsl(info->mmio_base + NDDB, id_buff, 2);
- *id = id_buff[0] | (id_buff[1] << 8);
- return 0;
-
-fail_timeout:
- ndcr = nand_readl(info, NDCR);
- nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
- udelay(10);
- return -ETIMEDOUT;
-}
-
static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
const struct pxa3xx_nand_flash *f)
{
struct platform_device *pdev = info->pdev;
struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
- uint32_t ndcr = 0x00000FFF; /* disable all interrupts */
+ uint32_t ndcr = 0x0; /* enable all interrupts */
if (f->page_size != 2048 && f->page_size != 512)
return -EINVAL;
@@ -844,9 +779,8 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
return -EINVAL;
/* calculate flash information */
- info->cmdset = f->cmdset;
+ info->cmdset = &default_cmdset;
info->page_size = f->page_size;
- info->oob_buff = info->data_buff + f->page_size;
info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
/* calculate addressing information */
@@ -876,87 +810,18 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
uint32_t ndcr = nand_readl(info, NDCR);
- struct nand_flash_dev *type = NULL;
- uint32_t id = -1, page_per_block, num_blocks;
- int i;
-
- page_per_block = ndcr & NDCR_PG_PER_BLK ? 64 : 32;
info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
- /* set info fields needed to __readid */
+ /* set info fields needed to read id */
info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
info->reg_ndcr = ndcr;
info->cmdset = &default_cmdset;
- if (__readid(info, &id))
- return -ENODEV;
-
- /* Lookup the flash id */
- id = (id >> 8) & 0xff; /* device id is byte 2 */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (id == nand_flash_ids[i].id) {
- type = &nand_flash_ids[i];
- break;
- }
- }
-
- if (!type)
- return -ENODEV;
-
- /* fill the missing flash information */
- i = __ffs(page_per_block * info->page_size);
- num_blocks = type->chipsize << (20 - i);
-
- /* calculate addressing information */
- info->col_addr_cycles = (info->page_size == 2048) ? 2 : 1;
-
- if (num_blocks * page_per_block > 65536)
- info->row_addr_cycles = 3;
- else
- info->row_addr_cycles = 2;
-
info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
return 0;
}
-static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info,
- const struct pxa3xx_nand_platform_data *pdata)
-{
- const struct pxa3xx_nand_flash *f;
- uint32_t id = -1;
- int i;
-
- if (pdata->keep_config)
- if (pxa3xx_nand_detect_config(info) == 0)
- return 0;
-
- /* we use default timing to detect id */
- f = DEFAULT_FLASH_TYPE;
- pxa3xx_nand_config_flash(info, f);
- if (__readid(info, &id))
- goto fail_detect;
-
- for (i=0; i<ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1; i++) {
- /* we first choose the flash definition from platfrom */
- if (i < pdata->num_flash)
- f = pdata->flash + i;
- else
- f = &builtin_flash_types[i - pdata->num_flash + 1];
- if (f->chip_id == id) {
- dev_info(&info->pdev->dev, "detect chip id: 0x%x\n", id);
- pxa3xx_nand_config_flash(info, f);
- return 0;
- }
- }
-
- dev_warn(&info->pdev->dev,
- "failed to detect configured nand flash; found %04x instead of\n",
- id);
-fail_detect:
- return -ENODEV;
-}
-
/* the maximum possible buffer size for large page with OOB data
* is: 2048 + 64 = 2112 bytes, allocate a page here for both the
* data buffer and the DMA descriptor
@@ -998,82 +863,144 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
return 0;
}
-static struct nand_ecclayout hw_smallpage_ecclayout = {
- .eccbytes = 6,
- .eccpos = {8, 9, 10, 11, 12, 13 },
- .oobfree = { {2, 6} }
-};
+static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
+{
+ struct mtd_info *mtd = info->mtd;
+ struct nand_chip *chip = mtd->priv;
-static struct nand_ecclayout hw_largepage_ecclayout = {
- .eccbytes = 24,
- .eccpos = {
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63},
- .oobfree = { {2, 38} }
-};
+ /* use the common timing to make a try */
+ pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
+ chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
+ if (info->is_ready)
+ return 1;
+ else
+ return 0;
+}
-static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
- struct pxa3xx_nand_info *info)
+static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
- struct nand_chip *this = &info->nand_chip;
-
- this->options = (info->reg_ndcr & NDCR_DWIDTH_C) ? NAND_BUSWIDTH_16: 0;
-
- this->waitfunc = pxa3xx_nand_waitfunc;
- this->select_chip = pxa3xx_nand_select_chip;
- this->dev_ready = pxa3xx_nand_dev_ready;
- this->cmdfunc = pxa3xx_nand_cmdfunc;
- this->read_word = pxa3xx_nand_read_word;
- this->read_byte = pxa3xx_nand_read_byte;
- this->read_buf = pxa3xx_nand_read_buf;
- this->write_buf = pxa3xx_nand_write_buf;
- this->verify_buf = pxa3xx_nand_verify_buf;
-
- this->ecc.mode = NAND_ECC_HW;
- this->ecc.hwctl = pxa3xx_nand_ecc_hwctl;
- this->ecc.calculate = pxa3xx_nand_ecc_calculate;
- this->ecc.correct = pxa3xx_nand_ecc_correct;
- this->ecc.size = info->page_size;
-
- if (info->page_size == 2048)
- this->ecc.layout = &hw_largepage_ecclayout;
+ struct pxa3xx_nand_info *info = mtd->priv;
+ struct platform_device *pdev = info->pdev;
+ struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct nand_flash_dev pxa3xx_flash_ids[2] = { {NULL,}, {NULL,} };
+ const struct pxa3xx_nand_flash *f = NULL;
+ struct nand_chip *chip = mtd->priv;
+ uint32_t id = -1;
+ uint64_t chipsize;
+ int i, ret, num;
+
+ if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
+ goto KEEP_CONFIG;
+
+ ret = pxa3xx_nand_sensing(info);
+ if (!ret) {
+ kfree(mtd);
+ info->mtd = NULL;
+ printk(KERN_INFO "There is no nand chip on cs 0!\n");
+
+ return -EINVAL;
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
+ id = *((uint16_t *)(info->data_buff));
+ if (id != 0)
+ printk(KERN_INFO "Detect a flash id %x\n", id);
+ else {
+ kfree(mtd);
+ info->mtd = NULL;
+ printk(KERN_WARNING "Read out ID 0, potential timing set wrong!!\n");
+
+ return -EINVAL;
+ }
+
+ num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
+ for (i = 0; i < num; i++) {
+ if (i < pdata->num_flash)
+ f = pdata->flash + i;
+ else
+ f = &builtin_flash_types[i - pdata->num_flash + 1];
+
+ /* find the chip in default list */
+ if (f->chip_id == id)
+ break;
+ }
+
+ if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
+ kfree(mtd);
+ info->mtd = NULL;
+ printk(KERN_ERR "ERROR!! flash not defined!!!\n");
+
+ return -EINVAL;
+ }
+
+ pxa3xx_nand_config_flash(info, f);
+ pxa3xx_flash_ids[0].name = f->name;
+ pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff;
+ pxa3xx_flash_ids[0].pagesize = f->page_size;
+ chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
+ pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
+ pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
+ if (f->flash_width == 16)
+ pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
+KEEP_CONFIG:
+ if (nand_scan_ident(mtd, 1, pxa3xx_flash_ids))
+ return -ENODEV;
+ /* calculate addressing information */
+ info->col_addr_cycles = (mtd->writesize >= 2048) ? 2 : 1;
+ info->oob_buff = info->data_buff + mtd->writesize;
+ if ((mtd->size >> chip->page_shift) > 65536)
+ info->row_addr_cycles = 3;
else
- this->ecc.layout = &hw_smallpage_ecclayout;
+ info->row_addr_cycles = 2;
+ mtd->name = mtd_names[0];
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = f->page_size;
+
+ chip->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;
+ chip->options |= NAND_NO_AUTOINCR;
+ chip->options |= NAND_NO_READRDY;
- this->chip_delay = 25;
+ return nand_scan_tail(mtd);
}
-static int pxa3xx_nand_probe(struct platform_device *pdev)
+static
+struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev)
{
- struct pxa3xx_nand_platform_data *pdata;
struct pxa3xx_nand_info *info;
- struct nand_chip *this;
+ struct nand_chip *chip;
struct mtd_info *mtd;
struct resource *r;
- int ret = 0, irq;
-
- pdata = pdev->dev.platform_data;
-
- if (!pdata) {
- dev_err(&pdev->dev, "no platform data defined\n");
- return -ENODEV;
- }
+ int ret, irq;
mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
GFP_KERNEL);
if (!mtd) {
dev_err(&pdev->dev, "failed to allocate memory\n");
- return -ENOMEM;
+ return NULL;
}
info = (struct pxa3xx_nand_info *)(&mtd[1]);
+ chip = (struct nand_chip *)(&mtd[1]);
info->pdev = pdev;
-
- this = &info->nand_chip;
+ info->mtd = mtd;
mtd->priv = info;
mtd->owner = THIS_MODULE;
+ chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
+ chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
+ chip->controller = &info->controller;
+ chip->waitfunc = pxa3xx_nand_waitfunc;
+ chip->select_chip = pxa3xx_nand_select_chip;
+ chip->dev_ready = pxa3xx_nand_dev_ready;
+ chip->cmdfunc = pxa3xx_nand_cmdfunc;
+ chip->read_word = pxa3xx_nand_read_word;
+ chip->read_byte = pxa3xx_nand_read_byte;
+ chip->read_buf = pxa3xx_nand_read_buf;
+ chip->write_buf = pxa3xx_nand_write_buf;
+ chip->verify_buf = pxa3xx_nand_verify_buf;
+
+ spin_lock_init(&chip->controller->lock);
+ init_waitqueue_head(&chip->controller->wq);
info->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(info->clk)) {
dev_err(&pdev->dev, "failed to get nand clock\n");
@@ -1141,43 +1068,12 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
goto fail_free_buf;
}
- ret = pxa3xx_nand_detect_flash(info, pdata);
- if (ret) {
- dev_err(&pdev->dev, "failed to detect flash\n");
- ret = -ENODEV;
- goto fail_free_irq;
- }
-
- pxa3xx_nand_init_mtd(mtd, info);
-
- platform_set_drvdata(pdev, mtd);
-
- if (nand_scan(mtd, 1)) {
- dev_err(&pdev->dev, "failed to scan nand\n");
- ret = -ENXIO;
- goto fail_free_irq;
- }
-
-#ifdef CONFIG_MTD_PARTITIONS
- if (mtd_has_cmdlinepart()) {
- static const char *probes[] = { "cmdlinepart", NULL };
- struct mtd_partition *parts;
- int nr_parts;
-
- nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0);
-
- if (nr_parts)
- return add_mtd_partitions(mtd, parts, nr_parts);
- }
+ platform_set_drvdata(pdev, info);
- return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
-#else
- return 0;
-#endif
+ return info;
-fail_free_irq:
- free_irq(irq, info);
fail_free_buf:
+ free_irq(irq, info);
if (use_dma) {
pxa_free_dma(info->data_dma_ch);
dma_free_coherent(&pdev->dev, info->data_buff_size,
@@ -1193,22 +1089,18 @@ fail_put_clk:
clk_put(info->clk);
fail_free_mtd:
kfree(mtd);
- return ret;
+ return NULL;
}
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
- struct mtd_info *mtd = platform_get_drvdata(pdev);
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = info->mtd;
struct resource *r;
int irq;
platform_set_drvdata(pdev, NULL);
- del_mtd_device(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(mtd);
-#endif
irq = platform_get_irq(pdev, 0);
if (irq >= 0)
free_irq(irq, info);
@@ -1226,17 +1118,62 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
clk_disable(info->clk);
clk_put(info->clk);
- kfree(mtd);
+ if (mtd) {
+ del_mtd_device(mtd);
+#ifdef CONFIG_MTD_PARTITIONS
+ del_mtd_partitions(mtd);
+#endif
+ kfree(mtd);
+ }
return 0;
}
+static int pxa3xx_nand_probe(struct platform_device *pdev)
+{
+ struct pxa3xx_nand_platform_data *pdata;
+ struct pxa3xx_nand_info *info;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data defined\n");
+ return -ENODEV;
+ }
+
+ info = alloc_nand_resource(pdev);
+ if (info == NULL)
+ return -ENOMEM;
+
+ if (pxa3xx_nand_scan(info->mtd)) {
+ dev_err(&pdev->dev, "failed to scan nand\n");
+ pxa3xx_nand_remove(pdev);
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ if (mtd_has_cmdlinepart()) {
+ const char *probes[] = { "cmdlinepart", NULL };
+ struct mtd_partition *parts;
+ int nr_parts;
+
+ nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0);
+
+ if (nr_parts)
+ return add_mtd_partitions(info->mtd, parts, nr_parts);
+ }
+
+ return add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts);
+#else
+ return 0;
+#endif
+}
+
#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = info->mtd;
- if (info->state != STATE_READY) {
+ if (info->state) {
dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
return -EAGAIN;
}
@@ -1246,8 +1183,8 @@ static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
static int pxa3xx_nand_resume(struct platform_device *pdev)
{
- struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = info->mtd;
nand_writel(info, NDTR0CS0, info->ndtr0cs0);
nand_writel(info, NDTR1CS0, info->ndtr1cs0);
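
To make the new command path easier to follow: for a large-page (>= PAGE_CHUNK_SIZE) NAND_CMD_READ0, prepare_command_pool() above ends up packing the three command-buffer words roughly as sketched below; pxa3xx_nand_irq() then writes them to NDCB0 when NDSR_WRCMDREQ fires and completes cmd_complete once NDSR_CS0_CMDD is seen.

	ndcb0 = NDCB0_CMD_TYPE(0) | NDCB0_DBC		/* double-byte read command */
		| NDCB0_ADDR_CYC(info->row_addr_cycles + info->col_addr_cycles)
		| info->cmdset->read1;
	ndcb1 = ((page_addr & 0xFFFF) << 16) | (column & 0xFFFF);
	ndcb2 = (page_addr & 0xFF0000) >> 16;		/* third row-address cycle, if present */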
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 14a49abe057e..f591f615d3f6 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -629,6 +629,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
struct omap_onenand_platform_data *pdata;
struct omap2_onenand *c;
+ struct onenand_chip *this;
int r;
pdata = pdev->dev.platform_data;
@@ -726,9 +727,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
c->mtd.dev.parent = &pdev->dev;
+ this = &c->onenand;
if (c->dma_channel >= 0) {
- struct onenand_chip *this = &c->onenand;
-
this->wait = omap2_onenand_wait;
if (cpu_is_omap34xx()) {
this->read_bufferram = omap3_onenand_read_bufferram;
@@ -749,6 +749,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
c->onenand.disable = omap2_onenand_disable;
}
+ if (pdata->skip_initial_unlocking)
+ this->options |= ONENAND_SKIP_INITIAL_UNLOCKING;
+
if ((r = onenand_scan(&c->mtd, 1)) < 0)
goto err_release_regulator;
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index bac41caa8df7..56a8b2005bda 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1132,6 +1132,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
onenand_update_bufferram(mtd, from, !ret);
if (ret == -EBADMSG)
ret = 0;
+ if (ret)
+ break;
}
this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
@@ -1646,11 +1648,10 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr,
int ret = 0;
int thislen, column;
+ column = addr & (this->writesize - 1);
+
while (len != 0) {
- thislen = min_t(int, this->writesize, len);
- column = addr & (this->writesize - 1);
- if (column + thislen > this->writesize)
- thislen = this->writesize - column;
+ thislen = min_t(int, this->writesize - column, len);
this->command(mtd, ONENAND_CMD_READ, addr, this->writesize);
@@ -1664,12 +1665,13 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr,
this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize);
- if (memcmp(buf, this->verify_buf, thislen))
+ if (memcmp(buf, this->verify_buf + column, thislen))
return -EBADMSG;
len -= thislen;
buf += thislen;
addr += thislen;
+ column = 0;
}
return 0;
@@ -4083,7 +4085,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->writebufsize = mtd->writesize;
/* Unlock whole block */
- this->unlock_all(mtd);
+ if (!(this->options & ONENAND_SKIP_INITIAL_UNLOCKING))
+ this->unlock_all(mtd);
ret = this->scan_bbt(mtd);
if ((!FLEXONENAND(this)) || ret)
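
A worked example of the onenand_verify() change, using hypothetical numbers: with writesize = 2048 (0x800), addr = 0x820 and len = 4096, the first pass now gets column = 0x20 and

	thislen = min_t(int, this->writesize - column, len);	/* 0x7E0 on the first pass */
	/* the page is read into this->verify_buf here */
	if (memcmp(buf, this->verify_buf + column, thislen))	/* compare at the matching offset */
		return -EBADMSG;

so the comparison starts 0x20 bytes into the page buffer; every later pass starts at column 0 and checks a full 0x800-byte chunk. Previously the comparison always began at verify_buf[0], so writes that did not start on a page boundary were verified against the wrong offset.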
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index ac0d6a8613b5..2b0daae4018d 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -64,12 +64,16 @@ struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET);
char *vendor = kmalloc(vendor_len, GFP_KERNEL);
+ if (!vendor)
+ goto error1;
memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len);
vendor[vendor_len] = 0;
/* Initialize sysfs attributes */
vendor_attribute =
kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
+ if (!vendor_attribute)
+ goto error2;
sysfs_attr_init(&vendor_attribute->dev_attr.attr);
@@ -83,12 +87,24 @@ struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
/* Create array of pointers to the attributes */
attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1),
GFP_KERNEL);
+ if (!attributes)
+ goto error3;
attributes[0] = &vendor_attribute->dev_attr.attr;
/* Finally create the attribute group */
attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ if (!attr_group)
+ goto error4;
attr_group->attrs = attributes;
return attr_group;
+error4:
+ kfree(attributes);
+error3:
+ kfree(vendor_attribute);
+error2:
+ kfree(vendor);
+error1:
+ return NULL;
}
void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
@@ -1178,6 +1194,8 @@ static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
}
ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
+ if (!ftl->disk_attributes)
+ goto error6;
trans->disk_attributes = ftl->disk_attributes;
sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 161feeb7b8b9..627d4e2466a3 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -16,7 +16,7 @@
*
* Test read and write speed of a MTD device.
*
- * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ * Author: Adrian Hunter <adrian.hunter@nokia.com>
*/
#include <linux/init.h>
@@ -33,6 +33,11 @@ static int dev;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
+static int count;
+module_param(count, int, S_IRUGO);
+MODULE_PARM_DESC(count, "Maximum number of eraseblocks to use "
+ "(0 means use all)");
+
static struct mtd_info *mtd;
static unsigned char *iobuf;
static unsigned char *bbt;
@@ -89,6 +94,33 @@ static int erase_eraseblock(int ebnum)
return 0;
}
+static int multiblock_erase(int ebnum, int blocks)
+{
+ int err;
+ struct erase_info ei;
+ loff_t addr = ebnum * mtd->erasesize;
+
+ memset(&ei, 0, sizeof(struct erase_info));
+ ei.mtd = mtd;
+ ei.addr = addr;
+ ei.len = mtd->erasesize * blocks;
+
+ err = mtd->erase(mtd, &ei);
+ if (err) {
+ printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n",
+ err, ebnum, blocks);
+ return err;
+ }
+
+ if (ei.state == MTD_ERASE_FAILED) {
+ printk(PRINT_PREF "some erase error occurred at EB %d,"
+ "blocks %d\n", ebnum, blocks);
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int erase_whole_device(void)
{
int err;
@@ -282,13 +314,16 @@ static inline void stop_timing(void)
static long calc_speed(void)
{
- long ms, k, speed;
+ uint64_t k;
+ long ms;
ms = (finish.tv_sec - start.tv_sec) * 1000 +
(finish.tv_usec - start.tv_usec) / 1000;
- k = goodebcnt * mtd->erasesize / 1024;
- speed = (k * 1000) / ms;
- return speed;
+ if (ms == 0)
+ return 0;
+ k = goodebcnt * (mtd->erasesize / 1024) * 1000;
+ do_div(k, ms);
+ return k;
}
static int scan_for_bad_eraseblocks(void)
@@ -320,13 +355,16 @@ out:
static int __init mtd_speedtest_init(void)
{
- int err, i;
+ int err, i, blocks, j, k;
long speed;
uint64_t tmp;
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ if (count)
+ printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count);
+ else
+ printk(PRINT_PREF "MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
@@ -353,6 +391,9 @@ static int __init mtd_speedtest_init(void)
(unsigned long long)mtd->size, mtd->erasesize,
pgsize, ebcnt, pgcnt, mtd->oobsize);
+ if (count > 0 && count < ebcnt)
+ ebcnt = count;
+
err = -ENOMEM;
iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!iobuf) {
@@ -484,6 +525,31 @@ static int __init mtd_speedtest_init(void)
speed = calc_speed();
printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed);
+ /* Multi-block erase all eraseblocks */
+ for (k = 1; k < 7; k++) {
+ blocks = 1 << k;
+ printk(PRINT_PREF "Testing %dx multi-block erase speed\n",
+ blocks);
+ start_timing();
+ for (i = 0; i < ebcnt; ) {
+ for (j = 0; j < blocks && (i + j) < ebcnt; j++)
+ if (bbt[i + j])
+ break;
+ if (j < 1) {
+ i++;
+ continue;
+ }
+ err = multiblock_erase(i, j);
+ if (err)
+ goto out;
+ cond_resched();
+ i += j;
+ }
+ stop_timing();
+ speed = calc_speed();
+ printk(PRINT_PREF "%dx multi-block erase speed is %ld KiB/s\n",
+ blocks, speed);
+ }
printk(PRINT_PREF "finished\n");
out:
kfree(iobuf);
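
The calc_speed() rework addresses two weaknesses of the old 32-bit arithmetic: with large devices the byte count scaled by 1000 can overflow a 32-bit long, and a very fast run with ms == 0 would divide by zero. A standalone sketch of the same computation (helper name illustrative), using do_div(), the kernel's in-place 64-bit division helper:

	static long speed_kib_per_s(uint64_t goodebcnt, uint32_t erasesize, long ms)
	{
		uint64_t k;

		if (ms == 0)
			return 0;				/* avoid divide-by-zero */
		k = goodebcnt * (erasesize / 1024) * 1000;	/* KiB moved, scaled for per-second */
		do_div(k, ms);					/* 64-bit dividend works on 32-bit kernels */
		return k;
	}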
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index 11204e8aab5f..334eae53a3db 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -394,6 +394,11 @@ static int __init mtd_subpagetest_init(void)
}
subpgsize = mtd->writesize >> mtd->subpage_sft;
+ tmp = mtd->size;
+ do_div(tmp, mtd->erasesize);
+ ebcnt = tmp;
+ pgcnt = mtd->erasesize / mtd->writesize;
+
printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
"page size %u, subpage size %u, count of eraseblocks %u, "
"pages per eraseblock %u, OOB size %u\n",
@@ -413,11 +418,6 @@ static int __init mtd_subpagetest_init(void)
goto out;
}
- tmp = mtd->size;
- do_div(tmp, mtd->erasesize);
- ebcnt = tmp;
- pgcnt = mtd->erasesize / mtd->writesize;
-
err = scan_for_bad_eraseblocks();
if (err)
goto out;
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index f142cc21e453..deaa8bc16cf8 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -711,14 +711,14 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
return -EBUSY;
r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
if (!r2) {
- release_resource(r1);
+ release_mem_region(base_addr, sizeof(struct lance_regs));
return -EBUSY;
}
dev = alloc_etherdev(sizeof(struct lance_private));
if (dev == NULL) {
- release_resource(r1);
- release_resource(r2);
+ release_mem_region(base_addr, sizeof(struct lance_regs));
+ release_mem_region(mem_start, A2065_RAM_SIZE);
return -ENOMEM;
}
@@ -764,8 +764,8 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
err = register_netdev(dev);
if (err) {
- release_resource(r1);
- release_resource(r2);
+ release_mem_region(base_addr, sizeof(struct lance_regs));
+ release_mem_region(mem_start, A2065_RAM_SIZE);
free_netdev(dev);
return err;
}
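
The substitution here matters beyond style: request_mem_region() allocates a struct resource, and tearing it down with release_resource() only unlinks that resource without freeing it, so release_mem_region() with the same base and size is the matching cleanup. A minimal sketch of the pairing (the region name is illustrative):

	if (!request_mem_region(base_addr, sizeof(struct lance_regs), "Am7990"))
		return -EBUSY;
	/* device setup happens here */
	release_mem_region(base_addr, sizeof(struct lance_regs));	/* matches request_mem_region() */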
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index 7ca0eded2561..b7f45cd756a2 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -182,14 +182,14 @@ static int __devinit ariadne_init_one(struct zorro_dev *z,
return -EBUSY;
r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM");
if (!r2) {
- release_resource(r1);
+ release_mem_region(base_addr, sizeof(struct Am79C960));
return -EBUSY;
}
dev = alloc_etherdev(sizeof(struct ariadne_private));
if (dev == NULL) {
- release_resource(r1);
- release_resource(r2);
+ release_mem_region(base_addr, sizeof(struct Am79C960));
+ release_mem_region(mem_start, ARIADNE_RAM_SIZE);
return -ENOMEM;
}
@@ -213,8 +213,8 @@ static int __devinit ariadne_init_one(struct zorro_dev *z,
err = register_netdev(dev);
if (err) {
- release_resource(r1);
- release_resource(r2);
+ release_mem_region(base_addr, sizeof(struct Am79C960));
+ release_mem_region(mem_start, ARIADNE_RAM_SIZE);
free_netdev(dev);
return err;
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 338bea147c64..16d6fe954695 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1482,21 +1482,16 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct slave *slave;
- struct net_device *bond_dev;
struct bonding *bond;
- slave = bond_slave_get_rcu(skb->dev);
- bond_dev = ACCESS_ONCE(slave->dev->master);
- if (unlikely(!bond_dev))
- return RX_HANDLER_PASS;
-
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return RX_HANDLER_CONSUMED;
*pskb = skb;
- bond = netdev_priv(bond_dev);
+ slave = bond_slave_get_rcu(skb->dev);
+ bond = slave->bond;
if (bond->params.arp_interval)
slave->dev->last_rx = jiffies;
@@ -1505,10 +1500,10 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_EXACT;
}
- skb->dev = bond_dev;
+ skb->dev = bond->dev;
if (bond->params.mode == BOND_MODE_ALB &&
- bond_dev->priv_flags & IFF_BRIDGE_PORT &&
+ bond->dev->priv_flags & IFF_BRIDGE_PORT &&
skb->pkt_type == PACKET_HOST) {
if (unlikely(skb_cow_head(skb,
@@ -1516,7 +1511,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
- memcpy(eth_hdr(skb)->h_dest, bond_dev->dev_addr, ETH_ALEN);
+ memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
}
return RX_HANDLER_ANOTHER;
@@ -1698,20 +1693,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
pr_debug("Error %d calling netdev_set_bond_master\n", res);
goto err_restore_mac;
}
- res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
- new_slave);
- if (res) {
- pr_debug("Error %d calling netdev_rx_handler_register\n", res);
- goto err_unset_master;
- }
/* open the slave since the application closed it */
res = dev_open(slave_dev);
if (res) {
pr_debug("Opening slave %s failed\n", slave_dev->name);
- goto err_unreg_rxhandler;
+ goto err_unset_master;
}
+ new_slave->bond = bond;
new_slave->dev = slave_dev;
slave_dev->priv_flags |= IFF_BONDING;
@@ -1907,6 +1897,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (res)
goto err_close;
+ res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
+ new_slave);
+ if (res) {
+ pr_debug("Error %d calling netdev_rx_handler_register\n", res);
+ goto err_dest_symlinks;
+ }
+
pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
bond_dev->name, slave_dev->name,
bond_is_active_slave(new_slave) ? "n active" : " backup",
@@ -1916,13 +1913,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return 0;
/* Undo stages on error */
+err_dest_symlinks:
+ bond_destroy_slave_symlinks(bond_dev, slave_dev);
+
err_close:
dev_close(slave_dev);
-err_unreg_rxhandler:
- netdev_rx_handler_unregister(slave_dev);
- synchronize_net();
-
err_unset_master:
netdev_set_bond_master(slave_dev, NULL);
@@ -1988,6 +1984,14 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
return -EINVAL;
}
+ /* unregister rx_handler early so bond_handle_frame wouldn't be called
+ * for this slave anymore.
+ */
+ netdev_rx_handler_unregister(slave_dev);
+ write_unlock_bh(&bond->lock);
+ synchronize_net();
+ write_lock_bh(&bond->lock);
+
if (!bond->params.fail_over_mac) {
if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond->slave_cnt > 1)
@@ -2104,8 +2108,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
netif_addr_unlock_bh(bond_dev);
}
- netdev_rx_handler_unregister(slave_dev);
- synchronize_net();
netdev_set_bond_master(slave_dev, NULL);
slave_disable_netpoll(slave);
@@ -2186,6 +2188,12 @@ static int bond_release_all(struct net_device *bond_dev)
*/
write_unlock_bh(&bond->lock);
+ /* unregister rx_handler early so bond_handle_frame wouldn't
+ * be called for this slave anymore.
+ */
+ netdev_rx_handler_unregister(slave_dev);
+ synchronize_net();
+
if (bond_is_lb(bond)) {
/* must be called only after the slave
* has been detached from the list
@@ -2217,8 +2225,6 @@ static int bond_release_all(struct net_device *bond_dev)
netif_addr_unlock_bh(bond_dev);
}
- netdev_rx_handler_unregister(slave_dev);
- synchronize_net();
netdev_set_bond_master(slave_dev, NULL);
slave_disable_netpoll(slave);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 6b26962fd0ec..90736cb4d975 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -187,6 +187,7 @@ struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct slave *next;
struct slave *prev;
+ struct bonding *bond; /* our master */
int delay;
unsigned long jiffies;
unsigned long last_arp_rx;
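
The reordering in bond_release()/bond_release_all() follows the usual RCU teardown sequence: stop new invocations of bond_handle_frame() first, then wait for handlers already running before the slave data is torn down. Because synchronize_net() may sleep, the bond write lock is dropped around it, as sketched here (directly mirroring the hunks above):

	netdev_rx_handler_unregister(slave_dev);	/* no new bond_handle_frame() calls */
	write_unlock_bh(&bond->lock);			/* synchronize_net() may sleep */
	synchronize_net();				/* wait out handlers already in flight */
	write_lock_bh(&bond->lock);			/* now safe to detach the slave */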
diff --git a/drivers/net/davinci_cpdma.c b/drivers/net/davinci_cpdma.c
index e92b2b6cd8c4..ae47f23ba930 100644
--- a/drivers/net/davinci_cpdma.c
+++ b/drivers/net/davinci_cpdma.c
@@ -76,6 +76,7 @@ struct cpdma_desc {
struct cpdma_desc_pool {
u32 phys;
+ u32 hw_addr;
void __iomem *iomap; /* ioremap map */
void *cpumap; /* dma_alloc map */
int desc_size, mem_size;
@@ -137,7 +138,8 @@ struct cpdma_chan {
* abstract out these details
*/
static struct cpdma_desc_pool *
-cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
+cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
+ int size, int align)
{
int bitmap_size;
struct cpdma_desc_pool *pool;
@@ -161,10 +163,12 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
if (phys) {
pool->phys = phys;
pool->iomap = ioremap(phys, size);
+ pool->hw_addr = hw_addr;
} else {
pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
GFP_KERNEL);
pool->iomap = (void __force __iomem *)pool->cpumap;
+ pool->hw_addr = pool->phys;
}
if (pool->iomap)
@@ -201,14 +205,14 @@ static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
{
if (!desc)
return 0;
- return pool->phys + (__force dma_addr_t)desc -
+ return pool->hw_addr + (__force dma_addr_t)desc -
(__force dma_addr_t)pool->iomap;
}
static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
- return dma ? pool->iomap + dma - pool->phys : NULL;
+ return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}
static struct cpdma_desc __iomem *
@@ -260,6 +264,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
ctlr->params.desc_mem_phys,
+ ctlr->params.desc_hw_addr,
ctlr->params.desc_mem_size,
ctlr->params.desc_align);
if (!ctlr->pool) {
diff --git a/drivers/net/davinci_cpdma.h b/drivers/net/davinci_cpdma.h
index 868e50ebde45..afa19a0c0d81 100644
--- a/drivers/net/davinci_cpdma.h
+++ b/drivers/net/davinci_cpdma.h
@@ -33,6 +33,7 @@ struct cpdma_params {
bool has_soft_reset;
int min_packet_size;
u32 desc_mem_phys;
+ u32 desc_hw_addr;
int desc_mem_size;
int desc_align;
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 082d6ea69920..baca6bfcb089 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1854,10 +1854,13 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
dma_params.rxcp = priv->emac_base + 0x660;
dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS;
dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE;
- dma_params.desc_mem_phys = hw_ram_addr;
+ dma_params.desc_hw_addr = hw_ram_addr;
dma_params.desc_mem_size = pdata->ctrl_ram_size;
dma_params.desc_align = 16;
+ dma_params.desc_mem_phys = pdata->no_bd_ram ? 0 :
+ (u32 __force)res->start + pdata->ctrl_ram_offset;
+
priv->dma = cpdma_ctlr_create(&dma_params);
if (!priv->dma) {
dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n");
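
The new desc_hw_addr field separates the two views of the descriptor RAM: desc_mem_phys is the CPU-visible physical address the pool ioremap()s, while desc_hw_addr is the address the DMA engine must be given; on EMAC with on-chip buffer descriptor RAM the two can differ (res->start + ctrl_ram_offset versus hw_ram_addr). The translation in desc_phys()/desc_from_phys() therefore offsets against hw_addr, roughly:

	/* descriptor pointer -> address handed to the hardware (sketch of desc_phys() above) */
	hw = pool->hw_addr + ((dma_addr_t)desc - (dma_addr_t)pool->iomap);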
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 3a4277f6fac4..116cae334dad 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -62,6 +62,9 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
} else
obj = -1;
+ if (obj != -1)
+ --bitmap->avail;
+
spin_unlock(&bitmap->lock);
return obj;
@@ -101,11 +104,19 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
} else
obj = -1;
+ if (obj != -1)
+ bitmap->avail -= cnt;
+
spin_unlock(&bitmap->lock);
return obj;
}
+u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
+{
+ return bitmap->avail;
+}
+
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
{
obj &= bitmap->max + bitmap->reserved_top - 1;
@@ -115,6 +126,7 @@ void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
bitmap->last = min(bitmap->last, obj);
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
+ bitmap->avail += cnt;
spin_unlock(&bitmap->lock);
}
@@ -130,6 +142,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
bitmap->max = num - reserved_top;
bitmap->mask = mask;
bitmap->reserved_top = reserved_top;
+ bitmap->avail = num - reserved_top - reserved_bot;
spin_lock_init(&bitmap->lock);
bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
sizeof (long), GFP_KERNEL);
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 7cd34e9c7c7e..bd8ef9f2fa71 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -198,7 +198,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
u64 mtt_addr;
int err;
- if (vector >= dev->caps.num_comp_vectors)
+ if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
return -EINVAL;
cq->vector = vector;
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index 21786ad4455e..ec4b6d047fe0 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -51,13 +51,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
int err;
cq->size = entries;
- if (mode == RX) {
+ if (mode == RX)
cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
- cq->vector = ring % mdev->dev->caps.num_comp_vectors;
- } else {
+ else
cq->buf_size = sizeof(struct mlx4_cqe);
- cq->vector = 0;
- }
cq->ring = ring;
cq->is_tx = mode;
@@ -80,7 +77,8 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
struct mlx4_en_dev *mdev = priv->mdev;
- int err;
+ int err = 0;
+ char name[25];
cq->dev = mdev->pndev[priv->port];
cq->mcq.set_ci_db = cq->wqres.db.db;
@@ -89,6 +87,29 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
*cq->mcq.arm_db = 0;
memset(cq->buf, 0, cq->buf_size);
+ if (cq->is_tx == RX) {
+ if (mdev->dev->caps.comp_pool) {
+ if (!cq->vector) {
+ sprintf(name , "%s-rx-%d", priv->dev->name, cq->ring);
+ if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
+ cq->vector = (cq->ring + 1 + priv->port) %
+ mdev->dev->caps.num_comp_vectors;
+ mlx4_warn(mdev, "Failed Assigning an EQ to "
+ "%s_rx-%d ,Falling back to legacy EQ's\n",
+ priv->dev->name, cq->ring);
+ }
+ }
+ } else {
+ cq->vector = (cq->ring + 1 + priv->port) %
+ mdev->dev->caps.num_comp_vectors;
+ }
+ } else {
+ if (!cq->vector || !mdev->dev->caps.comp_pool) {
+			/* fall back to the legacy pool in case of error */
+ cq->vector = 0;
+ }
+ }
+
if (!cq->is_tx)
cq->size = priv->rx_ring[cq->ring].actual_size;
@@ -112,12 +133,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
return 0;
}
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+ bool reserve_vectors)
{
struct mlx4_en_dev *mdev = priv->mdev;
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+ if (priv->mdev->dev->caps.comp_pool && cq->vector && !reserve_vectors)
+ mlx4_release_eq(priv->mdev->dev, cq->vector);
cq->buf_size = 0;
cq->buf = NULL;
}
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index 056152b3ff58..d54b7abf0225 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -45,7 +45,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id);
+ strncpy(drvinfo->driver, DRV_NAME, 32);
strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
sprintf(drvinfo->fw_version, "%d.%d.%d",
(u16) (mdev->dev->caps.fw_ver >> 32),
@@ -131,8 +131,65 @@ static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
static void mlx4_en_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
- wol->supported = 0;
- wol->wolopts = 0;
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ int err = 0;
+ u64 config = 0;
+
+ if (!priv->mdev->dev->caps.wol) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ return;
+ }
+
+ err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
+ if (err) {
+ en_err(priv, "Failed to get WoL information\n");
+ return;
+ }
+
+ if (config & MLX4_EN_WOL_MAGIC)
+ wol->supported = WAKE_MAGIC;
+ else
+ wol->supported = 0;
+
+ if (config & MLX4_EN_WOL_ENABLED)
+ wol->wolopts = WAKE_MAGIC;
+ else
+ wol->wolopts = 0;
+}
+
+static int mlx4_en_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ u64 config = 0;
+ int err = 0;
+
+ if (!priv->mdev->dev->caps.wol)
+ return -EOPNOTSUPP;
+
+ if (wol->supported & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
+ if (err) {
+ en_err(priv, "Failed to get WoL info, unable to modify\n");
+ return err;
+ }
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
+ MLX4_EN_WOL_MAGIC;
+ } else {
+ config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
+ config |= MLX4_EN_WOL_DO_MODIFY;
+ }
+
+ err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
+ if (err)
+ en_err(priv, "Failed to set WoL information\n");
+
+ return err;
}
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
@@ -388,7 +445,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
mlx4_en_stop_port(dev);
}
- mlx4_en_free_resources(priv);
+ mlx4_en_free_resources(priv, true);
priv->prof->tx_ring_size = tx_size;
priv->prof->rx_ring_size = rx_size;
@@ -442,6 +499,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_ethtool_stats = mlx4_en_get_ethtool_stats,
.self_test = mlx4_en_self_test,
.get_wol = mlx4_en_get_wol,
+ .set_wol = mlx4_en_set_wol,
.get_msglevel = mlx4_en_get_msglevel,
.set_msglevel = mlx4_en_set_msglevel,
.get_coalesce = mlx4_en_get_coalesce,
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 1ff6ca6466ed..9317b61a75b8 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -241,16 +241,18 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
mdev->port_cnt++;
- /* If we did not receive an explicit number of Rx rings, default to
- * the number of completion vectors populated by the mlx4_core */
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
- mlx4_info(mdev, "Using %d tx rings for port:%d\n",
- mdev->profile.prof[i].tx_ring_num, i);
- mdev->profile.prof[i].rx_ring_num = min_t(int,
- roundup_pow_of_two(dev->caps.num_comp_vectors),
- MAX_RX_RINGS);
- mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
- mdev->profile.prof[i].rx_ring_num, i);
+ if (!dev->caps.comp_pool) {
+ mdev->profile.prof[i].rx_ring_num =
+ rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
+ min_t(int,
+ dev->caps.num_comp_vectors,
+ MAX_RX_RINGS)));
+ } else {
+ mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
+ min_t(int, dev->caps.comp_pool/
+ dev->caps.num_ports - 1, MAX_MSIX_P_PORT - 1));
+ }
}
/* Create our own workqueue for reset/multicast tasks
@@ -294,7 +296,7 @@ static struct mlx4_interface mlx4_en_interface = {
.remove = mlx4_en_remove,
.event = mlx4_en_event,
.get_dev = mlx4_en_get_netdev,
- .protocol = MLX4_PROTOCOL_EN,
+ .protocol = MLX4_PROT_ETH,
};
static int __init mlx4_en_init(void)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 897f576b8b17..5762ebde4455 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -156,9 +156,8 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
/* Remove old MAC and insert the new one */
- mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
- err = mlx4_register_mac(mdev->dev, priv->port,
- priv->mac, &priv->mac_index);
+ err = mlx4_replace_mac(mdev->dev, priv->port,
+ priv->base_qpn, priv->mac, 0);
if (err)
en_err(priv, "Failed changing HW MAC address\n");
} else
@@ -214,6 +213,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
u64 mcast_addr = 0;
+ u8 mc_list[16] = {0};
int err;
mutex_lock(&mdev->state_lock);
@@ -239,8 +239,12 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->flags |= MLX4_EN_FLAG_PROMISC;
/* Enable promiscuous mode */
- err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
- priv->base_qpn, 1);
+ if (!mdev->dev->caps.vep_uc_steering)
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+ priv->base_qpn, 1);
+ else
+ err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
+ priv->port);
if (err)
en_err(priv, "Failed enabling "
"promiscuous mode\n");
@@ -252,10 +256,21 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
en_err(priv, "Failed disabling "
"multicast filter\n");
- /* Disable port VLAN filter */
- err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
- if (err)
- en_err(priv, "Failed disabling VLAN filter\n");
+ /* Add the default qp number as multicast promisc */
+ if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+ err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed entering multicast promisc mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+ }
+
+ if (priv->vlgrp) {
+ /* Disable port VLAN filter */
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
+ if (err)
+ en_err(priv, "Failed disabling VLAN filter\n");
+ }
}
goto out;
}
@@ -270,11 +285,24 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->flags &= ~MLX4_EN_FLAG_PROMISC;
/* Disable promiscuous mode */
- err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
- priv->base_qpn, 0);
+ if (!mdev->dev->caps.vep_uc_steering)
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+ priv->base_qpn, 0);
+ else
+ err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
+ priv->port);
if (err)
en_err(priv, "Failed disabling promiscuous mode\n");
+ /* Disable Multicast promisc */
+ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+ err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling multicast promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ }
+
/* Enable port VLAN filter */
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
if (err)
@@ -287,14 +315,38 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
0, MLX4_MCAST_DISABLE);
if (err)
en_err(priv, "Failed disabling multicast filter\n");
+
+ /* Add the default qp number as multicast promisc */
+ if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+ err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed entering multicast promisc mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+ }
} else {
int i;
+ /* Disable Multicast promisc */
+ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+ err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling multicast promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ }
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_DISABLE);
if (err)
en_err(priv, "Failed disabling multicast filter\n");
+ /* Detach our qp from all the multicast addresses */
+ for (i = 0; i < priv->mc_addrs_cnt; i++) {
+ memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+ mc_list[5] = priv->port;
+ mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+ mc_list, MLX4_PROT_ETH);
+ }
/* Flush mcast filter and init it with broadcast address */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
1, MLX4_MCAST_CONFIG);
@@ -307,6 +359,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
for (i = 0; i < priv->mc_addrs_cnt; i++) {
mcast_addr =
mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
+ memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+ mc_list[5] = priv->port;
+ mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
+ mc_list, 0, MLX4_PROT_ETH);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -314,8 +370,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
0, MLX4_MCAST_ENABLE);
if (err)
en_err(priv, "Failed enabling multicast filter\n");
-
- mlx4_en_clear_list(dev);
}
out:
mutex_unlock(&mdev->state_lock);
@@ -417,7 +471,6 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
unsigned long avg_pkt_size;
unsigned long rx_packets;
unsigned long rx_bytes;
- unsigned long rx_byte_diff;
unsigned long tx_packets;
unsigned long tx_pkt_diff;
unsigned long rx_pkt_diff;
@@ -441,25 +494,20 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
rx_pkt_diff = ((unsigned long) (rx_packets -
priv->last_moder_packets));
packets = max(tx_pkt_diff, rx_pkt_diff);
- rx_byte_diff = rx_bytes - priv->last_moder_bytes;
- rx_byte_diff = rx_byte_diff ? rx_byte_diff : 1;
rate = packets * HZ / period;
avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
priv->last_moder_bytes)) / packets : 0;
/* Apply auto-moderation only when packet rate exceeds a rate that
* it matters */
- if (rate > MLX4_EN_RX_RATE_THRESH) {
+ if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
/* If tx and rx packet rates are not balanced, assume that
* traffic is mainly BW bound and apply maximum moderation.
* Otherwise, moderate according to packet rate */
- if (2 * tx_pkt_diff > 3 * rx_pkt_diff &&
- rx_pkt_diff / rx_byte_diff <
- MLX4_EN_SMALL_PKT_SIZE)
- moder_time = priv->rx_usecs_low;
- else if (2 * rx_pkt_diff > 3 * tx_pkt_diff)
+ if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
+ 2 * rx_pkt_diff > 3 * tx_pkt_diff) {
moder_time = priv->rx_usecs_high;
- else {
+ } else {
if (rate < priv->pkt_rate_low)
moder_time = priv->rx_usecs_low;
else if (rate > priv->pkt_rate_high)
@@ -471,9 +519,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
priv->rx_usecs_low;
}
} else {
- /* When packet rate is low, use default moderation rather than
- * 0 to prevent interrupt storms if traffic suddenly increases */
- moder_time = priv->rx_usecs;
+ moder_time = priv->rx_usecs_low;
}
en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
@@ -565,6 +611,8 @@ int mlx4_en_start_port(struct net_device *dev)
int err = 0;
int i;
int j;
+ u8 mc_list[16] = {0};
+ char name[32];
if (priv->port_up) {
en_dbg(DRV, priv, "start port called while port already up\n");
@@ -603,16 +651,35 @@ int mlx4_en_start_port(struct net_device *dev)
++rx_index;
}
+ /* Set port mac number */
+ en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
+ err = mlx4_register_mac(mdev->dev, priv->port,
+ priv->mac, &priv->base_qpn, 0);
+ if (err) {
+ en_err(priv, "Failed setting port mac\n");
+ goto cq_err;
+ }
+ mdev->mac_removed[priv->port] = 0;
+
err = mlx4_en_config_rss_steer(priv);
if (err) {
en_err(priv, "Failed configuring rss steering\n");
- goto cq_err;
+ goto mac_err;
}
+ if (mdev->dev->caps.comp_pool && !priv->tx_vector) {
+ sprintf(name , "%s-tx", priv->dev->name);
+ if (mlx4_assign_eq(mdev->dev , name, &priv->tx_vector)) {
+ mlx4_warn(mdev, "Failed Assigning an EQ to "
+ "%s_tx ,Falling back to legacy "
+ "EQ's\n", priv->dev->name);
+ }
+ }
/* Configure tx cq's and rings */
for (i = 0; i < priv->tx_ring_num; i++) {
/* Configure cq */
cq = &priv->tx_cq[i];
+ cq->vector = priv->tx_vector;
err = mlx4_en_activate_cq(priv, cq);
if (err) {
en_err(priv, "Failed allocating Tx CQ\n");
@@ -659,24 +726,22 @@ int mlx4_en_start_port(struct net_device *dev)
en_err(priv, "Failed setting default qp numbers\n");
goto tx_err;
}
- /* Set port mac number */
- en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
- err = mlx4_register_mac(mdev->dev, priv->port,
- priv->mac, &priv->mac_index);
- if (err) {
- en_err(priv, "Failed setting port mac\n");
- goto tx_err;
- }
- mdev->mac_removed[priv->port] = 0;
/* Init port */
en_dbg(HW, priv, "Initializing port\n");
err = mlx4_INIT_PORT(mdev->dev, priv->port);
if (err) {
en_err(priv, "Failed Initializing port\n");
- goto mac_err;
+ goto tx_err;
}
+ /* Attach rx QP to broadcast address */
+ memset(&mc_list[10], 0xff, ETH_ALEN);
+ mc_list[5] = priv->port;
+ if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+ 0, MLX4_PROT_ETH))
+ mlx4_warn(mdev, "Failed Attaching Broadcast\n");
+
/* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->mcast_task);
@@ -684,8 +749,6 @@ int mlx4_en_start_port(struct net_device *dev)
netif_tx_start_all_queues(dev);
return 0;
-mac_err:
- mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
tx_err:
while (tx_index--) {
mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
@@ -693,6 +756,8 @@ tx_err:
}
mlx4_en_release_rss_steer(priv);
+mac_err:
+ mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
cq_err:
while (rx_index--)
mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -708,6 +773,7 @@ void mlx4_en_stop_port(struct net_device *dev)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int i;
+ u8 mc_list[16] = {0};
if (!priv->port_up) {
en_dbg(DRV, priv, "stop port called while port already down\n");
@@ -722,8 +788,23 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Set port as not active */
priv->port_up = false;
+ /* Detach All multicasts */
+ memset(&mc_list[10], 0xff, ETH_ALEN);
+ mc_list[5] = priv->port;
+ mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+ MLX4_PROT_ETH);
+ for (i = 0; i < priv->mc_addrs_cnt; i++) {
+ memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+ mc_list[5] = priv->port;
+ mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+ mc_list, MLX4_PROT_ETH);
+ }
+ mlx4_en_clear_list(dev);
+ /* Flush multicast filter */
+ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
+
/* Unregister Mac address for the port */
- mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+ mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
mdev->mac_removed[priv->port] = 1;
/* Free TX Rings */
@@ -801,7 +882,6 @@ static int mlx4_en_open(struct net_device *dev)
priv->rx_ring[i].packets = 0;
}
- mlx4_en_set_default_moderation(priv);
err = mlx4_en_start_port(dev);
if (err)
en_err(priv, "Failed starting port:%d\n", priv->port);
@@ -828,7 +908,7 @@ static int mlx4_en_close(struct net_device *dev)
return 0;
}
-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors)
{
int i;
@@ -836,14 +916,14 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
if (priv->tx_ring[i].tx_info)
mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
if (priv->tx_cq[i].buf)
- mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+ mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors);
}
for (i = 0; i < priv->rx_ring_num; i++) {
if (priv->rx_ring[i].rx_info)
mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
if (priv->rx_cq[i].buf)
- mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+ mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors);
}
}
@@ -851,6 +931,13 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
int i;
+ int base_tx_qpn, err;
+
+ err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+ if (err) {
+ en_err(priv, "failed reserving range for TX rings\n");
+ return err;
+ }
/* Create tx Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
@@ -858,7 +945,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
prof->tx_ring_size, i, TX))
goto err;
- if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+ if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
prof->tx_ring_size, TXBB_SIZE))
goto err;
}
@@ -878,6 +965,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
err:
en_err(priv, "Failed to allocate NIC resources\n");
+ mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
return -ENOMEM;
}
@@ -905,7 +993,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
mdev->pndev[priv->port] = NULL;
mutex_unlock(&mdev->state_lock);
- mlx4_en_free_resources(priv);
+ mlx4_en_free_resources(priv, false);
free_netdev(dev);
}
@@ -932,7 +1020,6 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
en_dbg(DRV, priv, "Change MTU called with card down!?\n");
} else {
mlx4_en_stop_port(dev);
- mlx4_en_set_default_moderation(priv);
err = mlx4_en_start_port(dev);
if (err) {
en_err(priv, "Failed restarting port:%d\n",
@@ -1079,7 +1166,25 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
+ /* Configure port */
+ err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+ MLX4_EN_MIN_MTU,
+ 0, 0, 0, 0);
+ if (err) {
+ en_err(priv, "Failed setting port general configurations "
+ "for port %d, with error %d\n", priv->port, err);
+ goto out;
+ }
+
+ /* Init port */
+ en_warn(priv, "Initializing port\n");
+ err = mlx4_INIT_PORT(mdev->dev, priv->port);
+ if (err) {
+ en_err(priv, "Failed Initializing port\n");
+ goto out;
+ }
priv->registered = 1;
+ mlx4_en_set_default_moderation(priv);
queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
return 0;
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index 7f5a3221e0c1..f2a4f5dd313d 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -119,6 +119,10 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
struct mlx4_set_port_rqp_calc_context *context;
int err;
u32 in_mod;
+ u32 m_promisc = (dev->caps.vep_mc_steering) ? MCAST_DIRECT : MCAST_DEFAULT;
+
+ if (dev->caps.vep_mc_steering && dev->caps.vep_uc_steering)
+ return 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -127,8 +131,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
memset(context, 0, sizeof *context);
context->base_qpn = cpu_to_be32(base_qpn);
- context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn);
- context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_MODE_SHIFT | base_qpn);
+ context->n_mac = 0x7;
+ context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
+ base_qpn);
+ context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
+ base_qpn);
context->intra_no_vlan = 0;
context->no_vlan = MLX4_NO_VLAN_IDX;
context->intra_vlan_miss = 0;
@@ -206,7 +213,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
- for (i = 0; i <= priv->tx_ring_num; i++) {
+ for (i = 0; i < priv->tx_ring_num; i++) {
stats->tx_packets += priv->tx_ring[i].packets;
stats->tx_bytes += priv->tx_ring[i].bytes;
}
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
index 092e814b1981..e3d73e41c567 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/mlx4/en_port.h
@@ -36,8 +36,8 @@
#define SET_PORT_GEN_ALL_VALID 0x7
-#define SET_PORT_PROMISC_EN_SHIFT 31
-#define SET_PORT_PROMISC_MODE_SHIFT 30
+#define SET_PORT_PROMISC_SHIFT 31
+#define SET_PORT_MC_PROMISC_SHIFT 30
enum {
MLX4_CMD_SET_VLAN_FLTR = 0x47,
@@ -45,6 +45,12 @@ enum {
MLX4_CMD_DUMP_ETH_STATS = 0x49,
};
+enum {
+ MCAST_DIRECT_ONLY = 0,
+ MCAST_DIRECT = 1,
+ MCAST_DEFAULT = 2
+};
+
struct mlx4_set_port_general_context {
u8 reserved[3];
u8 flags;
@@ -60,14 +66,17 @@ struct mlx4_set_port_general_context {
struct mlx4_set_port_rqp_calc_context {
__be32 base_qpn;
- __be32 flags;
- u8 reserved[3];
+ u8 reserved;
+ u8 n_mac;
+ u8 n_vlan;
+ u8 n_prio;
+ u8 reserved2[3];
u8 mac_miss;
u8 intra_no_vlan;
u8 no_vlan;
u8 intra_vlan_miss;
u8 vlan_miss;
- u8 reserved2[3];
+ u8 reserved3[3];
u8 no_vlan_prio;
__be32 promisc;
__be32 mcast;
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 570f2508fb30..05998ee297c9 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -845,16 +845,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
}
/* Configure RSS indirection qp */
- err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
- if (err) {
- en_err(priv, "Failed to reserve range for RSS "
- "indirection qp\n");
- goto rss_err;
- }
err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
if (err) {
en_err(priv, "Failed to allocate RSS indirection QP\n");
- goto reserve_err;
+ goto rss_err;
}
rss_map->indir_qp.event = mlx4_en_sqp_event;
mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
@@ -881,8 +875,6 @@ indir_err:
MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
-reserve_err:
- mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
rss_err:
for (i = 0; i < good_qps; i++) {
mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
@@ -904,7 +896,6 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
- mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
for (i = 0; i < priv->rx_ring_num; i++) {
mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index a680cd4a5ab6..01feb8fd42ad 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -44,6 +44,7 @@
enum {
MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
+ MAX_BF = 256,
};
static int inline_thold __read_mostly = MAX_INLINE;
@@ -52,7 +53,7 @@ module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring, u32 size,
+ struct mlx4_en_tx_ring *ring, int qpn, u32 size,
u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
@@ -103,23 +104,25 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
"buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
- err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
- if (err) {
- en_err(priv, "Failed reserving qp for tx ring.\n");
- goto err_map;
- }
-
+ ring->qpn = qpn;
err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
if (err) {
en_err(priv, "Failed allocating qp %d\n", ring->qpn);
- goto err_reserve;
+ goto err_map;
}
ring->qp.event = mlx4_en_sqp_event;
+ err = mlx4_bf_alloc(mdev->dev, &ring->bf);
+ if (err) {
+ en_dbg(DRV, priv, "working without blueflame (%d)", err);
+ ring->bf.uar = &mdev->priv_uar;
+ ring->bf.uar->map = mdev->uar_map;
+ ring->bf_enabled = false;
+ } else
+ ring->bf_enabled = true;
+
return 0;
-err_reserve:
- mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
@@ -139,6 +142,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_dev *mdev = priv->mdev;
en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
+ if (ring->bf_enabled)
+ mlx4_bf_free(mdev->dev, &ring->bf);
mlx4_qp_remove(mdev->dev, &ring->qp);
mlx4_qp_free(mdev->dev, &ring->qp);
mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
@@ -171,6 +176,8 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
ring->cqn, &ring->context);
+ if (ring->bf_enabled)
+ ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
&ring->qp, &ring->qp_state);
@@ -591,6 +598,11 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
return skb_tx_hash(dev, skb);
}
+static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
+{
+ __iowrite64_copy(dst, src, bytecnt / 8);
+}
+
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -609,12 +621,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
int desc_size;
int real_size;
dma_addr_t dma;
- u32 index;
+ u32 index, bf_index;
__be32 op_own;
u16 vlan_tag = 0;
int i;
int lso_header_size;
void *fragptr;
+ bool bounce = false;
if (!priv->port_up)
goto tx_drop;
@@ -657,13 +670,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Packet is good - grab an index and transmit it */
index = ring->prod & ring->size_mask;
+ bf_index = ring->prod;
/* See if we have enough space for whole descriptor TXBB for setting
* SW ownership on next descriptor; if not, use a bounce buffer. */
if (likely(index + nr_txbb <= ring->size))
tx_desc = ring->buf + index * TXBB_SIZE;
- else
+ else {
tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
+ bounce = true;
+ }
/* Save skb in tx_info ring */
tx_info = &ring->tx_info[index];
@@ -768,21 +784,37 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->prod += nr_txbb;
/* If we used a bounce buffer then copy descriptor back into place */
- if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
+ if (bounce)
tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
/* Run destructor before passing skb to HW */
if (likely(!skb_shared(skb)))
skb_orphan(skb);
- /* Ensure new descirptor hits memory
- * before setting ownership of this descriptor to HW */
- wmb();
- tx_desc->ctrl.owner_opcode = op_own;
+ if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
+ *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
+ op_own |= htonl((bf_index & 0xffff) << 8);
+ /* Ensure new descriptor hits memory
+ * before setting ownership of this descriptor to HW */
+ wmb();
+ tx_desc->ctrl.owner_opcode = op_own;
- /* Ring doorbell! */
- wmb();
- writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
+ wmb();
+
+ mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
+ desc_size);
+
+ wmb();
+
+ ring->bf.offset ^= ring->bf.buf_size;
+ } else {
+ /* Ensure new descriptor hits memory
+ * before setting ownership of this descriptor to HW */
+ wmb();
+ tx_desc->ctrl.owner_opcode = op_own;
+ wmb();
+ writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
+ }
/* Poll CQ here */
mlx4_en_xmit_poll(priv, tx_ind);
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 552d0fce6f67..506cfd0372ec 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -42,7 +42,7 @@
#include "fw.h"
enum {
- MLX4_IRQNAME_SIZE = 64
+ MLX4_IRQNAME_SIZE = 32
};
enum {
@@ -317,8 +317,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev)
* we need to map, take the difference of highest index and
* the lowest index we'll use and add 1.
*/
- return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
- dev->caps.reserved_eqs / 4 + 1;
+ return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
+ dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
}
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
@@ -496,16 +496,32 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
static void mlx4_free_irqs(struct mlx4_dev *dev)
{
struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
- int i;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i, vec;
if (eq_table->have_irq)
free_irq(dev->pdev->irq, dev);
+
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq) {
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
eq_table->eq[i].have_irq = 0;
}
+ for (i = 0; i < dev->caps.comp_pool; i++) {
+ /*
+ * Free the assigned IRQs.
+ * All bits should be 0, but we need to validate.
+ */
+ if (priv->msix_ctl.pool_bm & 1ULL << i) {
+ /* No locking needed */
+ vec = dev->caps.num_comp_vectors + 1 + i;
+ free_irq(priv->eq_table.eq[vec].irq,
+ &priv->eq_table.eq[vec]);
+ }
+ }
+
+
kfree(eq_table->irq_names);
}
@@ -578,7 +594,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
(priv->eq_table.inta_pin < 32 ? 4 : 0);
priv->eq_table.irq_names =
- kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
+ kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
+ dev->caps.comp_pool),
GFP_KERNEL);
if (!priv->eq_table.irq_names) {
err = -ENOMEM;
@@ -601,6 +618,22 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
if (err)
goto err_out_comp;
+ /* If the additional completion vector pool size is 0, this loop will not run */
+ for (i = dev->caps.num_comp_vectors + 1;
+ i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
+
+ err = mlx4_create_eq(dev, dev->caps.num_cqs -
+ dev->caps.reserved_cqs +
+ MLX4_NUM_SPARE_EQE,
+ (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
+ &priv->eq_table.eq[i]);
+ if (err) {
+ --i;
+ goto err_out_unmap;
+ }
+ }
+
+
if (dev->flags & MLX4_FLAG_MSI_X) {
const char *eq_name;
@@ -686,7 +719,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
mlx4_free_irqs(dev);
- for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
+ for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
mlx4_unmap_clr_int(dev);
@@ -743,3 +776,65 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);
+
+int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector)
+{
+
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int vec = 0, err = 0, i;
+
+ spin_lock(&priv->msix_ctl.pool_lock);
+ for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
+ if (~priv->msix_ctl.pool_bm & 1ULL << i) {
+ priv->msix_ctl.pool_bm |= 1ULL << i;
+ vec = dev->caps.num_comp_vectors + 1 + i;
+ snprintf(priv->eq_table.irq_names +
+ vec * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE, "%s", name);
+ err = request_irq(priv->eq_table.eq[vec].irq,
+ mlx4_msi_x_interrupt, 0,
+ &priv->eq_table.irq_names[vec<<5],
+ priv->eq_table.eq + vec);
+ if (err) {
+ /* zero out the bit by flipping it */
+ priv->msix_ctl.pool_bm ^= 1ULL << i;
+ vec = 0;
+ continue;
+ /* we don't want to break here */
+ }
+ eq_set_ci(&priv->eq_table.eq[vec], 1);
+ }
+ }
+ spin_unlock(&priv->msix_ctl.pool_lock);
+
+ if (vec) {
+ *vector = vec;
+ } else {
+ *vector = 0;
+ err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
+ }
+ return err;
+}
+EXPORT_SYMBOL(mlx4_assign_eq);
+
+void mlx4_release_eq(struct mlx4_dev *dev, int vec)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ /* bitmap index */
+ int i = vec - dev->caps.num_comp_vectors - 1;
+
+ if (likely(i >= 0)) {
+ /* sanity check, making sure we're not trying to free IRQs
+ belonging to a legacy EQ */
+ spin_lock(&priv->msix_ctl.pool_lock);
+ if (priv->msix_ctl.pool_bm & 1ULL << i) {
+ free_irq(priv->eq_table.eq[vec].irq,
+ &priv->eq_table.eq[vec]);
+ priv->msix_ctl.pool_bm &= ~(1ULL << i);
+ }
+ spin_unlock(&priv->msix_ctl.pool_lock);
+ }
+
+}
+EXPORT_SYMBOL(mlx4_release_eq);
+
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 5de1db897835..67a209ba939d 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -274,8 +274,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->stat_rate_support = stat_rate;
MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET);
dev_cap->udp_rss = field & 0x1;
+ dev_cap->vep_uc_steering = field & 0x2;
+ dev_cap->vep_mc_steering = field & 0x4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
dev_cap->loopback_support = field & 0x1;
+ dev_cap->wol = field & 0x40;
MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
dev_cap->reserved_uars = field >> 4;
@@ -737,6 +740,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
+#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_TPT_OFFSET 0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
@@ -797,6 +801,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ if (dev->caps.vep_mc_steering)
+ MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET);
MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
/* TPT attributes */
@@ -908,3 +914,22 @@ int mlx4_NOP(struct mlx4_dev *dev)
/* Input modifier of 0x1f means "finish as soon as possible." */
return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
}
+
+#define MLX4_WOL_SETUP_MODE (5 << 28)
+int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
+{
+ u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
+
+ return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
+ MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
+}
+EXPORT_SYMBOL_GPL(mlx4_wol_read);
+
+int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
+{
+ u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
+
+ return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
+ MLX4_CMD_TIME_CLASS_A);
+}
+EXPORT_SYMBOL_GPL(mlx4_wol_write);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 65cc72eb899d..88003ebc6185 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -80,6 +80,9 @@ struct mlx4_dev_cap {
u16 stat_rate_support;
int udp_rss;
int loopback_support;
+ int vep_uc_steering;
+ int vep_mc_steering;
+ int wol;
u32 flags;
int reserved_uars;
int uar_size;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index c83501122d77..62fa7eec5f0c 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -39,6 +39,7 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/io-mapping.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -227,6 +228,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
dev->caps.udp_rss = dev_cap->udp_rss;
dev->caps.loopback_support = dev_cap->loopback_support;
+ dev->caps.vep_uc_steering = dev_cap->vep_uc_steering;
+ dev->caps.vep_mc_steering = dev_cap->vep_mc_steering;
+ dev->caps.wol = dev_cap->wol;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.log_num_macs = log_num_mac;
@@ -718,8 +722,31 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
+static int map_bf_area(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ resource_size_t bf_start;
+ resource_size_t bf_len;
+ int err = 0;
+
+ bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
+ bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
+ priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
+ if (!priv->bf_mapping)
+ err = -ENOMEM;
+
+ return err;
+}
+
+static void unmap_bf_area(struct mlx4_dev *dev)
+{
+ if (mlx4_priv(dev)->bf_mapping)
+ io_mapping_free(mlx4_priv(dev)->bf_mapping);
+}
+
static void mlx4_close_hca(struct mlx4_dev *dev)
{
+ unmap_bf_area(dev);
mlx4_CLOSE_HCA(dev, 0);
mlx4_free_icms(dev);
mlx4_UNMAP_FA(dev);
@@ -772,6 +799,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
goto err_stop_fw;
}
+ if (map_bf_area(dev))
+ mlx4_dbg(dev, "Failed to map blue flame area\n");
+
init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
@@ -802,6 +832,7 @@ err_free_icm:
mlx4_free_icms(dev);
err_stop_fw:
+ unmap_bf_area(dev);
mlx4_UNMAP_FA(dev);
mlx4_free_icm(dev, priv->fw.fw_icm, 0);
@@ -969,13 +1000,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
- int nreq;
+ int nreq = min_t(int, dev->caps.num_ports *
+ min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
+ + MSIX_LEGACY_SZ, MAX_MSIX);
int err;
int i;
if (msi_x) {
nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
- num_possible_cpus() + 1);
+ nreq);
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
goto no_msi;
@@ -998,7 +1031,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
goto no_msi;
}
- dev->caps.num_comp_vectors = nreq - 1;
+ if (nreq <
+ MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
+ /* Working in legacy mode, all EQs shared */
+ dev->caps.comp_pool = 0;
+ dev->caps.num_comp_vectors = nreq - 1;
+ } else {
+ dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
+ dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
+ }
for (i = 0; i < nreq; ++i)
priv->eq_table.eq[i].irq = entries[i].vector;
@@ -1010,6 +1051,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
no_msi:
dev->caps.num_comp_vectors = 1;
+ dev->caps.comp_pool = 0;
for (i = 0; i < 2; ++i)
priv->eq_table.eq[i].irq = dev->pdev->irq;
@@ -1049,6 +1091,59 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->pdev->dev, &info->port_attr);
}
+static int mlx4_init_steering(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int num_entries = dev->caps.num_ports;
+ int i, j;
+
+ priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
+ if (!priv->steer)
+ return -ENOMEM;
+
+ for (i = 0; i < num_entries; i++) {
+ for (j = 0; j < MLX4_NUM_STEERS; j++) {
+ INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
+ INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
+ }
+ INIT_LIST_HEAD(&priv->steer[i].high_prios);
+ }
+ return 0;
+}
+
+static void mlx4_clear_steering(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_steer_index *entry, *tmp_entry;
+ struct mlx4_promisc_qp *pqp, *tmp_pqp;
+ int num_entries = dev->caps.num_ports;
+ int i, j;
+
+ for (i = 0; i < num_entries; i++) {
+ for (j = 0; j < MLX4_NUM_STEERS; j++) {
+ list_for_each_entry_safe(pqp, tmp_pqp,
+ &priv->steer[i].promisc_qps[j],
+ list) {
+ list_del(&pqp->list);
+ kfree(pqp);
+ }
+ list_for_each_entry_safe(entry, tmp_entry,
+ &priv->steer[i].steer_entries[j],
+ list) {
+ list_del(&entry->list);
+ list_for_each_entry_safe(pqp, tmp_pqp,
+ &entry->duplicates,
+ list) {
+ list_del(&pqp->list);
+ kfree(pqp);
+ }
+ kfree(entry);
+ }
+ }
+ }
+ kfree(priv->steer);
+}
+
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mlx4_priv *priv;
@@ -1130,6 +1225,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&priv->pgdir_list);
mutex_init(&priv->pgdir_mutex);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &dev->rev_id);
+
+ INIT_LIST_HEAD(&priv->bf_list);
+ mutex_init(&priv->bf_mutex);
+
/*
* Now reset the HCA before we touch the PCI capabilities or
* attempt a firmware command, since a boot ROM may have left
@@ -1154,8 +1254,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_close;
+ priv->msix_ctl.pool_bm = 0;
+ spin_lock_init(&priv->msix_ctl.pool_lock);
+
mlx4_enable_msi_x(dev);
+ err = mlx4_init_steering(dev);
+ if (err)
+ goto err_free_eq;
+
err = mlx4_setup_hca(dev);
if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
dev->flags &= ~MLX4_FLAG_MSI_X;
@@ -1164,7 +1271,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
if (err)
- goto err_free_eq;
+ goto err_steer;
for (port = 1; port <= dev->caps.num_ports; port++) {
err = mlx4_init_port_info(dev, port);
@@ -1197,6 +1304,9 @@ err_port:
mlx4_cleanup_pd_table(dev);
mlx4_cleanup_uar_table(dev);
+err_steer:
+ mlx4_clear_steering(dev);
+
err_free_eq:
mlx4_free_eq_table(dev);
@@ -1256,6 +1366,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
iounmap(priv->kar);
mlx4_uar_free(dev, &priv->driver_uar);
mlx4_cleanup_uar_table(dev);
+ mlx4_clear_steering(dev);
mlx4_free_eq_table(dev);
mlx4_close_hca(dev);
mlx4_cmd_cleanup(dev);
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index 79cf42db2ea9..e71372aa9cc4 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -32,6 +32,7 @@
*/
#include <linux/string.h>
+#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
@@ -40,38 +41,40 @@
#define MGM_QPN_MASK 0x00FFFFFF
#define MGM_BLCK_LB_BIT 30
-struct mlx4_mgm {
- __be32 next_gid_index;
- __be32 members_count;
- u32 reserved[2];
- u8 gid[16];
- __be32 qp[MLX4_QP_PER_MGM];
-};
-
static const u8 zero_gid[16]; /* automatically initialized to 0 */
-static int mlx4_READ_MCG(struct mlx4_dev *dev, int index,
- struct mlx4_cmd_mailbox *mailbox)
+static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
+ struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
MLX4_CMD_TIME_CLASS_A);
}
-static int mlx4_WRITE_MCG(struct mlx4_dev *dev, int index,
- struct mlx4_cmd_mailbox *mailbox)
+static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
+ struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
MLX4_CMD_TIME_CLASS_A);
}
-static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
- u16 *hash)
+static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
+ struct mlx4_cmd_mailbox *mailbox)
+{
+ u32 in_mod;
+
+ in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
+ return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
+ MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ u16 *hash, u8 op_mod)
{
u64 imm;
int err;
- err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, 0, MLX4_CMD_MGID_HASH,
- MLX4_CMD_TIME_CLASS_A);
+ err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
+ MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
if (!err)
*hash = imm;
@@ -79,6 +82,457 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
return err;
}
+static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
+ enum mlx4_steer_type steer,
+ u32 qpn)
+{
+ struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
+ struct mlx4_promisc_qp *pqp;
+
+ list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
+ if (pqp->qpn == qpn)
+ return pqp;
+ }
+ /* not found */
+ return NULL;
+}
+
+/*
+ * Add new entry to steering data structure.
+ * All promisc QPs should be added as well
+ */
+static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+ enum mlx4_steer_type steer,
+ unsigned int index, u32 qpn)
+{
+ struct mlx4_steer *s_steer;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm;
+ u32 members_count;
+ struct mlx4_steer_index *new_entry;
+ struct mlx4_promisc_qp *pqp;
+ struct mlx4_promisc_qp *dqp = NULL;
+ u32 prot;
+ int err;
+ u8 pf_num;
+
+ pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+ s_steer = &mlx4_priv(dev)->steer[pf_num];
+ new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
+ if (!new_entry)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&new_entry->duplicates);
+ new_entry->index = index;
+ list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);
+
+ /* If the given qpn is also a promisc qp,
+ * it should be inserted to duplicates list
+ */
+ pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ if (pqp) {
+ dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
+ if (!dqp) {
+ err = -ENOMEM;
+ goto out_alloc;
+ }
+ dqp->qpn = qpn;
+ list_add_tail(&dqp->list, &new_entry->duplicates);
+ }
+
+ /* if no promisc qps for this vep, we are done */
+ if (list_empty(&s_steer->promisc_qps[steer]))
+ return 0;
+
+ /* now need to add all the promisc qps to the new
+ * steering entry, as they should also receive the packets
+ * destined to this address */
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = -ENOMEM;
+ goto out_alloc;
+ }
+ mgm = mailbox->buf;
+
+ err = mlx4_READ_ENTRY(dev, index, mailbox);
+ if (err)
+ goto out_mailbox;
+
+ members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+ prot = be32_to_cpu(mgm->members_count) >> 30;
+ list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
+ /* don't add already existing qpn */
+ if (pqp->qpn == qpn)
+ continue;
+ if (members_count == MLX4_QP_PER_MGM) {
+ /* out of space */
+ err = -ENOMEM;
+ goto out_mailbox;
+ }
+
+ /* add the qpn */
+ mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
+ }
+ /* update the qps count and update the entry with all the promisc qps*/
+ mgm->members_count = cpu_to_be32(members_count | (prot << 30));
+ err = mlx4_WRITE_ENTRY(dev, index, mailbox);
+
+out_mailbox:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (!err)
+ return 0;
+out_alloc:
+ if (dqp) {
+ list_del(&dqp->list);
+ kfree(dqp);
+ }
+ list_del(&new_entry->list);
+ kfree(new_entry);
+ return err;
+}
+
+/* update the data structures with existing steering entry */
+static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+ enum mlx4_steer_type steer,
+ unsigned int index, u32 qpn)
+{
+ struct mlx4_steer *s_steer;
+ struct mlx4_steer_index *tmp_entry, *entry = NULL;
+ struct mlx4_promisc_qp *pqp;
+ struct mlx4_promisc_qp *dqp;
+ u8 pf_num;
+
+ pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+ s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+ pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ if (!pqp)
+ return 0; /* nothing to do */
+
+ list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
+ if (tmp_entry->index == index) {
+ entry = tmp_entry;
+ break;
+ }
+ }
+ if (unlikely(!entry)) {
+ mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
+ return -EINVAL;
+ }
+
+ /* the given qpn is listed as a promisc qpn
+ * we need to add it as a duplicate to this entry
+ * for future reference */
+ list_for_each_entry(dqp, &entry->duplicates, list) {
+ if (qpn == dqp->qpn)
+ return 0; /* qp is already duplicated */
+ }
+
+ /* add the qp as a duplicate on this index */
+ dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
+ if (!dqp)
+ return -ENOMEM;
+ dqp->qpn = qpn;
+ list_add_tail(&dqp->list, &entry->duplicates);
+
+ return 0;
+}
+
+/* Check whether a qpn is a duplicate on a steering entry.
+ * If so, it should not be removed from mgm */
+static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+ enum mlx4_steer_type steer,
+ unsigned int index, u32 qpn)
+{
+ struct mlx4_steer *s_steer;
+ struct mlx4_steer_index *tmp_entry, *entry = NULL;
+ struct mlx4_promisc_qp *dqp, *tmp_dqp;
+ u8 pf_num;
+
+ pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+ s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+ /* if qp is not promisc, it cannot be duplicated */
+ if (!get_promisc_qp(dev, pf_num, steer, qpn))
+ return false;
+
+ /* The qp is promisc qp so it is a duplicate on this index
+ * Find the index entry, and remove the duplicate */
+ list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
+ if (tmp_entry->index == index) {
+ entry = tmp_entry;
+ break;
+ }
+ }
+ if (unlikely(!entry)) {
+ mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
+ return false;
+ }
+ list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
+ if (dqp->qpn == qpn) {
+ list_del(&dqp->list);
+ kfree(dqp);
+ }
+ }
+ return true;
+}
+
+/* If a steering entry contains only promisc QPs, it can be removed. */
+static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+ enum mlx4_steer_type steer,
+ unsigned int index, u32 tqpn)
+{
+ struct mlx4_steer *s_steer;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm;
+ struct mlx4_steer_index *entry = NULL, *tmp_entry;
+ u32 qpn;
+ u32 members_count;
+ bool ret = false;
+ int i;
+ u8 pf_num;
+
+ pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+ s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return false;
+ mgm = mailbox->buf;
+
+ if (mlx4_READ_ENTRY(dev, index, mailbox))
+ goto out;
+ members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+ for (i = 0; i < members_count; i++) {
+ qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
+ if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
+ /* the qp is not promisc, the entry can't be removed */
+ goto out;
+ }
+ }
+ /* All the qps currently registered for this entry are promiscuous;
+ * check for duplicates */
+ ret = true;
+ list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
+ if (entry->index == index) {
+ if (list_empty(&entry->duplicates)) {
+ list_del(&entry->list);
+ kfree(entry);
+ } else {
+ /* This entry contains duplicates so it shouldn't be removed */
+ ret = false;
+ goto out;
+ }
+ }
+ }
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return ret;
+}
+
+static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+ enum mlx4_steer_type steer, u32 qpn)
+{
+ struct mlx4_steer *s_steer;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm;
+ struct mlx4_steer_index *entry;
+ struct mlx4_promisc_qp *pqp;
+ struct mlx4_promisc_qp *dqp;
+ u32 members_count;
+ u32 prot;
+ int i;
+ bool found;
+ int last_index;
+ int err;
+ u8 pf_num;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+ s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+ mutex_lock(&priv->mcg_table.mutex);
+
+ if (get_promisc_qp(dev, pf_num, steer, qpn)) {
+ err = 0; /* Nothing to do, already exists */
+ goto out_mutex;
+ }
+
+ pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
+ if (!pqp) {
+ err = -ENOMEM;
+ goto out_mutex;
+ }
+ pqp->qpn = qpn;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = -ENOMEM;
+ goto out_alloc;
+ }
+ mgm = mailbox->buf;
+
+ /* The promisc qp needs to be added to each of the steering
+ * entries; if it already exists there, it needs to be added as
+ * a duplicate for that entry */
+ list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
+ err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
+ if (err)
+ goto out_mailbox;
+
+ members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+ prot = be32_to_cpu(mgm->members_count) >> 30;
+ found = false;
+ for (i = 0; i < members_count; i++) {
+ if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
+ /* Entry already exists, add to duplicates */
+ dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
+ if (!dqp)
+ goto out_mailbox;
+ dqp->qpn = qpn;
+ list_add_tail(&dqp->list, &entry->duplicates);
+ found = true;
+ }
+ }
+ if (!found) {
+ /* Need to add the qpn to mgm */
+ if (members_count == MLX4_QP_PER_MGM) {
+ /* entry is full */
+ err = -ENOMEM;
+ goto out_mailbox;
+ }
+ mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
+ mgm->members_count = cpu_to_be32(members_count | (prot << 30));
+ err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
+ if (err)
+ goto out_mailbox;
+ }
+ last_index = entry->index;
+ }
+
+ /* add the new qpn to list of promisc qps */
+ list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
+ /* now need to add all the promisc qps to default entry */
+ memset(mgm, 0, sizeof *mgm);
+ members_count = 0;
+ list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
+ mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
+ mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
+
+ err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+ if (err)
+ goto out_list;
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ mutex_unlock(&priv->mcg_table.mutex);
+ return 0;
+
+out_list:
+ list_del(&pqp->list);
+out_mailbox:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+out_alloc:
+ kfree(pqp);
+out_mutex:
+ mutex_unlock(&priv->mcg_table.mutex);
+ return err;
+}
+
+static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+ enum mlx4_steer_type steer, u32 qpn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_steer *s_steer;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm;
+ struct mlx4_steer_index *entry;
+ struct mlx4_promisc_qp *pqp;
+ struct mlx4_promisc_qp *dqp;
+ u32 members_count;
+ bool found;
+ bool back_to_list = false;
+ int loc, i;
+ int err;
+ u8 pf_num;
+
+ pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+ s_steer = &mlx4_priv(dev)->steer[pf_num];
+ mutex_lock(&priv->mcg_table.mutex);
+
+ pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ if (unlikely(!pqp)) {
+ mlx4_warn(dev, "QP %x is not a promiscuous QP\n", qpn);
+ /* nothing to do */
+ err = 0;
+ goto out_mutex;
+ }
+
+ /* remove from the list of promisc qps */
+ list_del(&pqp->list);
+ kfree(pqp);
+
+ /* set the default entry not to include the removed one */
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = -ENOMEM;
+ back_to_list = true;
+ goto out_list;
+ }
+ mgm = mailbox->buf;
+ members_count = 0;
+ list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
+ mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
+ mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
+
+ err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+ if (err)
+ goto out_mailbox;
+
+ /* remove the qp from all the steering entries */
+ list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
+ found = false;
+ list_for_each_entry(dqp, &entry->duplicates, list) {
+ if (dqp->qpn == qpn) {
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ /* a duplicate, no need to change the mgm,
+ * only update the duplicates list */
+ list_del(&dqp->list);
+ kfree(dqp);
+ } else {
+ err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
+ if (err)
+ goto out_mailbox;
+ members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+ for (loc = -1, i = 0; i < members_count; ++i)
+ if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
+ loc = i;
+
+ mgm->members_count = cpu_to_be32(--members_count |
+ (MLX4_PROT_ETH << 30));
+ mgm->qp[loc] = mgm->qp[i - 1];
+ mgm->qp[i - 1] = 0;
+
+ err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
+ if (err)
+ goto out_mailbox;
+ }
+
+ }
+
+out_mailbox:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+out_list:
+ if (back_to_list)
+ list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
+out_mutex:
+ mutex_unlock(&priv->mcg_table.mutex);
+ return err;
+}
+
/*
* Caller must hold MCG table semaphore. gid and mgm parameters must
* be properly aligned for command interface.
@@ -94,15 +548,17 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
* If no AMGM exists for given gid, *index = -1, *prev = index of last
* entry in hash chain and *mgm holds end of hash chain.
*/
-static int find_mgm(struct mlx4_dev *dev,
- u8 *gid, enum mlx4_protocol protocol,
- struct mlx4_cmd_mailbox *mgm_mailbox,
- u16 *hash, int *prev, int *index)
+static int find_entry(struct mlx4_dev *dev, u8 port,
+ u8 *gid, enum mlx4_protocol prot,
+ enum mlx4_steer_type steer,
+ struct mlx4_cmd_mailbox *mgm_mailbox,
+ u16 *hash, int *prev, int *index)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm = mgm_mailbox->buf;
u8 *mgid;
int err;
+ u8 op_mod = (prot == MLX4_PROT_ETH) ? !!(dev->caps.vep_mc_steering) : 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -111,7 +567,7 @@ static int find_mgm(struct mlx4_dev *dev,
memcpy(mgid, gid, 16);
- err = mlx4_MGID_HASH(dev, mailbox, hash);
+ err = mlx4_GID_HASH(dev, mailbox, hash, op_mod);
mlx4_free_cmd_mailbox(dev, mailbox);
if (err)
return err;
@@ -123,11 +579,11 @@ static int find_mgm(struct mlx4_dev *dev,
*prev = -1;
do {
- err = mlx4_READ_MCG(dev, *index, mgm_mailbox);
+ err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
if (err)
return err;
- if (!memcmp(mgm->gid, zero_gid, 16)) {
+ if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
if (*index != *hash) {
mlx4_err(dev, "Found zero MGID in AMGM.\n");
err = -EINVAL;
@@ -136,7 +592,7 @@ static int find_mgm(struct mlx4_dev *dev,
}
if (!memcmp(mgm->gid, gid, 16) &&
- be32_to_cpu(mgm->members_count) >> 30 == protocol)
+ be32_to_cpu(mgm->members_count) >> 30 == prot)
return err;
*prev = *index;
@@ -147,8 +603,9 @@ static int find_mgm(struct mlx4_dev *dev,
return err;
}
-int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- int block_mcast_loopback, enum mlx4_protocol protocol)
+int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ int block_mcast_loopback, enum mlx4_protocol prot,
+ enum mlx4_steer_type steer)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
@@ -159,6 +616,8 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int link = 0;
int i;
int err;
+ u8 port = gid[5];
+ u8 new_entry = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -166,14 +625,16 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
mgm = mailbox->buf;
mutex_lock(&priv->mcg_table.mutex);
-
- err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
+ err = find_entry(dev, port, gid, prot, steer,
+ mailbox, &hash, &prev, &index);
if (err)
goto out;
if (index != -1) {
- if (!memcmp(mgm->gid, zero_gid, 16))
+ if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
+ new_entry = 1;
memcpy(mgm->gid, gid, 16);
+ }
} else {
link = 1;
@@ -209,26 +670,34 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
else
mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
- mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30);
+ mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
- err = mlx4_WRITE_MCG(dev, index, mailbox);
+ err = mlx4_WRITE_ENTRY(dev, index, mailbox);
if (err)
goto out;
if (!link)
goto out;
- err = mlx4_READ_MCG(dev, prev, mailbox);
+ err = mlx4_READ_ENTRY(dev, prev, mailbox);
if (err)
goto out;
mgm->next_gid_index = cpu_to_be32(index << 6);
- err = mlx4_WRITE_MCG(dev, prev, mailbox);
+ err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
if (err)
goto out;
out:
+ if (prot == MLX4_PROT_ETH) {
+ /* manage the steering entry for promisc mode */
+ if (new_entry)
+ new_steering_entry(dev, 0, port, steer, index, qp->qpn);
+ else
+ existing_steering_entry(dev, 0, port, steer,
+ index, qp->qpn);
+ }
if (err && link && index != -1) {
if (index < dev->caps.num_mgms)
mlx4_warn(dev, "Got AMGM index %d < %d",
@@ -242,10 +711,9 @@ out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
-EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
-int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- enum mlx4_protocol protocol)
+int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
@@ -255,6 +723,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int prev, index;
int i, loc;
int err;
+ u8 port = gid[5];
+ bool removed_entry = false;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -263,7 +733,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
mutex_lock(&priv->mcg_table.mutex);
- err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
+ err = find_entry(dev, port, gid, prot, steer,
+ mailbox, &hash, &prev, &index);
if (err)
goto out;
@@ -273,6 +744,11 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
goto out;
}
+ /* if this qp is also a promisc qp, it shouldn't be removed */
+ if (prot == MLX4_PROT_ETH &&
+ check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
+ goto out;
+
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
for (loc = -1, i = 0; i < members_count; ++i)
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
@@ -285,26 +761,31 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
}
- mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30);
+ mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
mgm->qp[loc] = mgm->qp[i - 1];
mgm->qp[i - 1] = 0;
- if (i != 1) {
- err = mlx4_WRITE_MCG(dev, index, mailbox);
+ if (prot == MLX4_PROT_ETH)
+ removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
+ if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
+ err = mlx4_WRITE_ENTRY(dev, index, mailbox);
goto out;
}
+ /* We are going to delete the entry, members count should be 0 */
+ mgm->members_count = cpu_to_be32((u32) prot << 30);
+
if (prev == -1) {
/* Remove entry from MGM */
int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
if (amgm_index) {
- err = mlx4_READ_MCG(dev, amgm_index, mailbox);
+ err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
if (err)
goto out;
} else
memset(mgm->gid, 0, 16);
- err = mlx4_WRITE_MCG(dev, index, mailbox);
+ err = mlx4_WRITE_ENTRY(dev, index, mailbox);
if (err)
goto out;
@@ -319,13 +800,13 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
} else {
/* Remove entry from AMGM */
int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
- err = mlx4_READ_MCG(dev, prev, mailbox);
+ err = mlx4_READ_ENTRY(dev, prev, mailbox);
if (err)
goto out;
mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
- err = mlx4_WRITE_MCG(dev, prev, mailbox);
+ err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
if (err)
goto out;
@@ -343,8 +824,85 @@ out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
+
+
+int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ int block_mcast_loopback, enum mlx4_protocol prot)
+{
+ enum mlx4_steer_type steer;
+
+ steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
+
+ if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
+ return 0;
+
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (steer << 1);
+
+ return mlx4_qp_attach_common(dev, qp, gid,
+ block_mcast_loopback, prot,
+ steer);
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
+
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ enum mlx4_protocol prot)
+{
+ enum mlx4_steer_type steer;
+
+ steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
+
+ if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
+ return 0;
+
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (steer << 1);
+
+ return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
+}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
+
+int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+ if (!dev->caps.vep_mc_steering)
+ return 0;
+
+ return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
+
+int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+ if (!dev->caps.vep_mc_steering)
+ return 0;
+
+ return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
+
+int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+ if (!dev->caps.vep_mc_steering)
+ return 0;
+
+ return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
+
+int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+ if (!dev->caps.vep_mc_steering)
+ return 0;
+
+ return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
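A minimal sketch of how an Ethernet driver might toggle multicast promiscuous steering with the exports above, assuming dev is a probed mlx4_dev and qpn/port are supplied by the caller (the helper name is illustrative, not part of this patch):

static int toggle_mc_promisc(struct mlx4_dev *dev, u32 qpn, u8 port, bool on)
{
	/* both helpers return 0 and do nothing without vep_mc_steering */
	if (on)
		return mlx4_multicast_promisc_add(dev, qpn, port);
	return mlx4_multicast_promisc_remove(dev, qpn, port);
}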
+
int mlx4_init_mcg_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 0da5bb7285b4..c1e0e5f1bcdb 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -105,6 +105,7 @@ struct mlx4_bitmap {
u32 max;
u32 reserved_top;
u32 mask;
+ u32 avail;
spinlock_t lock;
unsigned long *table;
};
@@ -162,6 +163,27 @@ struct mlx4_fw {
u8 catas_bar;
};
+#define MGM_QPN_MASK 0x00FFFFFF
+#define MGM_BLCK_LB_BIT 30
+
+struct mlx4_promisc_qp {
+ struct list_head list;
+ u32 qpn;
+};
+
+struct mlx4_steer_index {
+ struct list_head list;
+ unsigned int index;
+ struct list_head duplicates;
+};
+
+struct mlx4_mgm {
+ __be32 next_gid_index;
+ __be32 members_count;
+ u32 reserved[2];
+ u8 gid[16];
+ __be32 qp[MLX4_QP_PER_MGM];
+};
struct mlx4_cmd {
struct pci_pool *pool;
void __iomem *hcr;
@@ -265,6 +287,10 @@ struct mlx4_vlan_table {
int max;
};
+struct mlx4_mac_entry {
+ u64 mac;
+};
+
struct mlx4_port_info {
struct mlx4_dev *dev;
int port;
@@ -272,7 +298,9 @@ struct mlx4_port_info {
struct device_attribute port_attr;
enum mlx4_port_type tmp_type;
struct mlx4_mac_table mac_table;
+ struct radix_tree_root mac_tree;
struct mlx4_vlan_table vlan_table;
+ int base_qpn;
};
struct mlx4_sense {
@@ -282,6 +310,17 @@ struct mlx4_sense {
struct delayed_work sense_poll;
};
+struct mlx4_msix_ctl {
+ u64 pool_bm;
+ spinlock_t pool_lock;
+};
+
+struct mlx4_steer {
+ struct list_head promisc_qps[MLX4_NUM_STEERS];
+ struct list_head steer_entries[MLX4_NUM_STEERS];
+ struct list_head high_prios;
+};
+
struct mlx4_priv {
struct mlx4_dev dev;
@@ -313,6 +352,11 @@ struct mlx4_priv {
struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
struct mlx4_sense sense;
struct mutex port_mutex;
+ struct mlx4_msix_ctl msix_ctl;
+ struct mlx4_steer *steer;
+ struct list_head bf_list;
+ struct mutex bf_mutex;
+ struct io_mapping *bf_mapping;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -328,6 +372,7 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
+u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
 u32 reserved_bot, u32 reserved_top);
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
@@ -403,4 +448,9 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
+int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ enum mlx4_protocol prot, enum mlx4_steer_type steer);
+int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ int block_mcast_loopback, enum mlx4_protocol prot,
+ enum mlx4_steer_type steer);
#endif /* MLX4_H */
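For reference, a sketch of the 16-byte GID layout that mlx4_qp_attach_common()/mlx4_qp_detach_common() expect for MLX4_PROT_ETH, mirroring the construction used in mlx4_uc_steer_add() later in this patch; dev, qp, mac, port and err are assumed to be set up by the caller:

	u8 gid[16] = {0};
	__be64 be_mac = cpu_to_be64((mac & 0xffffffffffffULL) << 16);

	memcpy(&gid[10], &be_mac, ETH_ALEN);	/* MAC in the last six bytes */
	gid[5] = port;				/* physical port */
	gid[7] = MLX4_UC_STEER << 1;		/* steering type */

	err = mlx4_qp_attach_common(dev, &qp, gid, 0,
				    MLX4_PROT_ETH, MLX4_UC_STEER);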
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index dfed6a07c2d7..e30f6099c0de 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -49,8 +49,8 @@
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "1.5.1.6"
-#define DRV_RELDATE "August 2010"
+#define DRV_VERSION "1.5.4.1"
+#define DRV_RELDATE "March 2011"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -62,6 +62,7 @@
#define MLX4_EN_PAGE_SHIFT 12
#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
#define MAX_RX_RINGS 16
+#define MIN_RX_RINGS 4
#define TXBB_SIZE 64
#define HEADROOM (2048 / TXBB_SIZE + 1)
#define STAMP_STRIDE 64
@@ -124,6 +125,7 @@ enum {
#define MLX4_EN_RX_SIZE_THRESH 1024
#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
#define MLX4_EN_SAMPLE_INTERVAL 0
+#define MLX4_EN_AVG_PKT_SMALL 256
#define MLX4_EN_AUTO_CONF 0xffff
@@ -214,6 +216,9 @@ struct mlx4_en_tx_desc {
#define MLX4_EN_USE_SRQ 0x01000000
+#define MLX4_EN_CX3_LOW_ID 0x1000
+#define MLX4_EN_CX3_HIGH_ID 0x1005
+
struct mlx4_en_rx_alloc {
struct page *page;
u16 offset;
@@ -243,6 +248,8 @@ struct mlx4_en_tx_ring {
unsigned long bytes;
unsigned long packets;
spinlock_t comp_lock;
+ struct mlx4_bf bf;
+ bool bf_enabled;
};
struct mlx4_en_rx_desc {
@@ -453,6 +460,7 @@ struct mlx4_en_priv {
struct mlx4_en_rss_map rss_map;
u32 flags;
#define MLX4_EN_FLAG_PROMISC 0x1
+#define MLX4_EN_FLAG_MC_PROMISC 0x2
u32 tx_ring_num;
u32 rx_ring_num;
u32 rx_skb_size;
@@ -461,6 +469,7 @@ struct mlx4_en_priv {
u16 log_rx_info;
struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
+ int tx_vector;
struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
@@ -476,6 +485,13 @@ struct mlx4_en_priv {
int mc_addrs_cnt;
struct mlx4_en_stat_out_mbox hw_stats;
int vids[128];
+ bool wol;
+};
+
+enum mlx4_en_wol {
+ MLX4_EN_WOL_MAGIC = (1ULL << 61),
+ MLX4_EN_WOL_ENABLED = (1ULL << 62),
+ MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
};
@@ -486,12 +502,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
int mlx4_en_start_port(struct net_device *dev);
void mlx4_en_stop_port(struct net_device *dev);
-void mlx4_en_free_resources(struct mlx4_en_priv *priv);
+void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int entries, int ring, enum cq_type mode);
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+ bool reserve_vectors);
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -503,7 +520,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
- u32 size, u16 stride);
+ int qpn, u32 size, u16 stride);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index c4988d6bd5b2..1286b886dcea 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -32,12 +32,17 @@
*/
#include <linux/errno.h>
+#include <linux/io-mapping.h>
#include <asm/page.h>
#include "mlx4.h"
#include "icm.h"
+enum {
+ MLX4_NUM_RESERVED_UARS = 8
+};
+
int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -77,6 +82,7 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
return -ENOMEM;
uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
+ uar->map = NULL;
return 0;
}
@@ -88,6 +94,102 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
}
EXPORT_SYMBOL_GPL(mlx4_uar_free);
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_uar *uar;
+ int err = 0;
+ int idx;
+
+ if (!priv->bf_mapping)
+ return -ENOMEM;
+
+ mutex_lock(&priv->bf_mutex);
+ if (!list_empty(&priv->bf_list))
+ uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list);
+ else {
+ if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) {
+ err = -ENOMEM;
+ goto out;
+ }
+ uar = kmalloc(sizeof *uar, GFP_KERNEL);
+ if (!uar) {
+ err = -ENOMEM;
+ goto out;
+ }
+ err = mlx4_uar_alloc(dev, uar);
+ if (err)
+ goto free_kmalloc;
+
+ uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!uar->map) {
+ err = -ENOMEM;
+ goto free_uar;
+ }
+
+ uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
+ if (!uar->bf_map) {
+ err = -ENOMEM;
+ goto unmap_uar;
+ }
+ uar->free_bf_bmap = 0;
+ list_add(&uar->bf_list, &priv->bf_list);
+ }
+
+ idx = ffz(uar->free_bf_bmap);
+ uar->free_bf_bmap |= 1 << idx;
+ bf->uar = uar;
+ bf->offset = 0;
+ bf->buf_size = dev->caps.bf_reg_size / 2;
+ bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size;
+ if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1)
+ list_del_init(&uar->bf_list);
+
+ goto out;
+
+unmap_uar:
+ bf->uar = NULL;
+ iounmap(uar->map);
+
+free_uar:
+ mlx4_uar_free(dev, uar);
+
+free_kmalloc:
+ kfree(uar);
+
+out:
+ mutex_unlock(&priv->bf_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_bf_alloc);
+
+void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int idx;
+
+ if (!bf->uar || !bf->uar->bf_map)
+ return;
+
+ mutex_lock(&priv->bf_mutex);
+ idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size;
+ bf->uar->free_bf_bmap &= ~(1 << idx);
+ if (!bf->uar->free_bf_bmap) {
+ if (!list_empty(&bf->uar->bf_list))
+ list_del(&bf->uar->bf_list);
+
+ io_mapping_unmap(bf->uar->bf_map);
+ iounmap(bf->uar->map);
+ mlx4_uar_free(dev, bf->uar);
+ kfree(bf->uar);
+ } else if (list_empty(&bf->uar->bf_list))
+ list_add(&bf->uar->bf_list, &priv->bf_list);
+
+ mutex_unlock(&priv->bf_mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_bf_free);
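A sketch of how a consumer might pair these BlueFlame helpers, falling back to the ordinary UAR doorbell when no BlueFlame register can be allocated; dev is a probed mlx4_dev and the surrounding queue code is assumed:

	struct mlx4_bf bf;
	bool bf_enabled = false;

	if (!mlx4_bf_alloc(dev, &bf))
		bf_enabled = true;	/* bf.reg and bf.buf_size are valid */

	/* use bf.reg for doorbell writes while the queue is active */

	if (bf_enabled)
		mlx4_bf_free(dev, &bf);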
+
int mlx4_init_uar_table(struct mlx4_dev *dev)
{
if (dev->caps.num_uars <= 128) {
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 451339559bdc..eca7d8596f87 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -90,12 +90,79 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
return err;
}
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
+ u64 mac, int *qpn, u8 reserve)
{
- struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+ int err;
+
+ if (reserve) {
+ err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+ if (err) {
+ mlx4_err(dev, "Failed to reserve qp for mac registration\n");
+ return err;
+ }
+ }
+ qp.qpn = *qpn;
+
+ mac &= 0xffffffffffffULL;
+ mac = cpu_to_be64(mac << 16);
+ memcpy(&gid[10], &mac, ETH_ALEN);
+ gid[5] = port;
+ gid[7] = MLX4_UC_STEER << 1;
+
+ err = mlx4_qp_attach_common(dev, &qp, gid, 0,
+ MLX4_PROT_ETH, MLX4_UC_STEER);
+ if (err && reserve)
+ mlx4_qp_release_range(dev, *qpn, 1);
+
+ return err;
+}
+
+static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
+ u64 mac, int qpn, u8 free)
+{
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+
+ qp.qpn = qpn;
+ mac &= 0xffffffffffffULL;
+ mac = cpu_to_be64(mac << 16);
+ memcpy(&gid[10], &mac, ETH_ALEN);
+ gid[5] = port;
+ gid[7] = MLX4_UC_STEER << 1;
+
+ mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
+ if (free)
+ mlx4_qp_release_range(dev, qpn, 1);
+}
+
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_table *table = &info->mac_table;
+ struct mlx4_mac_entry *entry;
int i, err = 0;
int free = -1;
+ if (dev->caps.vep_uc_steering) {
+ err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
+ if (!err) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry) {
+ mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
+ return -ENOMEM;
+ }
+ entry->mac = mac;
+ err = radix_tree_insert(&info->mac_tree, *qpn, entry);
+ if (err) {
+ mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
+ return err;
+ }
+ } else
+ return err;
+ }
mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
mutex_lock(&table->mutex);
for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
@@ -106,7 +173,6 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
 /* MAC already registered, increase reference count */
- *index = i;
++table->refs[i];
goto out;
}
@@ -137,7 +203,8 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
goto out;
}
- *index = free;
+ if (!dev->caps.vep_uc_steering)
+ *qpn = info->base_qpn + free;
++table->total;
out:
mutex_unlock(&table->mutex);
@@ -145,20 +212,52 @@ out:
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index)
+static int validate_index(struct mlx4_dev *dev,
+ struct mlx4_mac_table *table, int index)
{
- struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
+ int err = 0;
- mutex_lock(&table->mutex);
- if (!table->refs[index]) {
- mlx4_warn(dev, "No MAC entry for index %d\n", index);
- goto out;
+ if (index < 0 || index >= table->max || !table->entries[index]) {
+ mlx4_warn(dev, "No valid Mac entry for the given index\n");
+ err = -EINVAL;
}
- if (--table->refs[index]) {
- mlx4_warn(dev, "Have more references for index %d,"
- "no need to modify MAC table\n", index);
- goto out;
+ return err;
+}
+
+static int find_index(struct mlx4_dev *dev,
+ struct mlx4_mac_table *table, u64 mac)
+{
+ int i;
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
+ return i;
}
+ /* Mac not found */
+ return -EINVAL;
+}
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_table *table = &info->mac_table;
+ int index = qpn - info->base_qpn;
+ struct mlx4_mac_entry *entry;
+
+ if (dev->caps.vep_uc_steering) {
+ entry = radix_tree_lookup(&info->mac_tree, qpn);
+ if (entry) {
+ mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
+ radix_tree_delete(&info->mac_tree, qpn);
+ index = find_index(dev, table, entry->mac);
+ kfree(entry);
+ }
+ }
+
+ mutex_lock(&table->mutex);
+
+ if (validate_index(dev, table, index))
+ goto out;
+
table->entries[index] = 0;
mlx4_set_port_mac_table(dev, port, table->entries);
--table->total;
@@ -167,6 +266,44 @@ out:
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_table *table = &info->mac_table;
+ int index = qpn - info->base_qpn;
+ struct mlx4_mac_entry *entry;
+ int err;
+
+ if (dev->caps.vep_uc_steering) {
+ entry = radix_tree_lookup(&info->mac_tree, qpn);
+ if (!entry)
+ return -EINVAL;
+ index = find_index(dev, table, entry->mac);
+ mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
+ entry->mac = new_mac;
+ err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
+ if (err || index < 0)
+ return err;
+ }
+
+ mutex_lock(&table->mutex);
+
+ err = validate_index(dev, table, index);
+ if (err)
+ goto out;
+
+ table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
+
+ err = mlx4_set_port_mac_table(dev, port, table->entries);
+ if (unlikely(err)) {
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
+ table->entries[index] = 0;
+ }
+out:
+ mutex_unlock(&table->mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_replace_mac);
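A sketch of the register/replace/unregister flow these port helpers now expose, where the qpn returned by mlx4_register_mac() (rather than a table index) acts as the handle for later calls; dev, port, old_mac and new_mac are assumed, and the final wrap argument is left 0:

	int qpn, err;

	err = mlx4_register_mac(dev, port, old_mac, &qpn, 0);
	if (err)
		return err;

	/* MAC changed at runtime: keep the qpn, swap the steering entry */
	err = mlx4_replace_mac(dev, port, qpn, new_mac, 0);

	mlx4_unregister_mac(dev, port, qpn);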
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
__be32 *entries)
{
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index e749f82865fe..b967647d0c76 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -107,9 +107,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_AUXC].num = request->num_qp;
profile[MLX4_RES_SRQ].num = request->num_srq;
profile[MLX4_RES_CQ].num = request->num_cq;
- profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs,
- dev_cap->reserved_eqs +
- num_possible_cpus() + 1);
+ profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
profile[MLX4_RES_DMPT].num = request->num_mpt;
profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
profile[MLX4_RES_MTT].num = request->num_mtt;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index a7f2eed9a08a..1f4e8680a96a 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -3645,6 +3645,7 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp)
dma_free_coherent(&pdev->dev, bytes,
ss->fw_stats, ss->fw_stats_bus);
ss->fw_stats = NULL;
+ netif_napi_del(&ss->napi);
}
}
kfree(mgp->ss);
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 8c66e22c3a0a..50986840c99c 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -2441,7 +2441,7 @@ static struct pci_error_handlers pch_gbe_err_handler = {
.resume = pch_gbe_io_resume
};
-static struct pci_driver pch_gbe_pcidev = {
+static struct pci_driver pch_gbe_driver = {
.name = KBUILD_MODNAME,
.id_table = pch_gbe_pcidev_id,
.probe = pch_gbe_probe,
@@ -2458,7 +2458,7 @@ static int __init pch_gbe_init_module(void)
{
int ret;
- ret = pci_register_driver(&pch_gbe_pcidev);
+ ret = pci_register_driver(&pch_gbe_driver);
if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
if (copybreak == 0) {
pr_info("copybreak disabled\n");
@@ -2472,7 +2472,7 @@ static int __init pch_gbe_init_module(void)
static void __exit pch_gbe_exit_module(void)
{
- pci_unregister_driver(&pch_gbe_pcidev);
+ pci_unregister_driver(&pch_gbe_driver);
}
module_init(pch_gbe_init_module);
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index b8bd936374f2..d890679e4c4d 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1054,6 +1054,7 @@ static int efx_init_io(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
+ bool use_wc;
int rc;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1104,8 +1105,21 @@ static int efx_init_io(struct efx_nic *efx)
rc = -EIO;
goto fail3;
}
- efx->membase = ioremap_wc(efx->membase_phys,
- efx->type->mem_map_size);
+
+ /* bug22643: If SR-IOV is enabled then tx push over a write combined
+ * mapping is unsafe. We need to disable write combining in this case.
+ * MSI is unsupported when SR-IOV is enabled, and the firmware will
+ * have removed the MSI capability. So write combining is safe if
+ * there is an MSI capability.
+ */
+ use_wc = (!EFX_WORKAROUND_22643(efx) ||
+ pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
+ if (use_wc)
+ efx->membase = ioremap_wc(efx->membase_phys,
+ efx->type->mem_map_size);
+ else
+ efx->membase = ioremap_nocache(efx->membase_phys,
+ efx->type->mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index e4dd3a7f304b..99ff11400cef 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -38,6 +38,8 @@
#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
+/* Write combining and sriov=enabled are incompatible */
+#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
/* Spurious parity errors in TSORT buffers */
#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index bc86f4b6ecc2..727874d9deb6 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -49,6 +49,8 @@
struct smsc95xx_priv {
u32 mac_cr;
+ u32 hash_hi;
+ u32 hash_lo;
spinlock_t mac_cr_lock;
bool use_tx_csum;
bool use_rx_csum;
@@ -370,10 +372,11 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
{
struct usbnet *dev = netdev_priv(netdev);
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- u32 hash_hi = 0;
- u32 hash_lo = 0;
unsigned long flags;
+ pdata->hash_hi = 0;
+ pdata->hash_lo = 0;
+
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
if (dev->net->flags & IFF_PROMISC) {
@@ -394,13 +397,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
u32 bitnum = smsc95xx_hash(ha->addr);
u32 mask = 0x01 << (bitnum & 0x1F);
if (bitnum & 0x20)
- hash_hi |= mask;
+ pdata->hash_hi |= mask;
else
- hash_lo |= mask;
+ pdata->hash_lo |= mask;
}
netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n",
- hash_hi, hash_lo);
+ pdata->hash_hi, pdata->hash_lo);
} else {
netif_dbg(dev, drv, dev->net, "receive own packets only\n");
pdata->mac_cr &=
@@ -410,8 +413,8 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
/* Initiate async writes, as we can't wait for completion here */
- smsc95xx_write_reg_async(dev, HASHH, &hash_hi);
- smsc95xx_write_reg_async(dev, HASHL, &hash_lo);
+ smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
+ smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo);
smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
}
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 115f162c617a..524825720a09 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2160,6 +2160,8 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
if (!ath_drain_all_txq(sc, false))
ath_reset(sc, false);
+ ieee80211_wake_queues(hw);
+
out:
ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
mutex_unlock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 960d717ca7c2..a3241cd089b1 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1328,7 +1328,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
hdr = (struct ieee80211_hdr *)skb->data;
fc = hdr->frame_control;
- for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ for (i = 0; i < sc->hw->max_rates; i++) {
struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
if (!rate->count)
break;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index ef22096d40c9..26734e53b37f 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1725,8 +1725,8 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
u8 tidno;
spin_lock_bh(&txctl->txq->axq_lock);
-
- if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
+ if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
+ ieee80211_is_data_qos(hdr->frame_control)) {
tidno = ieee80211_get_qos_ctl(hdr)[0] &
IEEE80211_QOS_CTL_TID_MASK;
tid = ATH_AN_2_TID(txctl->an, tidno);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 2003c1d4295f..08ccb9496f76 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -2265,7 +2265,7 @@ signed long iwlagn_wait_notification(struct iwl_priv *priv,
int ret;
ret = wait_event_timeout(priv->_agn.notif_waitq,
- &wait_entry->triggered,
+ wait_entry->triggered,
timeout);
spin_lock_bh(&priv->_agn.notif_wait_lock);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 581dc9f10273..321b18b59135 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -3009,14 +3009,17 @@ static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
- if (!priv->_agn.offchan_tx_skb)
- return -EINVAL;
+ if (!priv->_agn.offchan_tx_skb) {
+ ret = -EINVAL;
+ goto unlock;
+ }
priv->_agn.offchan_tx_skb = NULL;
ret = iwl_scan_cancel_timeout(priv, 200);
if (ret)
ret = -EIO;
+unlock:
mutex_unlock(&priv->mutex);
return ret;
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index 09fae2f0ea08..736bbb9bd1d0 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -153,6 +153,9 @@ static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev,
priv->scan_request = request;
err = orinoco_hw_trigger_scan(priv, request->ssids);
+ /* On error we aren't processing the request */
+ if (err)
+ priv->scan_request = NULL;
return err;
}
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index f3d396e7544b..62c6b2b37dbe 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -1376,13 +1376,13 @@ static void orinoco_process_scan_results(struct work_struct *work)
spin_lock_irqsave(&priv->scan_lock, flags);
list_for_each_entry_safe(sd, temp, &priv->scan_list, list) {
- spin_unlock_irqrestore(&priv->scan_lock, flags);
buf = sd->buf;
len = sd->len;
type = sd->type;
list_del(&sd->list);
+ spin_unlock_irqrestore(&priv->scan_lock, flags);
kfree(sd);
if (len > 0) {
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index f1a92144996f..4e368657a83c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -719,6 +719,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
/* AzureWave */
{ USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -913,7 +914,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
/* AzureWave */
{ USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -937,6 +937,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Edimax */
+ { USB_DEVICE(0x7392, 0x4085), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Encore */
{ USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gemtek */
@@ -961,6 +963,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Planex */
+ { USB_DEVICE(0x2019, 0x5201), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -972,6 +975,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Sweex */
{ USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Toshiba */
+ { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Zyxel */
{ USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) },
#endif
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 4f92cba6810a..f74a8701c67d 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -410,8 +410,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
if (!efuse_shadow_update_chk(hw)) {
efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
- memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
- (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+ &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
@@ -446,9 +446,9 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
if (word_en != 0x0F) {
u8 tmpdata[8];
- memcpy((void *)tmpdata,
- (void *)(&rtlefuse->
- efuse_map[EFUSE_MODIFY_MAP][base]), 8);
+ memcpy(tmpdata,
+ &rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base],
+ 8);
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD,
("U-efuse\n"), tmpdata, 8);
@@ -465,8 +465,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
efuse_power_switch(hw, true, false);
efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
- memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
- (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+ &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("<---\n"));
@@ -479,13 +479,12 @@ void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw)
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
if (rtlefuse->autoload_failflag == true) {
- memset((void *)(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0]), 128,
- 0xFF);
+ memset(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 0xFF, 128);
} else
efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
- memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
- (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+ &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
}
@@ -694,8 +693,8 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
if (offset > 15)
return false;
- memset((void *)data, PGPKT_DATA_SIZE * sizeof(u8), 0xff);
- memset((void *)tmpdata, PGPKT_DATA_SIZE * sizeof(u8), 0xff);
+ memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
+ memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) {
if (readstate & PG_STATE_HEADER) {
@@ -862,7 +861,7 @@ static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
- memset((void *)originaldata, 8 * sizeof(u8), 0xff);
+ memset(originaldata, 0xff, 8 * sizeof(u8));
if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) {
badworden = efuse_word_enable_data_write(hw,
@@ -917,7 +916,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
target_pkt.offset = offset;
target_pkt.word_en = word_en;
- memset((void *)target_pkt.data, 8 * sizeof(u8), 0xFF);
+ memset(target_pkt.data, 0xFF, 8 * sizeof(u8));
efuse_word_enable_data_read(word_en, data, target_pkt.data);
target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en);
@@ -1022,7 +1021,7 @@ static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
u8 badworden = 0x0F;
u8 tmpdata[8];
- memset((void *)tmpdata, PGPKT_DATA_SIZE, 0xff);
+ memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
("word_en = %x efuse_addr=%x\n", word_en, efuse_addr));
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 81e80489a052..58236e6d0921 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -60,6 +60,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 },
+ { USB_DEVICE(0x157e, 0x3207), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
/* ZD1211B */
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 4789f8e8bf7a..a4115f1afe1f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -36,7 +36,7 @@
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <asm/cacheflush.h>
@@ -3135,7 +3135,7 @@ static void iommu_flush_all(void)
}
}
-static int iommu_suspend(struct sys_device *dev, pm_message_t state)
+static int iommu_suspend(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu = NULL;
@@ -3175,7 +3175,7 @@ nomem:
return -ENOMEM;
}
-static int iommu_resume(struct sys_device *dev)
+static void iommu_resume(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu = NULL;
@@ -3183,7 +3183,7 @@ static int iommu_resume(struct sys_device *dev)
if (init_iommu_hw()) {
WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
- return -EIO;
+ return;
}
for_each_active_iommu(iommu, drhd) {
@@ -3204,40 +3204,20 @@ static int iommu_resume(struct sys_device *dev)
for_each_active_iommu(iommu, drhd)
kfree(iommu->iommu_state);
-
- return 0;
}
-static struct sysdev_class iommu_sysclass = {
- .name = "iommu",
+static struct syscore_ops iommu_syscore_ops = {
.resume = iommu_resume,
.suspend = iommu_suspend,
};
-static struct sys_device device_iommu = {
- .cls = &iommu_sysclass,
-};
-
-static int __init init_iommu_sysfs(void)
+static void __init init_iommu_pm_ops(void)
{
- int error;
-
- error = sysdev_class_register(&iommu_sysclass);
- if (error)
- return error;
-
- error = sysdev_register(&device_iommu);
- if (error)
- sysdev_class_unregister(&iommu_sysclass);
-
- return error;
+ register_syscore_ops(&iommu_syscore_ops);
}
#else
-static int __init init_iommu_sysfs(void)
-{
- return 0;
-}
+static inline void init_iommu_pm_ops(void) { }
#endif /* CONFIG_PM */
/*
@@ -3320,7 +3300,7 @@ int __init intel_iommu_init(void)
#endif
dma_ops = &intel_dma_ops;
- init_iommu_sysfs();
+ init_iommu_pm_ops();
register_iommu(&intel_iommu_ops);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b714d787bddd..2472e7177b4b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -740,6 +740,12 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
if (!__pci_complete_power_transition(dev, state))
error = 0;
+ /*
+ * When aspm_policy is "powersave" this call ensures
+ * that ASPM is configured.
+ */
+ if (!error && dev->bus->self)
+ pcie_aspm_powersave_config_link(dev->bus->self);
return error;
}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 3188cd96b338..eee09f756ec9 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -69,6 +69,7 @@ struct pcie_link_state {
};
static int aspm_disabled, aspm_force, aspm_clear_state;
+static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);
@@ -707,6 +708,28 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
up_read(&pci_bus_sem);
}
+void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
+{
+ struct pcie_link_state *link = pdev->link_state;
+
+ if (aspm_disabled || !pci_is_pcie(pdev) || !link)
+ return;
+
+ if (aspm_policy != POLICY_POWERSAVE)
+ return;
+
+ if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
+ return;
+
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ pcie_config_aspm_path(link);
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
+}
+
/*
* pci_disable_link_state - disable pci device's link state, so the link will
* never enter specific states
@@ -747,6 +770,8 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
int i;
struct pcie_link_state *link;
+ if (aspm_disabled)
+ return -EPERM;
for (i = 0; i < ARRAY_SIZE(policy_str); i++)
if (!strncmp(val, policy_str[i], strlen(policy_str[i])))
break;
@@ -801,6 +826,8 @@ static ssize_t link_state_store(struct device *dev,
struct pcie_link_state *link, *root = pdev->link_state->root;
u32 val = buf[0] - '0', state = 0;
+ if (aspm_disabled)
+ return -EPERM;
if (n < 1 || val > 3)
return -EINVAL;
@@ -896,6 +923,7 @@ static int __init pcie_aspm_disable(char *str)
{
if (!strcmp(str, "off")) {
aspm_disabled = 1;
+ aspm_support_enabled = false;
printk(KERN_INFO "PCIe ASPM is disabled\n");
} else if (!strcmp(str, "force")) {
aspm_force = 1;
@@ -930,3 +958,8 @@ int pcie_aspm_enabled(void)
}
EXPORT_SYMBOL(pcie_aspm_enabled);
+bool pcie_aspm_support_enabled(void)
+{
+ return aspm_support_enabled;
+}
+EXPORT_SYMBOL(pcie_aspm_support_enabled);
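A sketch of how a driver might consult the new query before exposing ASPM-related controls, assuming pdev is the device being probed; the message text is illustrative:

	if (!pcie_aspm_support_enabled()) {
		/* booted with pcie_aspm=off */
		dev_info(&pdev->dev, "ASPM disabled, skipping link tuning\n");
		return 0;
	}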
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 5130d0d22390..595654a1a6a6 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/pcieport_if.h>
#include <linux/aer.h>
-#include <linux/pci-aspm.h>
#include "../pci.h"
#include "portdrv.h"
@@ -356,10 +355,8 @@ int pcie_port_device_register(struct pci_dev *dev)
/* Get and check PCI Express port services */
capabilities = get_port_device_capability(dev);
- if (!capabilities) {
- pcie_no_aspm();
+ if (!capabilities)
return 0;
- }
pci_set_master(dev);
/*
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 61bf5d724139..52a462fc6b84 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -117,10 +117,24 @@ config BATTERY_BQ20Z75
config BATTERY_BQ27x00
tristate "BQ27x00 battery driver"
+ help
+ Say Y here to enable support for batteries with BQ27x00 (I2C/HDQ) chips.
+
+config BATTERY_BQ27X00_I2C
+ bool "BQ27200/BQ27500 support"
+ depends on BATTERY_BQ27x00
depends on I2C
+ default y
help
Say Y here to enable support for batteries with BQ27x00 (I2C) chips.
+config BATTERY_BQ27X00_PLATFORM
+ bool "BQ27000 support"
+ depends on BATTERY_BQ27x00
+ default y
+ help
+ Say Y here to enable support for batteries with BQ27000 (HDQ) chips.
+
config BATTERY_DA9030
tristate "DA9030 battery driver"
depends on PMIC_DA903X
diff --git a/drivers/power/bq20z75.c b/drivers/power/bq20z75.c
index 492da27e1a47..506585e31a5b 100644
--- a/drivers/power/bq20z75.c
+++ b/drivers/power/bq20z75.c
@@ -25,6 +25,10 @@
#include <linux/power_supply.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+
+#include <linux/power/bq20z75.h>
enum {
REG_MANUFACTURER_DATA,
@@ -38,11 +42,22 @@ enum {
REG_CYCLE_COUNT,
REG_SERIAL_NUMBER,
REG_REMAINING_CAPACITY,
+ REG_REMAINING_CAPACITY_CHARGE,
REG_FULL_CHARGE_CAPACITY,
+ REG_FULL_CHARGE_CAPACITY_CHARGE,
REG_DESIGN_CAPACITY,
+ REG_DESIGN_CAPACITY_CHARGE,
REG_DESIGN_VOLTAGE,
};
+/* Battery Mode defines */
+#define BATTERY_MODE_OFFSET 0x03
+#define BATTERY_MODE_MASK 0x8000
+enum bq20z75_battery_mode {
+ BATTERY_MODE_AMPS,
+ BATTERY_MODE_WATTS
+};
+
/* manufacturer access defines */
#define MANUFACTURER_ACCESS_STATUS 0x0006
#define MANUFACTURER_ACCESS_SLEEP 0x0011
@@ -78,8 +93,12 @@ static const struct bq20z75_device_data {
BQ20Z75_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100),
[REG_REMAINING_CAPACITY] =
BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
+ [REG_REMAINING_CAPACITY_CHARGE] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_CHARGE_NOW, 0x0F, 0, 65535),
[REG_FULL_CHARGE_CAPACITY] =
BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL, 0x10, 0, 65535),
+ [REG_FULL_CHARGE_CAPACITY_CHARGE] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_CHARGE_FULL, 0x10, 0, 65535),
[REG_TIME_TO_EMPTY] =
BQ20Z75_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0,
65535),
@@ -93,6 +112,9 @@ static const struct bq20z75_device_data {
[REG_DESIGN_CAPACITY] =
BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 0x18, 0,
65535),
+ [REG_DESIGN_CAPACITY_CHARGE] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 0x18, 0,
+ 65535),
[REG_DESIGN_VOLTAGE] =
BQ20Z75_DATA(POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, 0x19, 0,
65535),
@@ -117,39 +139,72 @@ static enum power_supply_property bq20z75_properties[] = {
POWER_SUPPLY_PROP_ENERGY_NOW,
POWER_SUPPLY_PROP_ENERGY_FULL,
POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
};
struct bq20z75_info {
- struct i2c_client *client;
- struct power_supply power_supply;
+ struct i2c_client *client;
+ struct power_supply power_supply;
+ struct bq20z75_platform_data *pdata;
+ bool is_present;
+ bool gpio_detect;
+ bool enable_detection;
+ int irq;
};
static int bq20z75_read_word_data(struct i2c_client *client, u8 address)
{
- s32 ret;
+ struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ s32 ret = 0;
+ int retries = 1;
+
+ if (bq20z75_device->pdata)
+ retries = max(bq20z75_device->pdata->i2c_retry_count + 1, 1);
+
+ while (retries > 0) {
+ ret = i2c_smbus_read_word_data(client, address);
+ if (ret >= 0)
+ break;
+ retries--;
+ }
- ret = i2c_smbus_read_word_data(client, address);
if (ret < 0) {
- dev_err(&client->dev,
+ dev_dbg(&client->dev,
"%s: i2c read at address 0x%x failed\n",
__func__, address);
return ret;
}
+
return le16_to_cpu(ret);
}
static int bq20z75_write_word_data(struct i2c_client *client, u8 address,
u16 value)
{
- s32 ret;
+ struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ s32 ret = 0;
+ int retries = 1;
+
+ if (bq20z75_device->pdata)
+ retries = max(bq20z75_device->pdata->i2c_retry_count + 1, 1);
+
+ while (retries > 0) {
+ ret = i2c_smbus_write_word_data(client, address,
+ le16_to_cpu(value));
+ if (ret >= 0)
+ break;
+ retries--;
+ }
- ret = i2c_smbus_write_word_data(client, address, le16_to_cpu(value));
if (ret < 0) {
- dev_err(&client->dev,
+ dev_dbg(&client->dev,
"%s: i2c write to address 0x%x failed\n",
__func__, address);
return ret;
}
+
return 0;
}
@@ -158,6 +213,19 @@ static int bq20z75_get_battery_presence_and_health(
union power_supply_propval *val)
{
s32 ret;
+ struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+
+ if (psp == POWER_SUPPLY_PROP_PRESENT &&
+ bq20z75_device->gpio_detect) {
+ ret = gpio_get_value(
+ bq20z75_device->pdata->battery_detect);
+ if (ret == bq20z75_device->pdata->battery_detect_present)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ bq20z75_device->is_present = val->intval;
+ return ret;
+ }
/* Write to ManufacturerAccess with
* ManufacturerAccess command and then
@@ -165,9 +233,11 @@ static int bq20z75_get_battery_presence_and_health(
ret = bq20z75_write_word_data(client,
bq20z75_data[REG_MANUFACTURER_DATA].addr,
MANUFACTURER_ACCESS_STATUS);
- if (ret < 0)
+ if (ret < 0) {
+ if (psp == POWER_SUPPLY_PROP_PRESENT)
+ val->intval = 0; /* battery removed */
return ret;
-
+ }
ret = bq20z75_read_word_data(client,
bq20z75_data[REG_MANUFACTURER_DATA].addr);
@@ -248,30 +318,39 @@ static void bq20z75_unit_adjustment(struct i2c_client *client,
{
#define BASE_UNIT_CONVERSION 1000
#define BATTERY_MODE_CAP_MULT_WATT (10 * BASE_UNIT_CONVERSION)
-#define TIME_UNIT_CONVERSION 600
-#define TEMP_KELVIN_TO_CELCIUS 2731
+#define TIME_UNIT_CONVERSION 60
+#define TEMP_KELVIN_TO_CELSIUS 2731
switch (psp) {
case POWER_SUPPLY_PROP_ENERGY_NOW:
case POWER_SUPPLY_PROP_ENERGY_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ /* bq20z75 provides energy in units of 10mWh.
+ * Convert to µWh
+ */
val->intval *= BATTERY_MODE_CAP_MULT_WATT;
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
val->intval *= BASE_UNIT_CONVERSION;
break;
case POWER_SUPPLY_PROP_TEMP:
- /* bq20z75 provides battery tempreture in 0.1°K
- * so convert it to 0.1°C */
- val->intval -= TEMP_KELVIN_TO_CELCIUS;
- val->intval *= 10;
+ /* bq20z75 provides battery temperature in 0.1K
+ * so convert it to 0.1°C
+ */
+ val->intval -= TEMP_KELVIN_TO_CELSIUS;
break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+ /* bq20z75 provides time to empty and time to full in minutes.
+ * Convert to seconds
+ */
val->intval *= TIME_UNIT_CONVERSION;
break;
@@ -281,11 +360,44 @@ static void bq20z75_unit_adjustment(struct i2c_client *client,
}
}
+static enum bq20z75_battery_mode
+bq20z75_set_battery_mode(struct i2c_client *client,
+ enum bq20z75_battery_mode mode)
+{
+ int ret, original_val;
+
+ original_val = bq20z75_read_word_data(client, BATTERY_MODE_OFFSET);
+ if (original_val < 0)
+ return original_val;
+
+ if ((original_val & BATTERY_MODE_MASK) == mode)
+ return mode;
+
+ if (mode == BATTERY_MODE_AMPS)
+ ret = original_val & ~BATTERY_MODE_MASK;
+ else
+ ret = original_val | BATTERY_MODE_MASK;
+
+ ret = bq20z75_write_word_data(client, BATTERY_MODE_OFFSET, ret);
+ if (ret < 0)
+ return ret;
+
+ return original_val & BATTERY_MODE_MASK;
+}
+
static int bq20z75_get_battery_capacity(struct i2c_client *client,
int reg_offset, enum power_supply_property psp,
union power_supply_propval *val)
{
s32 ret;
+ enum bq20z75_battery_mode mode = BATTERY_MODE_WATTS;
+
+ if (power_supply_is_amp_property(psp))
+ mode = BATTERY_MODE_AMPS;
+
+ mode = bq20z75_set_battery_mode(client, mode);
+ if (mode < 0)
+ return mode;
ret = bq20z75_read_word_data(client, bq20z75_data[reg_offset].addr);
if (ret < 0)
@@ -298,6 +410,10 @@ static int bq20z75_get_battery_capacity(struct i2c_client *client,
} else
val->intval = ret;
+ ret = bq20z75_set_battery_mode(client, mode);
+ if (ret < 0)
+ return ret;
+
return 0;
}
@@ -318,12 +434,25 @@ static int bq20z75_get_battery_serial_number(struct i2c_client *client,
return 0;
}
+static int bq20z75_get_property_index(struct i2c_client *client,
+ enum power_supply_property psp)
+{
+ int count;
+ for (count = 0; count < ARRAY_SIZE(bq20z75_data); count++)
+ if (psp == bq20z75_data[count].psp)
+ return count;
+
+ dev_warn(&client->dev,
+ "%s: Invalid Property - %d\n", __func__, psp);
+
+ return -EINVAL;
+}
+
static int bq20z75_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- int count;
- int ret;
+ int ret = 0;
struct bq20z75_info *bq20z75_device = container_of(psy,
struct bq20z75_info, power_supply);
struct i2c_client *client = bq20z75_device->client;
@@ -332,8 +461,8 @@ static int bq20z75_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_PRESENT:
case POWER_SUPPLY_PROP_HEALTH:
ret = bq20z75_get_battery_presence_and_health(client, psp, val);
- if (ret)
- return ret;
+ if (psp == POWER_SUPPLY_PROP_PRESENT)
+ return 0;
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
@@ -343,22 +472,19 @@ static int bq20z75_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_ENERGY_NOW:
case POWER_SUPPLY_PROP_ENERGY_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_CAPACITY:
- for (count = 0; count < ARRAY_SIZE(bq20z75_data); count++) {
- if (psp == bq20z75_data[count].psp)
- break;
- }
-
- ret = bq20z75_get_battery_capacity(client, count, psp, val);
- if (ret)
- return ret;
+ ret = bq20z75_get_property_index(client, psp);
+ if (ret < 0)
+ break;
+ ret = bq20z75_get_battery_capacity(client, ret, psp, val);
break;
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
ret = bq20z75_get_battery_serial_number(client, val);
- if (ret)
- return ret;
break;
case POWER_SUPPLY_PROP_STATUS:
@@ -369,15 +495,11 @@ static int bq20z75_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- for (count = 0; count < ARRAY_SIZE(bq20z75_data); count++) {
- if (psp == bq20z75_data[count].psp)
- break;
- }
-
- ret = bq20z75_get_battery_property(client, count, psp, val);
- if (ret)
- return ret;
+ ret = bq20z75_get_property_index(client, psp);
+ if (ret < 0)
+ break;
+ ret = bq20z75_get_battery_property(client, ret, psp, val);
break;
default:
@@ -386,26 +508,58 @@ static int bq20z75_get_property(struct power_supply *psy,
return -EINVAL;
}
- /* Convert units to match requirements for power supply class */
- bq20z75_unit_adjustment(client, psp, val);
+ if (!bq20z75_device->enable_detection)
+ goto done;
+
+ if (!bq20z75_device->gpio_detect &&
+ bq20z75_device->is_present != (ret >= 0)) {
+ bq20z75_device->is_present = (ret >= 0);
+ power_supply_changed(&bq20z75_device->power_supply);
+ }
+
+done:
+ if (!ret) {
+ /* Convert units to match requirements for power supply class */
+ bq20z75_unit_adjustment(client, psp, val);
+ }
dev_dbg(&client->dev,
- "%s: property = %d, value = %d\n", __func__, psp, val->intval);
+ "%s: property = %d, value = %x\n", __func__, psp, val->intval);
+
+ if (ret && bq20z75_device->is_present)
+ return ret;
+
+ /* battery not present, so return NODATA for properties */
+ if (ret)
+ return -ENODATA;
return 0;
}
-static int bq20z75_probe(struct i2c_client *client,
+static irqreturn_t bq20z75_irq(int irq, void *devid)
+{
+ struct power_supply *battery = devid;
+
+ power_supply_changed(battery);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit bq20z75_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct bq20z75_info *bq20z75_device;
+ struct bq20z75_platform_data *pdata = client->dev.platform_data;
int rc;
+ int irq;
bq20z75_device = kzalloc(sizeof(struct bq20z75_info), GFP_KERNEL);
if (!bq20z75_device)
return -ENOMEM;
bq20z75_device->client = client;
+ bq20z75_device->enable_detection = false;
+ bq20z75_device->gpio_detect = false;
bq20z75_device->power_supply.name = "battery";
bq20z75_device->power_supply.type = POWER_SUPPLY_TYPE_BATTERY;
bq20z75_device->power_supply.properties = bq20z75_properties;
@@ -413,26 +567,86 @@ static int bq20z75_probe(struct i2c_client *client,
ARRAY_SIZE(bq20z75_properties);
bq20z75_device->power_supply.get_property = bq20z75_get_property;
+ if (pdata) {
+ bq20z75_device->gpio_detect =
+ gpio_is_valid(pdata->battery_detect);
+ bq20z75_device->pdata = pdata;
+ }
+
i2c_set_clientdata(client, bq20z75_device);
+ if (!bq20z75_device->gpio_detect)
+ goto skip_gpio;
+
+ rc = gpio_request(pdata->battery_detect, dev_name(&client->dev));
+ if (rc) {
+ dev_warn(&client->dev, "Failed to request gpio: %d\n", rc);
+ bq20z75_device->gpio_detect = false;
+ goto skip_gpio;
+ }
+
+ rc = gpio_direction_input(pdata->battery_detect);
+ if (rc) {
+ dev_warn(&client->dev, "Failed to get gpio as input: %d\n", rc);
+ gpio_free(pdata->battery_detect);
+ bq20z75_device->gpio_detect = false;
+ goto skip_gpio;
+ }
+
+ irq = gpio_to_irq(pdata->battery_detect);
+ if (irq <= 0) {
+ dev_warn(&client->dev, "Failed to get gpio as irq: %d\n", irq);
+ gpio_free(pdata->battery_detect);
+ bq20z75_device->gpio_detect = false;
+ goto skip_gpio;
+ }
+
+ rc = request_irq(irq, bq20z75_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ dev_name(&client->dev), &bq20z75_device->power_supply);
+ if (rc) {
+ dev_warn(&client->dev, "Failed to request irq: %d\n", rc);
+ gpio_free(pdata->battery_detect);
+ bq20z75_device->gpio_detect = false;
+ goto skip_gpio;
+ }
+
+ bq20z75_device->irq = irq;
+
+skip_gpio:
+
rc = power_supply_register(&client->dev, &bq20z75_device->power_supply);
if (rc) {
dev_err(&client->dev,
"%s: Failed to register power supply\n", __func__);
- kfree(bq20z75_device);
- return rc;
+ goto exit_psupply;
}
dev_info(&client->dev,
"%s: battery gas gauge device registered\n", client->name);
return 0;
+
+exit_psupply:
+ if (bq20z75_device->irq)
+ free_irq(bq20z75_device->irq, &bq20z75_device->power_supply);
+ if (bq20z75_device->gpio_detect)
+ gpio_free(pdata->battery_detect);
+
+ kfree(bq20z75_device);
+
+ return rc;
}
-static int bq20z75_remove(struct i2c_client *client)
+static int __devexit bq20z75_remove(struct i2c_client *client)
{
struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ if (bq20z75_device->irq)
+ free_irq(bq20z75_device->irq, &bq20z75_device->power_supply);
+ if (bq20z75_device->gpio_detect)
+ gpio_free(bq20z75_device->pdata->battery_detect);
+
power_supply_unregister(&bq20z75_device->power_supply);
kfree(bq20z75_device);
bq20z75_device = NULL;
@@ -444,13 +658,14 @@ static int bq20z75_remove(struct i2c_client *client)
static int bq20z75_suspend(struct i2c_client *client,
pm_message_t state)
{
+ struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
s32 ret;
/* write to manufacturer access with sleep command */
ret = bq20z75_write_word_data(client,
bq20z75_data[REG_MANUFACTURER_DATA].addr,
MANUFACTURER_ACCESS_SLEEP);
- if (ret < 0)
+ if (bq20z75_device->is_present && ret < 0)
return ret;
return 0;
@@ -465,10 +680,11 @@ static const struct i2c_device_id bq20z75_id[] = {
{ "bq20z75", 0 },
{}
};
+MODULE_DEVICE_TABLE(i2c, bq20z75_id);
static struct i2c_driver bq20z75_battery_driver = {
.probe = bq20z75_probe,
- .remove = bq20z75_remove,
+ .remove = __devexit_p(bq20z75_remove),
.suspend = bq20z75_suspend,
.resume = bq20z75_resume,
.id_table = bq20z75_id,
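
The probe path added above requests the optional battery-detect GPIO, configures it as an input, maps it to an interrupt and installs bq20z75_irq() so that inserting or removing the pack immediately ends up in power_supply_changed(). A minimal sketch of that pattern follows; it mirrors the hunk with the error unwinding collapsed, and example_setup_detect() is a hypothetical helper, not part of the driver.

/*
 * Sketch of the gpio-to-irq presence detection used in bq20z75_probe().
 * example_setup_detect() is hypothetical; bq20z75_irq() is the handler
 * added by the patch above.
 */
static int example_setup_detect(struct i2c_client *client, int detect_gpio,
				struct power_supply *psy)
{
	int irq, rc;

	rc = gpio_request(detect_gpio, dev_name(&client->dev));
	if (rc)
		return rc;

	rc = gpio_direction_input(detect_gpio);
	if (rc)
		goto err_free;

	irq = gpio_to_irq(detect_gpio);
	if (irq <= 0) {
		rc = irq ? irq : -EINVAL;
		goto err_free;
	}

	/* Both edges, so insertion and removal each trigger an update. */
	rc = request_irq(irq, bq20z75_irq,
			 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			 dev_name(&client->dev), psy);
	if (rc)
		goto err_free;

	return irq;

err_free:
	gpio_free(detect_gpio);
	return rc;
}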
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index eff0273d4030..59e68dbd028b 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
* Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
+ * Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de>
*
* Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
*
@@ -15,6 +16,13 @@
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
*/
+
+/*
+ * Datasheets:
+ * http://focus.ti.com/docs/prod/folders/print/bq27000.html
+ * http://focus.ti.com/docs/prod/folders/print/bq27500.html
+ */
+
#include <linux/module.h>
#include <linux/param.h>
#include <linux/jiffies.h>
@@ -27,7 +35,9 @@
#include <linux/slab.h>
#include <asm/unaligned.h>
-#define DRIVER_VERSION "1.1.0"
+#include <linux/power/bq27x00_battery.h>
+
+#define DRIVER_VERSION "1.2.0"
#define BQ27x00_REG_TEMP 0x06
#define BQ27x00_REG_VOLT 0x08
@@ -36,36 +46,59 @@
#define BQ27x00_REG_TTE 0x16
#define BQ27x00_REG_TTF 0x18
#define BQ27x00_REG_TTECP 0x26
+#define BQ27x00_REG_NAC 0x0C /* Nominal available capacity */
+#define BQ27x00_REG_LMD 0x12 /* Last measured discharge */
+#define BQ27x00_REG_CYCT 0x2A /* Cycle count total */
+#define BQ27x00_REG_AE 0x22 /* Available energy */
#define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */
+#define BQ27000_REG_ILMD 0x76 /* Initial last measured discharge */
#define BQ27000_FLAG_CHGS BIT(7)
+#define BQ27000_FLAG_FC BIT(5)
-#define BQ27500_REG_SOC 0x2c
+#define BQ27500_REG_SOC 0x2C
+#define BQ27500_REG_DCAP 0x3C /* Design capacity */
#define BQ27500_FLAG_DSC BIT(0)
#define BQ27500_FLAG_FC BIT(9)
-/* If the system has several batteries we need a different name for each
- * of them...
- */
-static DEFINE_IDR(battery_id);
-static DEFINE_MUTEX(battery_mutex);
+#define BQ27000_RS 20 /* Resistor sense */
struct bq27x00_device_info;
struct bq27x00_access_methods {
- int (*read)(u8 reg, int *rt_value, int b_single,
- struct bq27x00_device_info *di);
+ int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
};
enum bq27x00_chip { BQ27000, BQ27500 };
+struct bq27x00_reg_cache {
+ int temperature;
+ int time_to_empty;
+ int time_to_empty_avg;
+ int time_to_full;
+ int charge_full;
+ int charge_counter;
+ int capacity;
+ int flags;
+
+ int current_now;
+};
+
struct bq27x00_device_info {
struct device *dev;
int id;
- struct bq27x00_access_methods *bus;
- struct power_supply bat;
enum bq27x00_chip chip;
- struct i2c_client *client;
+ struct bq27x00_reg_cache cache;
+ int charge_design_full;
+
+ unsigned long last_update;
+ struct delayed_work work;
+
+ struct power_supply bat;
+
+ struct bq27x00_access_methods bus;
+
+ struct mutex lock;
};
static enum power_supply_property bq27x00_battery_props[] = {
@@ -78,164 +111,328 @@ static enum power_supply_property bq27x00_battery_props[] = {
POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
};
+static unsigned int poll_interval = 360;
+module_param(poll_interval, uint, 0644);
+MODULE_PARM_DESC(poll_interval, "battery poll interval in seconds - " \
+ "0 disables polling");
+
/*
* Common code for BQ27x00 devices
*/
-static int bq27x00_read(u8 reg, int *rt_value, int b_single,
- struct bq27x00_device_info *di)
+static inline int bq27x00_read(struct bq27x00_device_info *di, u8 reg,
+ bool single)
{
- return di->bus->read(reg, rt_value, b_single, di);
+ return di->bus.read(di, reg, single);
}
/*
- * Return the battery temperature in tenths of degree Celsius
+ * Return the battery Relative State-of-Charge
* Or < 0 if something fails.
*/
-static int bq27x00_battery_temperature(struct bq27x00_device_info *di)
+static int bq27x00_battery_read_rsoc(struct bq27x00_device_info *di)
{
- int ret;
- int temp = 0;
+ int rsoc;
- ret = bq27x00_read(BQ27x00_REG_TEMP, &temp, 0, di);
- if (ret) {
- dev_err(di->dev, "error reading temperature\n");
- return ret;
+ if (di->chip == BQ27500)
+ rsoc = bq27x00_read(di, BQ27500_REG_SOC, false);
+ else
+ rsoc = bq27x00_read(di, BQ27000_REG_RSOC, true);
+
+ if (rsoc < 0)
+ dev_err(di->dev, "error reading relative State-of-Charge\n");
+
+ return rsoc;
+}
+
+/*
+ * Return a battery charge value in µAh
+ * Or < 0 if something fails.
+ */
+static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg)
+{
+ int charge;
+
+ charge = bq27x00_read(di, reg, false);
+ if (charge < 0) {
+ dev_err(di->dev, "error reading nominal available capacity\n");
+ return charge;
}
if (di->chip == BQ27500)
- return temp - 2731;
+ charge *= 1000;
else
- return ((temp >> 2) - 273) * 10;
+ charge = charge * 3570 / BQ27000_RS;
+
+ return charge;
}
/*
- * Return the battery Voltage in milivolts
+ * Return the battery Nominal available capacity in µAh
* Or < 0 if something fails.
*/
-static int bq27x00_battery_voltage(struct bq27x00_device_info *di)
+static inline int bq27x00_battery_read_nac(struct bq27x00_device_info *di)
{
- int ret;
- int volt = 0;
+ return bq27x00_battery_read_charge(di, BQ27x00_REG_NAC);
+}
- ret = bq27x00_read(BQ27x00_REG_VOLT, &volt, 0, di);
- if (ret) {
- dev_err(di->dev, "error reading voltage\n");
- return ret;
+/*
+ * Return the battery Last measured discharge in µAh
+ * Or < 0 if something fails.
+ */
+static inline int bq27x00_battery_read_lmd(struct bq27x00_device_info *di)
+{
+ return bq27x00_battery_read_charge(di, BQ27x00_REG_LMD);
+}
+
+/*
+ * Return the battery Initial last measured discharge in µAh
+ * Or < 0 if something fails.
+ */
+static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
+{
+ int ilmd;
+
+ if (di->chip == BQ27500)
+ ilmd = bq27x00_read(di, BQ27500_REG_DCAP, false);
+ else
+ ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true);
+
+ if (ilmd < 0) {
+ dev_err(di->dev, "error reading initial last measured discharge\n");
+ return ilmd;
}
- return volt * 1000;
+ if (di->chip == BQ27500)
+ ilmd *= 1000;
+ else
+ ilmd = ilmd * 256 * 3570 / BQ27000_RS;
+
+ return ilmd;
}
/*
- * Return the battery average current
- * Note that current can be negative signed as well
- * Or 0 if something fails.
+ * Return the battery Cycle count total
+ * Or < 0 if something fails.
*/
-static int bq27x00_battery_current(struct bq27x00_device_info *di)
+static int bq27x00_battery_read_cyct(struct bq27x00_device_info *di)
{
- int ret;
- int curr = 0;
- int flags = 0;
+ int cyct;
- ret = bq27x00_read(BQ27x00_REG_AI, &curr, 0, di);
- if (ret) {
- dev_err(di->dev, "error reading current\n");
- return 0;
+ cyct = bq27x00_read(di, BQ27x00_REG_CYCT, false);
+ if (cyct < 0)
+ dev_err(di->dev, "error reading cycle count total\n");
+
+ return cyct;
+}
+
+/*
+ * Read a time register.
+ * Return < 0 if something fails.
+ */
+static int bq27x00_battery_read_time(struct bq27x00_device_info *di, u8 reg)
+{
+ int tval;
+
+ tval = bq27x00_read(di, reg, false);
+ if (tval < 0) {
+ dev_err(di->dev, "error reading register %02x: %d\n", reg, tval);
+ return tval;
}
- if (di->chip == BQ27500) {
- /* bq27500 returns signed value */
- curr = (int)(s16)curr;
- } else {
- ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di);
- if (ret < 0) {
- dev_err(di->dev, "error reading flags\n");
- return 0;
- }
- if (flags & BQ27000_FLAG_CHGS) {
- dev_dbg(di->dev, "negative current!\n");
- curr = -curr;
- }
+ if (tval == 65535)
+ return -ENODATA;
+
+ return tval * 60;
+}
+
+static void bq27x00_update(struct bq27x00_device_info *di)
+{
+ struct bq27x00_reg_cache cache = {0, };
+ bool is_bq27500 = di->chip == BQ27500;
+
+ cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, is_bq27500);
+ if (cache.flags >= 0) {
+ cache.capacity = bq27x00_battery_read_rsoc(di);
+ cache.temperature = bq27x00_read(di, BQ27x00_REG_TEMP, false);
+ cache.time_to_empty = bq27x00_battery_read_time(di, BQ27x00_REG_TTE);
+ cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP);
+ cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF);
+ cache.charge_full = bq27x00_battery_read_lmd(di);
+ cache.charge_counter = bq27x00_battery_read_cyct(di);
+
+ if (!is_bq27500)
+ cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false);
+
+ /* We only have to read charge design full once */
+ if (di->charge_design_full <= 0)
+ di->charge_design_full = bq27x00_battery_read_ilmd(di);
+ }
+
+ /* Ignore current_now which is a snapshot of the current battery state
+ * and is likely to be different even between two consecutive reads */
+ if (memcmp(&di->cache, &cache, sizeof(cache) - sizeof(int)) != 0) {
+ di->cache = cache;
+ power_supply_changed(&di->bat);
}
- return curr * 1000;
+ di->last_update = jiffies;
+}
+
+static void bq27x00_battery_poll(struct work_struct *work)
+{
+ struct bq27x00_device_info *di =
+ container_of(work, struct bq27x00_device_info, work.work);
+
+ bq27x00_update(di);
+
+ if (poll_interval > 0) {
+ /* The timer does not have to be accurate. */
+ set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
+ schedule_delayed_work(&di->work, poll_interval * HZ);
+ }
}
+
/*
- * Return the battery Relative State-of-Charge
+ * Return the battery temperature in tenths of degree Celsius
* Or < 0 if something fails.
*/
-static int bq27x00_battery_rsoc(struct bq27x00_device_info *di)
+static int bq27x00_battery_temperature(struct bq27x00_device_info *di,
+ union power_supply_propval *val)
{
- int ret;
- int rsoc = 0;
+ if (di->cache.temperature < 0)
+ return di->cache.temperature;
if (di->chip == BQ27500)
- ret = bq27x00_read(BQ27500_REG_SOC, &rsoc, 0, di);
+ val->intval = di->cache.temperature - 2731;
else
- ret = bq27x00_read(BQ27000_REG_RSOC, &rsoc, 1, di);
- if (ret) {
- dev_err(di->dev, "error reading relative State-of-Charge\n");
- return ret;
+ val->intval = ((di->cache.temperature * 5) - 5463) / 2;
+
+ return 0;
+}
+
+/*
+ * Return the battery average current in µA
+ * Note that current can be negative signed as well
+ * Or 0 if something fails.
+ */
+static int bq27x00_battery_current(struct bq27x00_device_info *di,
+ union power_supply_propval *val)
+{
+ int curr;
+
+ if (di->chip == BQ27500)
+ curr = bq27x00_read(di, BQ27x00_REG_AI, false);
+ else
+ curr = di->cache.current_now;
+
+ if (curr < 0)
+ return curr;
+
+ if (di->chip == BQ27500) {
+ /* bq27500 returns signed value */
+ val->intval = (int)((s16)curr) * 1000;
+ } else {
+ if (di->cache.flags & BQ27000_FLAG_CHGS) {
+ dev_dbg(di->dev, "negative current!\n");
+ curr = -curr;
+ }
+
+ val->intval = curr * 3570 / BQ27000_RS;
}
- return rsoc;
+ return 0;
}
static int bq27x00_battery_status(struct bq27x00_device_info *di,
- union power_supply_propval *val)
+ union power_supply_propval *val)
{
- int flags = 0;
int status;
- int ret;
-
- ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di);
- if (ret < 0) {
- dev_err(di->dev, "error reading flags\n");
- return ret;
- }
if (di->chip == BQ27500) {
- if (flags & BQ27500_FLAG_FC)
+ if (di->cache.flags & BQ27500_FLAG_FC)
status = POWER_SUPPLY_STATUS_FULL;
- else if (flags & BQ27500_FLAG_DSC)
+ else if (di->cache.flags & BQ27500_FLAG_DSC)
status = POWER_SUPPLY_STATUS_DISCHARGING;
else
status = POWER_SUPPLY_STATUS_CHARGING;
} else {
- if (flags & BQ27000_FLAG_CHGS)
+ if (di->cache.flags & BQ27000_FLAG_FC)
+ status = POWER_SUPPLY_STATUS_FULL;
+ else if (di->cache.flags & BQ27000_FLAG_CHGS)
status = POWER_SUPPLY_STATUS_CHARGING;
+ else if (power_supply_am_i_supplied(&di->bat))
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
else
status = POWER_SUPPLY_STATUS_DISCHARGING;
}
val->intval = status;
+
return 0;
}
/*
- * Read a time register.
- * Return < 0 if something fails.
+ * Return the battery Voltage in millivolts
+ * Or < 0 if something fails.
*/
-static int bq27x00_battery_time(struct bq27x00_device_info *di, int reg,
- union power_supply_propval *val)
+static int bq27x00_battery_voltage(struct bq27x00_device_info *di,
+ union power_supply_propval *val)
{
- int tval = 0;
- int ret;
+ int volt;
- ret = bq27x00_read(reg, &tval, 0, di);
- if (ret) {
- dev_err(di->dev, "error reading register %02x\n", reg);
- return ret;
+ volt = bq27x00_read(di, BQ27x00_REG_VOLT, false);
+ if (volt < 0)
+ return volt;
+
+ val->intval = volt * 1000;
+
+ return 0;
+}
+
+/*
+ * Return the battery Available energy in µWh
+ * Or < 0 if something fails.
+ */
+static int bq27x00_battery_energy(struct bq27x00_device_info *di,
+ union power_supply_propval *val)
+{
+ int ae;
+
+ ae = bq27x00_read(di, BQ27x00_REG_AE, false);
+ if (ae < 0) {
+ dev_err(di->dev, "error reading available energy\n");
+ return ae;
}
- if (tval == 65535)
- return -ENODATA;
+ if (di->chip == BQ27500)
+ ae *= 1000;
+ else
+ ae = ae * 29200 / BQ27000_RS;
+
+ val->intval = ae;
+
+ return 0;
+}
+
+
+static int bq27x00_simple_value(int value,
+ union power_supply_propval *val)
+{
+ if (value < 0)
+ return value;
+
+ val->intval = value;
- val->intval = tval * 60;
return 0;
}
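
The conversions above fold the chip-specific scaling into the read helpers: bq27500 registers already report mAh, mA and 0.1 K, so values are simply multiplied by 1000 where µ-units are expected, while bq27000/bq27200 counts are scaled by 3570 and divided by BQ27000_RS (our reading: 3.57 µV·h or µV per count across a nominal 20 mOhm sense resistor), and the 0.25 K temperature unit becomes tenths of a degree Celsius via ((raw * 5) - 5463) / 2. A small standalone sketch of that arithmetic, using the same constants:

#include <stdio.h>

#define BQ27000_RS 20	/* sense-resistor scaling used by the driver */

/* bq27000/bq27200 raw charge or current count -> µAh or µA */
static int bq27000_scale(int raw)
{
	return raw * 3570 / BQ27000_RS;
}

/* bq27000 temperature (0.25 K units) -> tenths of a degree Celsius */
static int bq27000_temp_decidegc(int raw)
{
	return ((raw * 5) - 5463) / 2;
}

/* bq27500 temperature (0.1 K units) -> tenths of a degree Celsius */
static int bq27500_temp_decidegc(int raw)
{
	return raw - 2731;
}

int main(void)
{
	printf("NAC count 100 -> %d uAh\n", bq27000_scale(100));			/* 17850 */
	printf("bq27000 temp 1192 -> %d deci-degC\n", bq27000_temp_decidegc(1192));	/* ~248, i.e. 24.8 C */
	printf("bq27500 temp 3031 -> %d deci-degC\n", bq27500_temp_decidegc(3031));	/* 300, i.e. 30.0 C */
	return 0;
}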
@@ -249,33 +446,61 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
int ret = 0;
struct bq27x00_device_info *di = to_bq27x00_device_info(psy);
+ mutex_lock(&di->lock);
+ if (time_is_before_jiffies(di->last_update + 5 * HZ)) {
+ cancel_delayed_work_sync(&di->work);
+ bq27x00_battery_poll(&di->work.work);
+ }
+ mutex_unlock(&di->lock);
+
+ if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
+ return -ENODEV;
+
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
ret = bq27x00_battery_status(di, val);
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = bq27x00_battery_voltage(di, val);
+ break;
case POWER_SUPPLY_PROP_PRESENT:
- val->intval = bq27x00_battery_voltage(di);
- if (psp == POWER_SUPPLY_PROP_PRESENT)
- val->intval = val->intval <= 0 ? 0 : 1;
+ val->intval = di->cache.flags < 0 ? 0 : 1;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
- val->intval = bq27x00_battery_current(di);
+ ret = bq27x00_battery_current(di, val);
break;
case POWER_SUPPLY_PROP_CAPACITY:
- val->intval = bq27x00_battery_rsoc(di);
+ ret = bq27x00_simple_value(di->cache.capacity, val);
break;
case POWER_SUPPLY_PROP_TEMP:
- val->intval = bq27x00_battery_temperature(di);
+ ret = bq27x00_battery_temperature(di, val);
break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
- ret = bq27x00_battery_time(di, BQ27x00_REG_TTE, val);
+ ret = bq27x00_simple_value(di->cache.time_to_empty, val);
break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
- ret = bq27x00_battery_time(di, BQ27x00_REG_TTECP, val);
+ ret = bq27x00_simple_value(di->cache.time_to_empty_avg, val);
break;
case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
- ret = bq27x00_battery_time(di, BQ27x00_REG_TTF, val);
+ ret = bq27x00_simple_value(di->cache.time_to_full, val);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ ret = bq27x00_simple_value(bq27x00_battery_read_nac(di), val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ ret = bq27x00_simple_value(di->cache.charge_full, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ ret = bq27x00_simple_value(di->charge_design_full, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ ret = bq27x00_simple_value(di->cache.charge_counter, val);
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ ret = bq27x00_battery_energy(di, val);
break;
default:
return -EINVAL;
@@ -284,56 +509,91 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
return ret;
}
-static void bq27x00_powersupply_init(struct bq27x00_device_info *di)
+static void bq27x00_external_power_changed(struct power_supply *psy)
{
+ struct bq27x00_device_info *di = to_bq27x00_device_info(psy);
+
+ cancel_delayed_work_sync(&di->work);
+ schedule_delayed_work(&di->work, 0);
+}
+
+static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
+{
+ int ret;
+
di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
di->bat.properties = bq27x00_battery_props;
di->bat.num_properties = ARRAY_SIZE(bq27x00_battery_props);
di->bat.get_property = bq27x00_battery_get_property;
- di->bat.external_power_changed = NULL;
+ di->bat.external_power_changed = bq27x00_external_power_changed;
+
+ INIT_DELAYED_WORK(&di->work, bq27x00_battery_poll);
+ mutex_init(&di->lock);
+
+ ret = power_supply_register(di->dev, &di->bat);
+ if (ret) {
+ dev_err(di->dev, "failed to register battery: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(di->dev, "support ver. %s enabled\n", DRIVER_VERSION);
+
+ bq27x00_update(di);
+
+ return 0;
}
-/*
- * i2c specific code
+static void bq27x00_powersupply_unregister(struct bq27x00_device_info *di)
+{
+ cancel_delayed_work_sync(&di->work);
+
+ power_supply_unregister(&di->bat);
+
+ mutex_destroy(&di->lock);
+}
+
+
+/* i2c specific code */
+#ifdef CONFIG_BATTERY_BQ27X00_I2C
+
+/* If the system has several batteries we need a different name for each
+ * of them...
*/
+static DEFINE_IDR(battery_id);
+static DEFINE_MUTEX(battery_mutex);
-static int bq27x00_read_i2c(u8 reg, int *rt_value, int b_single,
- struct bq27x00_device_info *di)
+static int bq27x00_read_i2c(struct bq27x00_device_info *di, u8 reg, bool single)
{
- struct i2c_client *client = di->client;
- struct i2c_msg msg[1];
+ struct i2c_client *client = to_i2c_client(di->dev);
+ struct i2c_msg msg[2];
unsigned char data[2];
- int err;
+ int ret;
if (!client->adapter)
return -ENODEV;
- msg->addr = client->addr;
- msg->flags = 0;
- msg->len = 1;
- msg->buf = data;
-
- data[0] = reg;
- err = i2c_transfer(client->adapter, msg, 1);
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].buf = &reg;
+ msg[0].len = sizeof(reg);
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+ if (single)
+ msg[1].len = 1;
+ else
+ msg[1].len = 2;
- if (err >= 0) {
- if (!b_single)
- msg->len = 2;
- else
- msg->len = 1;
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret < 0)
+ return ret;
- msg->flags = I2C_M_RD;
- err = i2c_transfer(client->adapter, msg, 1);
- if (err >= 0) {
- if (!b_single)
- *rt_value = get_unaligned_le16(data);
- else
- *rt_value = data[0];
+ if (!single)
+ ret = get_unaligned_le16(data);
+ else
+ ret = data[0];
- return 0;
- }
- }
- return err;
+ return ret;
}
static int bq27x00_battery_probe(struct i2c_client *client,
@@ -341,7 +601,6 @@ static int bq27x00_battery_probe(struct i2c_client *client,
{
char *name;
struct bq27x00_device_info *di;
- struct bq27x00_access_methods *bus;
int num;
int retval = 0;
@@ -368,38 +627,20 @@ static int bq27x00_battery_probe(struct i2c_client *client,
retval = -ENOMEM;
goto batt_failed_2;
}
+
di->id = num;
+ di->dev = &client->dev;
di->chip = id->driver_data;
+ di->bat.name = name;
+ di->bus.read = &bq27x00_read_i2c;
- bus = kzalloc(sizeof(*bus), GFP_KERNEL);
- if (!bus) {
- dev_err(&client->dev, "failed to allocate access method "
- "data\n");
- retval = -ENOMEM;
+ if (bq27x00_powersupply_init(di))
goto batt_failed_3;
- }
i2c_set_clientdata(client, di);
- di->dev = &client->dev;
- di->bat.name = name;
- bus->read = &bq27x00_read_i2c;
- di->bus = bus;
- di->client = client;
-
- bq27x00_powersupply_init(di);
-
- retval = power_supply_register(&client->dev, &di->bat);
- if (retval) {
- dev_err(&client->dev, "failed to register battery\n");
- goto batt_failed_4;
- }
-
- dev_info(&client->dev, "support ver. %s enabled\n", DRIVER_VERSION);
return 0;
-batt_failed_4:
- kfree(bus);
batt_failed_3:
kfree(di);
batt_failed_2:
@@ -416,9 +657,8 @@ static int bq27x00_battery_remove(struct i2c_client *client)
{
struct bq27x00_device_info *di = i2c_get_clientdata(client);
- power_supply_unregister(&di->bat);
+ bq27x00_powersupply_unregister(di);
- kfree(di->bus);
kfree(di->bat.name);
mutex_lock(&battery_mutex);
@@ -430,15 +670,12 @@ static int bq27x00_battery_remove(struct i2c_client *client)
return 0;
}
-/*
- * Module stuff
- */
-
static const struct i2c_device_id bq27x00_id[] = {
{ "bq27200", BQ27000 }, /* bq27200 is same as bq27000, but with i2c */
{ "bq27500", BQ27500 },
{},
};
+MODULE_DEVICE_TABLE(i2c, bq27x00_id);
static struct i2c_driver bq27x00_battery_driver = {
.driver = {
@@ -449,13 +686,164 @@ static struct i2c_driver bq27x00_battery_driver = {
.id_table = bq27x00_id,
};
+static inline int bq27x00_battery_i2c_init(void)
+{
+ int ret = i2c_add_driver(&bq27x00_battery_driver);
+ if (ret)
+ printk(KERN_ERR "Unable to register BQ27x00 i2c driver\n");
+
+ return ret;
+}
+
+static inline void bq27x00_battery_i2c_exit(void)
+{
+ i2c_del_driver(&bq27x00_battery_driver);
+}
+
+#else
+
+static inline int bq27x00_battery_i2c_init(void) { return 0; }
+static inline void bq27x00_battery_i2c_exit(void) {};
+
+#endif
+
+/* platform specific code */
+#ifdef CONFIG_BATTERY_BQ27X00_PLATFORM
+
+static int bq27000_read_platform(struct bq27x00_device_info *di, u8 reg,
+ bool single)
+{
+ struct device *dev = di->dev;
+ struct bq27000_platform_data *pdata = dev->platform_data;
+ unsigned int timeout = 3;
+ int upper, lower;
+ int temp;
+
+ if (!single) {
+ /* Make sure the value has not changed in between reading the
+ * lower and the upper part */
+ upper = pdata->read(dev, reg + 1);
+ do {
+ temp = upper;
+ if (upper < 0)
+ return upper;
+
+ lower = pdata->read(dev, reg);
+ if (lower < 0)
+ return lower;
+
+ upper = pdata->read(dev, reg + 1);
+ } while (temp != upper && --timeout);
+
+ if (timeout == 0)
+ return -EIO;
+
+ return (upper << 8) | lower;
+ }
+
+ return pdata->read(dev, reg);
+}
+
+static int __devinit bq27000_battery_probe(struct platform_device *pdev)
+{
+ struct bq27x00_device_info *di;
+ struct bq27000_platform_data *pdata = pdev->dev.platform_data;
+ int ret;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform_data supplied\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->read) {
+ dev_err(&pdev->dev, "no hdq read callback supplied\n");
+ return -EINVAL;
+ }
+
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
+ if (!di) {
+ dev_err(&pdev->dev, "failed to allocate device info data\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ di->dev = &pdev->dev;
+ di->chip = BQ27000;
+
+ di->bat.name = pdata->name ?: dev_name(&pdev->dev);
+ di->bus.read = &bq27000_read_platform;
+
+ ret = bq27x00_powersupply_init(di);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return ret;
+}
+
+static int __devexit bq27000_battery_remove(struct platform_device *pdev)
+{
+ struct bq27x00_device_info *di = platform_get_drvdata(pdev);
+
+ bq27x00_powersupply_unregister(di);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static struct platform_driver bq27000_battery_driver = {
+ .probe = bq27000_battery_probe,
+ .remove = __devexit_p(bq27000_battery_remove),
+ .driver = {
+ .name = "bq27000-battery",
+ .owner = THIS_MODULE,
+ },
+};
+
+static inline int bq27x00_battery_platform_init(void)
+{
+ int ret = platform_driver_register(&bq27000_battery_driver);
+ if (ret)
+ printk(KERN_ERR "Unable to register BQ27000 platform driver\n");
+
+ return ret;
+}
+
+static inline void bq27x00_battery_platform_exit(void)
+{
+ platform_driver_unregister(&bq27000_battery_driver);
+}
+
+#else
+
+static inline int bq27x00_battery_platform_init(void) { return 0; }
+static inline void bq27x00_battery_platform_exit(void) {};
+
+#endif
+
+/*
+ * Module stuff
+ */
+
static int __init bq27x00_battery_init(void)
{
int ret;
- ret = i2c_add_driver(&bq27x00_battery_driver);
+ ret = bq27x00_battery_i2c_init();
if (ret)
- printk(KERN_ERR "Unable to register BQ27x00 driver\n");
+ return ret;
+
+ ret = bq27x00_battery_platform_init();
+ if (ret)
+ bq27x00_battery_i2c_exit();
return ret;
}
@@ -463,7 +851,8 @@ module_init(bq27x00_battery_init);
static void __exit bq27x00_battery_exit(void)
{
- i2c_del_driver(&bq27x00_battery_driver);
+ bq27x00_battery_platform_exit();
+ bq27x00_battery_i2c_exit();
}
module_exit(bq27x00_battery_exit);
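
Two details of the rewritten back-ends are worth spelling out: bq27x00_read_i2c() now issues a single i2c_transfer() carrying a write message followed by a read message, so the register address and the data stay inside one repeated-start transaction, and bq27000_read_platform() re-reads the high byte until it is stable to get a consistent 16-bit value over the byte-wide HDQ callback. A minimal sketch of the combined transfer (illustrative only, error handling trimmed, helper name hypothetical):

static int example_reg_read(struct i2c_client *client, u8 reg, bool single)
{
	u8 data[2];
	struct i2c_msg msg[2] = {
		{ .addr = client->addr, .flags = 0,        .buf = &reg, .len = 1 },
		{ .addr = client->addr, .flags = I2C_M_RD, .buf = data, .len = single ? 1 : 2 },
	};
	int ret;

	ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
	if (ret < 0)
		return ret;

	/* registers are little endian */
	return single ? data[0] : (data[0] | (data[1] << 8));
}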
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 6957e8af6449..4d2dc4fa2888 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -393,6 +393,7 @@ static const struct i2c_device_id ds278x_id[] = {
{"ds2786", DS2786},
{},
};
+MODULE_DEVICE_TABLE(i2c, ds278x_id);
static struct i2c_driver ds278x_battery_driver = {
.driver = {
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 970f7335d3a7..329b46b2327d 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -171,6 +171,8 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
dev_set_drvdata(dev, psy);
psy->dev = dev;
+ INIT_WORK(&psy->changed_work, power_supply_changed_work);
+
rc = kobject_set_name(&dev->kobj, "%s", psy->name);
if (rc)
goto kobject_set_name_failed;
@@ -179,8 +181,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
if (rc)
goto device_add_failed;
- INIT_WORK(&psy->changed_work, power_supply_changed_work);
-
rc = power_supply_create_triggers(psy);
if (rc)
goto create_triggers_failed;
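
The reordering above initialises changed_work before device_add() runs, closing the window in which power_supply_changed() could be called during registration (for example from a driver reacting to an early uevent or property read) before the work item had been set up. Sketch of the required ordering, with hypothetical names:

static void example_changed_work(struct work_struct *work)
{
	/* runs whenever the supply reports a change */
}

static int example_register(struct device *dev, struct work_struct *changed_work)
{
	/* must come first: device_add() can already cause the work to be queued */
	INIT_WORK(changed_work, example_changed_work);

	return device_add(dev);
}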
diff --git a/drivers/power/power_supply_leds.c b/drivers/power/power_supply_leds.c
index 031a554837f7..da25eb94e5c6 100644
--- a/drivers/power/power_supply_leds.c
+++ b/drivers/power/power_supply_leds.c
@@ -21,6 +21,8 @@
static void power_supply_update_bat_leds(struct power_supply *psy)
{
union power_supply_propval status;
+ unsigned long delay_on = 0;
+ unsigned long delay_off = 0;
if (psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &status))
return;
@@ -32,16 +34,22 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
led_trigger_event(psy->charging_full_trig, LED_FULL);
led_trigger_event(psy->charging_trig, LED_OFF);
led_trigger_event(psy->full_trig, LED_FULL);
+ led_trigger_event(psy->charging_blink_full_solid_trig,
+ LED_FULL);
break;
case POWER_SUPPLY_STATUS_CHARGING:
led_trigger_event(psy->charging_full_trig, LED_FULL);
led_trigger_event(psy->charging_trig, LED_FULL);
led_trigger_event(psy->full_trig, LED_OFF);
+ led_trigger_blink(psy->charging_blink_full_solid_trig,
+ &delay_on, &delay_off);
break;
default:
led_trigger_event(psy->charging_full_trig, LED_OFF);
led_trigger_event(psy->charging_trig, LED_OFF);
led_trigger_event(psy->full_trig, LED_OFF);
+ led_trigger_event(psy->charging_blink_full_solid_trig,
+ LED_OFF);
break;
}
}
@@ -64,15 +72,24 @@ static int power_supply_create_bat_triggers(struct power_supply *psy)
if (!psy->full_trig_name)
goto full_failed;
+ psy->charging_blink_full_solid_trig_name = kasprintf(GFP_KERNEL,
+ "%s-charging-blink-full-solid", psy->name);
+ if (!psy->charging_blink_full_solid_trig_name)
+ goto charging_blink_full_solid_failed;
+
led_trigger_register_simple(psy->charging_full_trig_name,
&psy->charging_full_trig);
led_trigger_register_simple(psy->charging_trig_name,
&psy->charging_trig);
led_trigger_register_simple(psy->full_trig_name,
&psy->full_trig);
+ led_trigger_register_simple(psy->charging_blink_full_solid_trig_name,
+ &psy->charging_blink_full_solid_trig);
goto success;
+charging_blink_full_solid_failed:
+ kfree(psy->full_trig_name);
full_failed:
kfree(psy->charging_trig_name);
charging_failed:
@@ -88,6 +105,8 @@ static void power_supply_remove_bat_triggers(struct power_supply *psy)
led_trigger_unregister_simple(psy->charging_full_trig);
led_trigger_unregister_simple(psy->charging_trig);
led_trigger_unregister_simple(psy->full_trig);
+ led_trigger_unregister_simple(psy->charging_blink_full_solid_trig);
+ kfree(psy->charging_blink_full_solid_trig_name);
kfree(psy->full_trig_name);
kfree(psy->charging_trig_name);
kfree(psy->charging_full_trig_name);
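
The new "<supply>-charging-blink-full-solid" trigger blinks an LED while charging and holds it solid once full; led_trigger_blink() is called with delay_on = delay_off = 0, which leaves the blink rate to the LED core (or to hardware blinking where a driver supports it). A board would typically attach an LED to the trigger by name, for example (hypothetical board code, GPIO number invented, supply name assumed to be "battery"):

static struct gpio_led example_charge_led = {
	.name		 = "example::charging",
	.gpio		 = 42,	/* hypothetical */
	.default_trigger = "battery-charging-blink-full-solid",
};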
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index cd1f90754a3a..605514afc29f 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -270,7 +270,7 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
attr = &power_supply_attrs[psy->properties[j]];
ret = power_supply_show_property(dev, attr, prop_buf);
- if (ret == -ENODEV) {
+ if (ret == -ENODEV || ret == -ENODATA) {
/* When a battery is absent, we expect -ENODEV. Don't abort;
send the uevent with at least the PRESENT=0 property */
ret = 0;
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
index 4255f2358b13..d36c289aaef5 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/s3c_adc_battery.c
@@ -406,8 +406,8 @@ static int s3c_adc_bat_resume(struct platform_device *pdev)
return 0;
}
#else
-#define s3c_adc_battery_suspend NULL
-#define s3c_adc_battery_resume NULL
+#define s3c_adc_bat_suspend NULL
+#define s3c_adc_bat_resume NULL
#endif
static struct platform_driver s3c_adc_bat_driver = {
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index ff1f42398a2e..92c16e1677bd 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -71,8 +71,11 @@ struct twl4030_bci {
struct power_supply usb;
struct otg_transceiver *transceiver;
struct notifier_block otg_nb;
+ struct work_struct work;
int irq_chg;
int irq_bci;
+
+ unsigned long event;
};
/*
@@ -258,14 +261,11 @@ static irqreturn_t twl4030_bci_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
-static int twl4030_bci_usb_ncb(struct notifier_block *nb, unsigned long val,
- void *priv)
+static void twl4030_bci_usb_work(struct work_struct *data)
{
- struct twl4030_bci *bci = container_of(nb, struct twl4030_bci, otg_nb);
+ struct twl4030_bci *bci = container_of(data, struct twl4030_bci, work);
- dev_dbg(bci->dev, "OTG notify %lu\n", val);
-
- switch (val) {
+ switch (bci->event) {
case USB_EVENT_VBUS:
case USB_EVENT_CHARGER:
twl4030_charger_enable_usb(bci, true);
@@ -274,6 +274,17 @@ static int twl4030_bci_usb_ncb(struct notifier_block *nb, unsigned long val,
twl4030_charger_enable_usb(bci, false);
break;
}
+}
+
+static int twl4030_bci_usb_ncb(struct notifier_block *nb, unsigned long val,
+ void *priv)
+{
+ struct twl4030_bci *bci = container_of(nb, struct twl4030_bci, otg_nb);
+
+ dev_dbg(bci->dev, "OTG notify %lu\n", val);
+
+ bci->event = val;
+ schedule_work(&bci->work);
return NOTIFY_OK;
}
@@ -466,6 +477,8 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
goto fail_bci_irq;
}
+ INIT_WORK(&bci->work, twl4030_bci_usb_work);
+
bci->transceiver = otg_get_transceiver();
if (bci->transceiver != NULL) {
bci->otg_nb.notifier_call = twl4030_bci_usb_ncb;
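
The OTG notifier rework above follows the usual deferral pattern: notifier callbacks may run in atomic context, so twl4030_bci_usb_ncb() now only records the event and schedules a work item, and the I2C-heavy twl4030_charger_enable_usb() runs later from twl4030_bci_usb_work() in process context. Generic sketch of the pattern (names hypothetical):

struct example_ctx {
	struct notifier_block	nb;
	struct work_struct	work;
	unsigned long		event;
};

static void example_work(struct work_struct *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, work);

	/* process context: sleeping bus I/O is allowed here, keyed off ctx->event */
}

static int example_notifier(struct notifier_block *nb, unsigned long val, void *priv)
{
	struct example_ctx *ctx = container_of(nb, struct example_ctx, nb);

	ctx->event = val;		/* remember why we were notified */
	schedule_work(&ctx->work);	/* defer the heavy lifting */

	return NOTIFY_OK;
}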
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index e5ed52d71937..2a9ab89f83b8 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -134,6 +134,8 @@ static int z2_batt_ps_init(struct z2_charger *charger, int props)
enum power_supply_property *prop;
struct z2_battery_info *info = charger->info;
+ if (info->charge_gpio >= 0)
+ props++; /* POWER_SUPPLY_PROP_STATUS */
if (info->batt_tech >= 0)
props++; /* POWER_SUPPLY_PROP_TECHNOLOGY */
if (info->batt_I2C_reg >= 0)
@@ -293,6 +295,7 @@ static const struct i2c_device_id z2_batt_id[] = {
{ "aer915", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, z2_batt_id);
static struct i2c_driver z2_batt_driver = {
.driver = {
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index de75f67f4cc3..b9f29e0d4295 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -126,7 +126,7 @@ config REGULATOR_MAX8998
and S5PC1XX chips to control VCC_CORE and VCC_USIM voltages.
config REGULATOR_TWL4030
- bool "TI TWL4030/TWL5030/TWL6030/TPS695x0 PMIC"
+ bool "TI TWL4030/TWL5030/TWL6030/TPS659x0 PMIC"
depends on TWL4030_CORE
help
This driver supports the voltage regulators provided by
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 2dec589a8908..b1d77946e9c6 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -206,29 +206,6 @@ static int ab3100_enable_regulator(struct regulator_dev *reg)
return err;
}
- /* Per-regulator power on delay from spec */
- switch (abreg->regreg) {
- case AB3100_LDO_A: /* Fallthrough */
- case AB3100_LDO_C: /* Fallthrough */
- case AB3100_LDO_D: /* Fallthrough */
- case AB3100_LDO_E: /* Fallthrough */
- case AB3100_LDO_H: /* Fallthrough */
- case AB3100_LDO_K:
- udelay(200);
- break;
- case AB3100_LDO_F:
- udelay(600);
- break;
- case AB3100_LDO_G:
- udelay(400);
- break;
- case AB3100_BUCK:
- mdelay(1);
- break;
- default:
- break;
- }
-
return 0;
}
@@ -450,11 +427,37 @@ static int ab3100_get_voltage_regulator_external(struct regulator_dev *reg)
return abreg->plfdata->external_voltage;
}
+static int ab3100_enable_time_regulator(struct regulator_dev *reg)
+{
+ struct ab3100_regulator *abreg = reg->reg_data;
+
+ /* Per-regulator power on delay from spec */
+ switch (abreg->regreg) {
+ case AB3100_LDO_A: /* Fallthrough */
+ case AB3100_LDO_C: /* Fallthrough */
+ case AB3100_LDO_D: /* Fallthrough */
+ case AB3100_LDO_E: /* Fallthrough */
+ case AB3100_LDO_H: /* Fallthrough */
+ case AB3100_LDO_K:
+ return 200;
+ case AB3100_LDO_F:
+ return 600;
+ case AB3100_LDO_G:
+ return 400;
+ case AB3100_BUCK:
+ return 1000;
+ default:
+ break;
+ }
+ return 0;
+}
+
static struct regulator_ops regulator_ops_fixed = {
.enable = ab3100_enable_regulator,
.disable = ab3100_disable_regulator,
.is_enabled = ab3100_is_enabled_regulator,
.get_voltage = ab3100_get_voltage_regulator,
+ .enable_time = ab3100_enable_time_regulator,
};
static struct regulator_ops regulator_ops_variable = {
@@ -464,6 +467,7 @@ static struct regulator_ops regulator_ops_variable = {
.get_voltage = ab3100_get_voltage_regulator,
.set_voltage = ab3100_set_voltage_regulator,
.list_voltage = ab3100_list_voltage_regulator,
+ .enable_time = ab3100_enable_time_regulator,
};
static struct regulator_ops regulator_ops_variable_sleepable = {
@@ -474,6 +478,7 @@ static struct regulator_ops regulator_ops_variable_sleepable = {
.set_voltage = ab3100_set_voltage_regulator,
.set_suspend_voltage = ab3100_set_suspend_voltage_regulator,
.list_voltage = ab3100_list_voltage_regulator,
+ .enable_time = ab3100_enable_time_regulator,
};
/*
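
The ab3100 change above replaces the open-coded udelay()/mdelay() calls in the enable path with an .enable_time op that only reports the power-on delay in microseconds, leaving the waiting to the regulator core. Simplified sketch of how such a delay is consumed (not the literal core code):

static int example_enable_with_ramp(struct regulator_dev *rdev)
{
	struct regulator_ops *ops = rdev->desc->ops;
	int delay = ops->enable_time ? ops->enable_time(rdev) : 0;
	int ret = ops->enable(rdev);

	if (ret)
		return ret;

	/* wait for the rail to ramp before callers rely on it */
	if (delay >= 1000) {
		mdelay(delay / 1000);
		udelay(delay % 1000);
	} else if (delay) {
		udelay(delay);
	}

	return 0;
}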
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index d9a052c53aec..02f3c2333c83 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -9,7 +9,7 @@
* AB8500 peripheral regulators
*
* AB8500 supports the following regulators:
- * VAUX1/2/3, VINTCORE, VTVOUT, VAUDIO, VAMIC1/2, VDMIC, VANA
+ * VAUX1/2/3, VINTCORE, VTVOUT, VUSB, VAUDIO, VAMIC1/2, VDMIC, VANA
*/
#include <linux/init.h>
#include <linux/kernel.h>
@@ -38,6 +38,7 @@
* @voltage_mask: mask to control regulator voltage
* @voltages: supported voltage table
* @voltages_len: number of supported voltages for the regulator
+ * @delay: startup/set voltage delay in us
*/
struct ab8500_regulator_info {
struct device *dev;
@@ -55,6 +56,7 @@ struct ab8500_regulator_info {
u8 voltage_mask;
int const *voltages;
int voltages_len;
+ unsigned int delay;
};
/* voltage tables for the vauxn/vintcore supplies */
@@ -290,6 +292,29 @@ static int ab8500_regulator_set_voltage(struct regulator_dev *rdev,
return ret;
}
+static int ab8500_regulator_enable_time(struct regulator_dev *rdev)
+{
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ return info->delay;
+}
+
+static int ab8500_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_sel,
+ unsigned int new_sel)
+{
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ /* If the regulator isn't on, it won't take time here */
+ ret = ab8500_regulator_is_enabled(rdev);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return 0;
+ return info->delay;
+}
+
static struct regulator_ops ab8500_regulator_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
@@ -297,6 +322,8 @@ static struct regulator_ops ab8500_regulator_ops = {
.get_voltage = ab8500_regulator_get_voltage,
.set_voltage = ab8500_regulator_set_voltage,
.list_voltage = ab8500_list_voltage,
+ .enable_time = ab8500_regulator_enable_time,
+ .set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel,
};
static int ab8500_fixed_get_voltage(struct regulator_dev *rdev)
@@ -317,6 +344,8 @@ static struct regulator_ops ab8500_regulator_fixed_ops = {
.is_enabled = ab8500_regulator_is_enabled,
.get_voltage = ab8500_fixed_get_voltage,
.list_voltage = ab8500_list_voltage,
+ .enable_time = ab8500_regulator_enable_time,
+ .set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel,
};
static struct ab8500_regulator_info
@@ -426,12 +455,28 @@ static struct ab8500_regulator_info
.owner = THIS_MODULE,
.n_voltages = 1,
},
+ .delay = 10000,
.fixed_uV = 2000000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x82,
.update_val_enable = 0x02,
},
+ [AB8500_LDO_USB] = {
+ .desc = {
+ .name = "LDO-USB",
+ .ops = &ab8500_regulator_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_USB,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 3300000,
+ .update_bank = 0x03,
+ .update_reg = 0x82,
+ .update_mask = 0x03,
+ .update_val_enable = 0x01,
+ },
[AB8500_LDO_AUDIO] = {
.desc = {
.name = "LDO-AUDIO",
@@ -511,6 +556,186 @@ static struct ab8500_regulator_info
};
+struct ab8500_reg_init {
+ u8 bank;
+ u8 addr;
+ u8 mask;
+};
+
+#define REG_INIT(_id, _bank, _addr, _mask) \
+ [_id] = { \
+ .bank = _bank, \
+ .addr = _addr, \
+ .mask = _mask, \
+ }
+
+static struct ab8500_reg_init ab8500_reg_init[] = {
+ /*
+ * 0x30, VanaRequestCtrl
+ * 0x0C, VpllRequestCtrl
+ * 0xc0, VextSupply1RequestCtrl
+ */
+ REG_INIT(AB8500_REGUREQUESTCTRL2, 0x03, 0x04, 0xfc),
+ /*
+ * 0x03, VextSupply2RequestCtrl
+ * 0x0c, VextSupply3RequestCtrl
+ * 0x30, Vaux1RequestCtrl
+ * 0xc0, Vaux2RequestCtrl
+ */
+ REG_INIT(AB8500_REGUREQUESTCTRL3, 0x03, 0x05, 0xff),
+ /*
+ * 0x03, Vaux3RequestCtrl
+ * 0x04, SwHPReq
+ */
+ REG_INIT(AB8500_REGUREQUESTCTRL4, 0x03, 0x06, 0x07),
+ /*
+ * 0x08, VanaSysClkReq1HPValid
+ * 0x20, Vaux1SysClkReq1HPValid
+ * 0x40, Vaux2SysClkReq1HPValid
+ * 0x80, Vaux3SysClkReq1HPValid
+ */
+ REG_INIT(AB8500_REGUSYSCLKREQ1HPVALID1, 0x03, 0x07, 0xe8),
+ /*
+ * 0x10, VextSupply1SysClkReq1HPValid
+ * 0x20, VextSupply2SysClkReq1HPValid
+ * 0x40, VextSupply3SysClkReq1HPValid
+ */
+ REG_INIT(AB8500_REGUSYSCLKREQ1HPVALID2, 0x03, 0x08, 0x70),
+ /*
+ * 0x08, VanaHwHPReq1Valid
+ * 0x20, Vaux1HwHPReq1Valid
+ * 0x40, Vaux2HwHPReq1Valid
+ * 0x80, Vaux3HwHPReq1Valid
+ */
+ REG_INIT(AB8500_REGUHWHPREQ1VALID1, 0x03, 0x09, 0xe8),
+ /*
+ * 0x01, VextSupply1HwHPReq1Valid
+ * 0x02, VextSupply2HwHPReq1Valid
+ * 0x04, VextSupply3HwHPReq1Valid
+ */
+ REG_INIT(AB8500_REGUHWHPREQ1VALID2, 0x03, 0x0a, 0x07),
+ /*
+ * 0x08, VanaHwHPReq2Valid
+ * 0x20, Vaux1HwHPReq2Valid
+ * 0x40, Vaux2HwHPReq2Valid
+ * 0x80, Vaux3HwHPReq2Valid
+ */
+ REG_INIT(AB8500_REGUHWHPREQ2VALID1, 0x03, 0x0b, 0xe8),
+ /*
+ * 0x01, VextSupply1HwHPReq2Valid
+ * 0x02, VextSupply2HwHPReq2Valid
+ * 0x04, VextSupply3HwHPReq2Valid
+ */
+ REG_INIT(AB8500_REGUHWHPREQ2VALID2, 0x03, 0x0c, 0x07),
+ /*
+ * 0x20, VanaSwHPReqValid
+ * 0x80, Vaux1SwHPReqValid
+ */
+ REG_INIT(AB8500_REGUSWHPREQVALID1, 0x03, 0x0d, 0xa0),
+ /*
+ * 0x01, Vaux2SwHPReqValid
+ * 0x02, Vaux3SwHPReqValid
+ * 0x04, VextSupply1SwHPReqValid
+ * 0x08, VextSupply2SwHPReqValid
+ * 0x10, VextSupply3SwHPReqValid
+ */
+ REG_INIT(AB8500_REGUSWHPREQVALID2, 0x03, 0x0e, 0x1f),
+ /*
+ * 0x02, SysClkReq2Valid1
+ * ...
+ * 0x80, SysClkReq8Valid1
+ */
+ REG_INIT(AB8500_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0xfe),
+ /*
+ * 0x02, SysClkReq2Valid2
+ * ...
+ * 0x80, SysClkReq8Valid2
+ */
+ REG_INIT(AB8500_REGUSYSCLKREQVALID2, 0x03, 0x10, 0xfe),
+ /*
+ * 0x02, VTVoutEna
+ * 0x04, Vintcore12Ena
+ * 0x38, Vintcore12Sel
+ * 0x40, Vintcore12LP
+ * 0x80, VTVoutLP
+ */
+ REG_INIT(AB8500_REGUMISC1, 0x03, 0x80, 0xfe),
+ /*
+ * 0x02, VaudioEna
+ * 0x04, VdmicEna
+ * 0x08, Vamic1Ena
+ * 0x10, Vamic2Ena
+ */
+ REG_INIT(AB8500_VAUDIOSUPPLY, 0x03, 0x83, 0x1e),
+ /*
+ * 0x01, Vamic1_dzout
+ * 0x02, Vamic2_dzout
+ */
+ REG_INIT(AB8500_REGUCTRL1VAMIC, 0x03, 0x84, 0x03),
+ /*
+ * 0x0c, VanaRegu
+ * 0x03, VpllRegu
+ */
+ REG_INIT(AB8500_VPLLVANAREGU, 0x04, 0x06, 0x0f),
+ /*
+ * 0x01, VrefDDREna
+ * 0x02, VrefDDRSleepMode
+ */
+ REG_INIT(AB8500_VREFDDR, 0x04, 0x07, 0x03),
+ /*
+ * 0x03, VextSupply1Regu
+ * 0x0c, VextSupply2Regu
+ * 0x30, VextSupply3Regu
+ * 0x40, ExtSupply2Bypass
+ * 0x80, ExtSupply3Bypass
+ */
+ REG_INIT(AB8500_EXTSUPPLYREGU, 0x04, 0x08, 0xff),
+ /*
+ * 0x03, Vaux1Regu
+ * 0x0c, Vaux2Regu
+ */
+ REG_INIT(AB8500_VAUX12REGU, 0x04, 0x09, 0x0f),
+ /*
+ * 0x03, Vaux3Regu
+ */
+ REG_INIT(AB8500_VRF1VAUX3REGU, 0x04, 0x0a, 0x03),
+ /*
+ * 0x3f, Vsmps1Sel1
+ */
+ REG_INIT(AB8500_VSMPS1SEL1, 0x04, 0x13, 0x3f),
+ /*
+ * 0x0f, Vaux1Sel
+ */
+ REG_INIT(AB8500_VAUX1SEL, 0x04, 0x1f, 0x0f),
+ /*
+ * 0x0f, Vaux2Sel
+ */
+ REG_INIT(AB8500_VAUX2SEL, 0x04, 0x20, 0x0f),
+ /*
+ * 0x07, Vaux3Sel
+ */
+ REG_INIT(AB8500_VRF1VAUX3SEL, 0x04, 0x21, 0x07),
+ /*
+ * 0x01, VextSupply12LP
+ */
+ REG_INIT(AB8500_REGUCTRL2SPARE, 0x04, 0x22, 0x01),
+ /*
+ * 0x04, Vaux1Disch
+ * 0x08, Vaux2Disch
+ * 0x10, Vaux3Disch
+ * 0x20, Vintcore12Disch
+ * 0x40, VTVoutDisch
+ * 0x80, VaudioDisch
+ */
+ REG_INIT(AB8500_REGUCTRLDISCH, 0x04, 0x43, 0xfc),
+ /*
+ * 0x02, VanaDisch
+ * 0x04, VdmicPullDownEna
+ * 0x10, VdmicDisch
+ */
+ REG_INIT(AB8500_REGUCTRLDISCH2, 0x04, 0x44, 0x16),
+};
+
static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
@@ -529,10 +754,51 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
/* make sure the platform data has the correct size */
if (pdata->num_regulator != ARRAY_SIZE(ab8500_regulator_info)) {
- dev_err(&pdev->dev, "platform configuration error\n");
+ dev_err(&pdev->dev, "Configuration error: size mismatch.\n");
return -EINVAL;
}
+ /* initialize registers */
+ for (i = 0; i < pdata->num_regulator_reg_init; i++) {
+ int id;
+ u8 value;
+
+ id = pdata->regulator_reg_init[i].id;
+ value = pdata->regulator_reg_init[i].value;
+
+ /* check for configuration errors */
+ if (id >= AB8500_NUM_REGULATOR_REGISTERS) {
+ dev_err(&pdev->dev,
+ "Configuration error: id outside range.\n");
+ return -EINVAL;
+ }
+ if (value & ~ab8500_reg_init[id].mask) {
+ dev_err(&pdev->dev,
+ "Configuration error: value outside mask.\n");
+ return -EINVAL;
+ }
+
+ /* initialize register */
+ err = abx500_mask_and_set_register_interruptible(&pdev->dev,
+ ab8500_reg_init[id].bank,
+ ab8500_reg_init[id].addr,
+ ab8500_reg_init[id].mask,
+ value);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Failed to initialize 0x%02x, 0x%02x.\n",
+ ab8500_reg_init[id].bank,
+ ab8500_reg_init[id].addr);
+ return err;
+ }
+ dev_vdbg(&pdev->dev,
+ " init: 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ ab8500_reg_init[id].bank,
+ ab8500_reg_init[id].addr,
+ ab8500_reg_init[id].mask,
+ value);
+ }
+
/* register all regulators */
for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
struct ab8500_regulator_info *info = NULL;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 9fa20957847d..3ffc6979d164 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1629,6 +1629,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV)
{
int ret;
+ int delay = 0;
unsigned int selector;
trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);
@@ -1662,6 +1663,22 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
}
}
+ /*
+ * If we can't obtain the old selector there is not enough
+ * info to call set_voltage_time_sel().
+ */
+ if (rdev->desc->ops->set_voltage_time_sel &&
+ rdev->desc->ops->get_voltage_sel) {
+ unsigned int old_selector = 0;
+
+ ret = rdev->desc->ops->get_voltage_sel(rdev);
+ if (ret < 0)
+ return ret;
+ old_selector = ret;
+ delay = rdev->desc->ops->set_voltage_time_sel(rdev,
+ old_selector, selector);
+ }
+
if (best_val != INT_MAX) {
ret = rdev->desc->ops->set_voltage_sel(rdev, selector);
selector = best_val;
@@ -1672,6 +1689,14 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
ret = -EINVAL;
}
+ /* Insert any necessary delays */
+ if (delay >= 1000) {
+ mdelay(delay / 1000);
+ udelay(delay % 1000);
+ } else if (delay) {
+ udelay(delay);
+ }
+
if (ret == 0)
_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE,
NULL);
@@ -1740,6 +1765,51 @@ out:
EXPORT_SYMBOL_GPL(regulator_set_voltage);
/**
+ * regulator_set_voltage_time - get rise/fall time
+ * @regulator: regulator source
+ * @old_uV: starting voltage in microvolts
+ * @new_uV: target voltage in microvolts
+ *
+ * Provided with the starting and ending voltage, this function attempts to
+ * calculate the time in microseconds required to rise or fall to this new
+ * voltage.
+ */
+int regulator_set_voltage_time(struct regulator *regulator,
+ int old_uV, int new_uV)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_ops *ops = rdev->desc->ops;
+ int old_sel = -1;
+ int new_sel = -1;
+ int voltage;
+ int i;
+
+ /* Currently requires operations to do this */
+ if (!ops->list_voltage || !ops->set_voltage_time_sel
+ || !rdev->desc->n_voltages)
+ return -EINVAL;
+
+ for (i = 0; i < rdev->desc->n_voltages; i++) {
+ /* We only look for exact voltage matches here */
+ voltage = regulator_list_voltage(regulator, i);
+ if (voltage < 0)
+ return -EINVAL;
+ if (voltage == 0)
+ continue;
+ if (voltage == old_uV)
+ old_sel = i;
+ if (voltage == new_uV)
+ new_sel = i;
+ }
+
+ if (old_sel < 0 || new_sel < 0)
+ return -EINVAL;
+
+ return ops->set_voltage_time_sel(rdev, old_sel, new_sel);
+}
+EXPORT_SYMBOL_GPL(regulator_set_voltage_time);
+
+/**
* regulator_sync_voltage - re-apply last regulator output voltage
* @regulator: regulator source
*
@@ -2565,8 +2635,11 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
init_data->consumer_supplies[i].dev,
init_data->consumer_supplies[i].dev_name,
init_data->consumer_supplies[i].supply);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(dev, "Failed to set supply %s\n",
+ init_data->consumer_supplies[i].supply);
goto unset_supplies;
+ }
}
list_add(&rdev->list, &regulator_list);
@@ -2653,6 +2726,47 @@ out:
EXPORT_SYMBOL_GPL(regulator_suspend_prepare);
/**
+ * regulator_suspend_finish - resume regulators from system wide suspend
+ *
+ * Turn on regulators that might be turned off by regulator_suspend_prepare
+ * and that should be turned on according to the regulators properties.
+ */
+int regulator_suspend_finish(void)
+{
+ struct regulator_dev *rdev;
+ int ret = 0, error;
+
+ mutex_lock(&regulator_list_mutex);
+ list_for_each_entry(rdev, &regulator_list, list) {
+ struct regulator_ops *ops = rdev->desc->ops;
+
+ mutex_lock(&rdev->mutex);
+ if ((rdev->use_count > 0 || rdev->constraints->always_on) &&
+ ops->enable) {
+ error = ops->enable(rdev);
+ if (error)
+ ret = error;
+ } else {
+ if (!has_full_constraints)
+ goto unlock;
+ if (!ops->disable)
+ goto unlock;
+ if (ops->is_enabled && !ops->is_enabled(rdev))
+ goto unlock;
+
+ error = ops->disable(rdev);
+ if (error)
+ ret = error;
+ }
+unlock:
+ mutex_unlock(&rdev->mutex);
+ }
+ mutex_unlock(&regulator_list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_suspend_finish);
+
+/**
* regulator_has_full_constraints - the system has fully specified constraints
*
* Calling this function will cause the regulator API to disable all
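
The new regulator_set_voltage_time() walks list_voltage() to translate the two requested voltages into selectors and then asks set_voltage_time_sel() for the ramp time in microseconds, so a consumer can learn how long a transition will take before programming it. Hypothetical consumer usage (regulator handle and voltages invented):

static int example_bump_vdd(struct regulator *vdd)
{
	int us = regulator_set_voltage_time(vdd, 1200000, 1350000);

	if (us < 0)
		return us;	/* ramp time unknown, or voltages not listed */

	pr_info("vdd ramp 1.20 V -> 1.35 V takes %d us\n", us);

	return regulator_set_voltage(vdd, 1350000, 1350000);
}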
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 01ef7e9903bb..77e0cfb30b23 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -1185,6 +1185,7 @@ static const struct platform_device_id max8997_pmic_id[] = {
{ "max8997-pmic", 0},
{ },
};
+MODULE_DEVICE_TABLE(platform, max8997_pmic_id);
static struct platform_driver max8997_pmic_driver = {
.driver = {
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 0ec49ca527a8..43410266f993 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -887,6 +887,7 @@ static const struct platform_device_id max8998_pmic_id[] = {
{ "lp3974-pmic", TYPE_LP3974 },
{ }
};
+MODULE_DEVICE_TABLE(platform, max8998_pmic_id);
static struct platform_driver max8998_pmic_driver = {
.driver = {
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 176a6be5a8ce..9166aa0a9df7 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -596,7 +596,7 @@ static struct regulator_ops regulator_ops = {
.get_current_limit = get_current_limit,
};
-static int __devexit pmic_remove(struct spi_device *spi)
+static int pmic_remove(struct spi_device *spi)
{
struct tps6524x *hw = spi_get_drvdata(spi);
int i;
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 06df898842c0..e93453b1b978 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -565,9 +565,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
}
irq = platform_get_irq_byname(pdev, "UV");
- ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq,
- IRQF_TRIGGER_RISING, dcdc->name,
- dcdc);
+ ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
+ IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
@@ -575,9 +574,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
}
irq = platform_get_irq_byname(pdev, "HC");
- ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_oc_irq,
- IRQF_TRIGGER_RISING, dcdc->name,
- dcdc);
+ ret = request_threaded_irq(irq, NULL, wm831x_dcdc_oc_irq,
+ IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request HC IRQ %d: %d\n",
irq, ret);
@@ -589,7 +587,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
return 0;
err_uv:
- wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
err_regulator:
regulator_unregister(dcdc->regulator);
err:
@@ -606,8 +604,8 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "HC"), dcdc);
- wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(platform_get_irq_byname(pdev, "HC"), dcdc);
+ free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
regulator_unregister(dcdc->regulator);
if (dcdc->dvs_gpio)
gpio_free(dcdc->dvs_gpio);
@@ -756,9 +754,8 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
}
irq = platform_get_irq_byname(pdev, "UV");
- ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq,
- IRQF_TRIGGER_RISING, dcdc->name,
- dcdc);
+ ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
+ IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
@@ -783,7 +780,7 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
regulator_unregister(dcdc->regulator);
kfree(dcdc);
@@ -885,9 +882,9 @@ static __devinit int wm831x_boostp_probe(struct platform_device *pdev)
}
irq = platform_get_irq_byname(pdev, "UV");
- ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq,
- IRQF_TRIGGER_RISING, dcdc->name,
- dcdc);
+ ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
+ IRQF_TRIGGER_RISING, dcdc->name,
+ dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
@@ -908,11 +905,10 @@ err:
static __devexit int wm831x_boostp_remove(struct platform_device *pdev)
{
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
- struct wm831x *wm831x = dcdc->wm831x;
platform_set_drvdata(pdev, NULL);
- wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
regulator_unregister(dcdc->regulator);
kfree(dcdc);
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 6c446cd6ad54..01f27c7f4236 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -198,9 +198,8 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- ret = wm831x_request_irq(wm831x, irq, wm831x_isink_irq,
- IRQF_TRIGGER_RISING, isink->name,
- isink);
+ ret = request_threaded_irq(irq, NULL, wm831x_isink_irq,
+ IRQF_TRIGGER_RISING, isink->name, isink);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request ISINK IRQ %d: %d\n",
irq, ret);
@@ -221,11 +220,10 @@ err:
static __devexit int wm831x_isink_remove(struct platform_device *pdev)
{
struct wm831x_isink *isink = platform_get_drvdata(pdev);
- struct wm831x *wm831x = isink->wm831x;
platform_set_drvdata(pdev, NULL);
- wm831x_free_irq(wm831x, platform_get_irq(pdev, 0), isink);
+ free_irq(platform_get_irq(pdev, 0), isink);
regulator_unregister(isink->regulator);
kfree(isink);
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index c94fc5b7cd5b..2220cf8defb1 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -354,9 +354,9 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
}
irq = platform_get_irq_byname(pdev, "UV");
- ret = wm831x_request_irq(wm831x, irq, wm831x_ldo_uv_irq,
- IRQF_TRIGGER_RISING, ldo->name,
- ldo);
+ ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
+ IRQF_TRIGGER_RISING, ldo->name,
+ ldo);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
@@ -377,11 +377,10 @@ err:
static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
{
struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
- struct wm831x *wm831x = ldo->wm831x;
platform_set_drvdata(pdev, NULL);
- wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), ldo);
+ free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
regulator_unregister(ldo->regulator);
kfree(ldo);
@@ -619,9 +618,8 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
}
irq = platform_get_irq_byname(pdev, "UV");
- ret = wm831x_request_irq(wm831x, irq, wm831x_ldo_uv_irq,
- IRQF_TRIGGER_RISING, ldo->name,
- ldo);
+ ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
+ IRQF_TRIGGER_RISING, ldo->name, ldo);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
@@ -642,9 +640,8 @@ err:
static __devexit int wm831x_aldo_remove(struct platform_device *pdev)
{
struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
- struct wm831x *wm831x = ldo->wm831x;
- wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), ldo);
+ free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
regulator_unregister(ldo->regulator);
kfree(ldo);
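
The wm831x regulator hunks above all make the same substitution: the chip-specific wm831x_request_irq()/wm831x_free_irq() helpers give way to the generic request_threaded_irq()/free_irq() pair, with a NULL primary handler so the existing handler runs as the IRQ thread. A minimal sketch of the resulting probe/remove pattern, with placeholder names (not code taken from this series):

	/* Sketch only: request the "UV" IRQ through genirq and release it
	 * symmetrically on remove; "example" is an illustrative name. */
	irq = platform_get_irq_byname(pdev, "UV");
	ret = request_threaded_irq(irq, NULL, wm831x_example_uv_irq,
				   IRQF_TRIGGER_RISING, example->name, example);
	if (ret != 0)
		dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
			irq, ret);

	/* ... and in the remove path: */
	free_irq(platform_get_irq_byname(pdev, "UV"), example);
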
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 379d8592bc6e..459f2cbe80fc 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3982,8 +3982,10 @@ out_err:
}
static struct ccw_driver dasd_eckd_driver = {
- .name = "dasd-eckd",
- .owner = THIS_MODULE,
+ .driver = {
+ .name = "dasd-eckd",
+ .owner = THIS_MODULE,
+ },
.ids = dasd_eckd_ids,
.probe = dasd_eckd_probe,
.remove = dasd_generic_remove,
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index be89b3a893da..4b71b1164868 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -65,8 +65,10 @@ dasd_fba_set_online(struct ccw_device *cdev)
}
static struct ccw_driver dasd_fba_driver = {
- .name = "dasd-fba",
- .owner = THIS_MODULE,
+ .driver = {
+ .name = "dasd-fba",
+ .owner = THIS_MODULE,
+ },
.ids = dasd_fba_ids,
.probe = dasd_fba_probe,
.remove = dasd_generic_remove,
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 3fb4335d491d..694464c65fcd 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -764,8 +764,10 @@ static struct ccw_device_id raw3215_id[] = {
};
static struct ccw_driver raw3215_ccw_driver = {
- .name = "3215",
- .owner = THIS_MODULE,
+ .driver = {
+ .name = "3215",
+ .owner = THIS_MODULE,
+ },
.ids = raw3215_id,
.probe = &raw3215_probe,
.remove = &raw3215_remove,
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 96ba2fd1c8ad..4c023761946f 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -1388,8 +1388,10 @@ static struct ccw_device_id raw3270_id[] = {
};
static struct ccw_driver raw3270_ccw_driver = {
- .name = "3270",
- .owner = THIS_MODULE,
+ .driver = {
+ .name = "3270",
+ .owner = THIS_MODULE,
+ },
.ids = raw3270_id,
.probe = &raw3270_probe,
.remove = &raw3270_remove,
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index c26511171ffe..9eff2df70ddb 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1320,8 +1320,10 @@ tape_34xx_online(struct ccw_device *cdev)
}
static struct ccw_driver tape_34xx_driver = {
- .name = "tape_34xx",
- .owner = THIS_MODULE,
+ .driver = {
+ .name = "tape_34xx",
+ .owner = THIS_MODULE,
+ },
.ids = tape_34xx_ids,
.probe = tape_generic_probe,
.remove = tape_generic_remove,
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index de2e99e0a71b..b98dcbd16711 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1761,8 +1761,10 @@ tape_3590_online(struct ccw_device *cdev)
}
static struct ccw_driver tape_3590_driver = {
- .name = "tape_3590",
- .owner = THIS_MODULE,
+ .driver = {
+ .name = "tape_3590",
+ .owner = THIS_MODULE,
+ },
.ids = tape_3590_ids,
.probe = tape_generic_probe,
.remove = tape_generic_remove,
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index caef1757341d..f6b00c3df425 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -64,8 +64,10 @@ static int ur_set_offline(struct ccw_device *cdev);
static int ur_pm_suspend(struct ccw_device *cdev);
static struct ccw_driver ur_driver = {
- .name = "vmur",
- .owner = THIS_MODULE,
+ .driver = {
+ .name = "vmur",
+ .owner = THIS_MODULE,
+ },
.ids = ur_ids,
.probe = ur_probe,
.remove = ur_remove,
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 2864581d8ecb..5c567414c4bb 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -428,7 +428,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
gdev = to_ccwgroupdev(dev);
gdrv = to_ccwgroupdrv(dev->driver);
- if (!try_module_get(gdrv->owner))
+ if (!try_module_get(gdrv->driver.owner))
return -EINVAL;
ret = strict_strtoul(buf, 0, &value);
@@ -442,7 +442,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
else
ret = -EINVAL;
out:
- module_put(gdrv->owner);
+ module_put(gdrv->driver.owner);
return (ret == 0) ? count : ret;
}
@@ -616,8 +616,6 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
{
/* register our new driver with the core */
cdriver->driver.bus = &ccwgroup_bus_type;
- cdriver->driver.name = cdriver->name;
- cdriver->driver.owner = cdriver->owner;
return driver_register(&cdriver->driver);
}
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e50b12163afe..df14c51f6532 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -127,7 +127,7 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
return ret;
}
-struct bus_type ccw_bus_type;
+static struct bus_type ccw_bus_type;
static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
@@ -547,7 +547,7 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
return -EAGAIN;
- if (cdev->drv && !try_module_get(cdev->drv->owner)) {
+ if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) {
atomic_set(&cdev->private->onoff, 0);
return -EINVAL;
}
@@ -573,7 +573,7 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
}
out:
if (cdev->drv)
- module_put(cdev->drv->owner);
+ module_put(cdev->drv->driver.owner);
atomic_set(&cdev->private->onoff, 0);
return (ret < 0) ? ret : count;
}
@@ -1970,7 +1970,7 @@ static const struct dev_pm_ops ccw_pm_ops = {
.restore = ccw_device_pm_restore,
};
-struct bus_type ccw_bus_type = {
+static struct bus_type ccw_bus_type = {
.name = "ccw",
.match = ccw_bus_match,
.uevent = ccw_uevent,
@@ -1993,8 +1993,6 @@ int ccw_driver_register(struct ccw_driver *cdriver)
struct device_driver *drv = &cdriver->driver;
drv->bus = &ccw_bus_type;
- drv->name = cdriver->name;
- drv->owner = cdriver->owner;
return driver_register(drv);
}
@@ -2112,5 +2110,4 @@ EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
-EXPORT_SYMBOL(ccw_bus_type);
EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 379de2d1ec49..7e297c7bb5ff 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -133,7 +133,6 @@ void ccw_device_set_notoper(struct ccw_device *cdev);
/* qdio needs this. */
void ccw_device_set_timeout(struct ccw_device *, int);
extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
-extern struct bus_type ccw_bus_type;
/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 5640c89cd9de..479c665e9e7c 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1508,7 +1508,8 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
return -EBUSY;
-
+ if (!count)
+ return 0;
if (callflags & QDIO_FLAG_SYNC_INPUT)
return handle_inbound(irq_ptr->input_qs[q_nr],
callflags, bufnr, count);
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index ce3a5c13ce0b..9feb62febb3d 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -264,8 +264,10 @@ static struct device *claw_root_dev;
/* ccwgroup table */
static struct ccwgroup_driver claw_group_driver = {
- .owner = THIS_MODULE,
- .name = "claw",
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "claw",
+ },
.max_slaves = 2,
.driver_id = 0xC3D3C1E6,
.probe = claw_probe,
@@ -282,8 +284,10 @@ static struct ccw_device_id claw_ids[] = {
MODULE_DEVICE_TABLE(ccw, claw_ids);
static struct ccw_driver claw_ccw_driver = {
- .owner = THIS_MODULE,
- .name = "claw",
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "claw",
+ },
.ids = claw_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 4c2845985927..c189296763a4 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1764,16 +1764,20 @@ static struct ccw_device_id ctcm_ids[] = {
MODULE_DEVICE_TABLE(ccw, ctcm_ids);
static struct ccw_driver ctcm_ccw_driver = {
- .owner = THIS_MODULE,
- .name = "ctcm",
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ctcm",
+ },
.ids = ctcm_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
};
static struct ccwgroup_driver ctcm_group_driver = {
- .owner = THIS_MODULE,
- .name = CTC_DRIVER_NAME,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = CTC_DRIVER_NAME,
+ },
.max_slaves = 2,
.driver_id = 0xC3E3C3D4, /* CTCM */
.probe = ctcm_probe_device,
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 30b2a820e670..7fbc4adbb6d5 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2396,8 +2396,10 @@ static struct ccw_device_id lcs_ids[] = {
MODULE_DEVICE_TABLE(ccw, lcs_ids);
static struct ccw_driver lcs_ccw_driver = {
- .owner = THIS_MODULE,
- .name = "lcs",
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "lcs",
+ },
.ids = lcs_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
@@ -2407,8 +2409,10 @@ static struct ccw_driver lcs_ccw_driver = {
* LCS ccwgroup driver registration
*/
static struct ccwgroup_driver lcs_group_driver = {
- .owner = THIS_MODULE,
- .name = "lcs",
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "lcs",
+ },
.max_slaves = 2,
.driver_id = 0xD3C3E2,
.probe = lcs_probe_device,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 25eef304bd47..10a3a3b4dd3e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3902,7 +3902,9 @@ static struct ccw_device_id qeth_ids[] = {
MODULE_DEVICE_TABLE(ccw, qeth_ids);
static struct ccw_driver qeth_ccw_driver = {
- .name = "qeth",
+ .driver = {
+ .name = "qeth",
+ },
.ids = qeth_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
@@ -4428,8 +4430,10 @@ static int qeth_core_restore(struct ccwgroup_device *gdev)
}
static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
- .owner = THIS_MODULE,
- .name = "qeth",
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "qeth",
+ },
.driver_id = 0xD8C5E3C8,
.probe = qeth_core_probe_device,
.remove = qeth_core_remove_device,
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 4f7852dd30c7..e8b7cee62046 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -251,8 +251,10 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
}
struct ccw_driver zfcp_ccw_driver = {
- .owner = THIS_MODULE,
- .name = "zfcp",
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "zfcp",
+ },
.ids = zfcp_ccw_device_id,
.probe = zfcp_ccw_probe,
.remove = zfcp_ccw_remove,
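
Every s390 ccw and ccwgroup hunk above applies the same mechanical conversion: the driver name and module owner move out of the bus-specific structure and into its embedded struct device_driver, so ccw_driver_register() and ccwgroup_driver_register() no longer need to copy them over. A hedged sketch of the resulting initializer shape (a made-up driver, not one touched by this series):

	/* Sketch only: example_ids, example_probe and example_remove are
	 * placeholders for the usual per-driver symbols. */
	static struct ccw_driver example_ccw_driver = {
		.driver = {
			.name	= "example",
			.owner	= THIS_MODULE,
		},
		.ids	= example_ids,
		.probe	= example_probe,
		.remove	= example_remove,
	};
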
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile
index 92df4d6b6147..1bd9fd18f7f3 100644
--- a/drivers/scsi/aacraid/Makefile
+++ b/drivers/scsi/aacraid/Makefile
@@ -3,6 +3,6 @@
obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \
- dpcsup.o rx.o sa.o rkt.o nark.o
+ dpcsup.o rx.o sa.o rkt.o nark.o src.o
ccflags-y := -Idrivers/scsi
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 7df2dd1d2c6f..118ce83a737c 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -5,7 +5,8 @@
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1486,7 +1487,9 @@ int aac_get_adapter_info(struct aac_dev* dev)
dev->a_ops.adapter_write = aac_write_block;
}
dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
- if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
+ if (dev->adapter_info.options & AAC_OPT_NEW_COMM_TYPE1)
+ dev->adapter_info.options |= AAC_OPT_NEW_COMM;
+ if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
/*
* Worst case size that could cause sg overflow when
* we break up SG elements that are larger than 64KB.
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 4dbcc055ac78..29ab00016b78 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 26400
+# define AAC_DRIVER_BUILD 28000
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -277,6 +277,16 @@ enum aac_queue_types {
#define FsaNormal 1
+/* transport FIB header (PMC) */
+struct aac_fib_xporthdr {
+ u64 HostAddress; /* FIB host address w/o xport header */
+ u32 Size; /* FIB size excluding xport header */
+ u32 Handle; /* driver handle to reference the FIB */
+ u64 Reserved[2];
+};
+
+#define ALIGN32 32
+
/*
* Define the FIB. The FIB is where all the requested data and
* command information are put to the application on the FSA adapter.
@@ -394,7 +404,9 @@ enum fib_xfer_state {
AdapterMicroFib = (1<<17),
BIOSFibPath = (1<<18),
FastResponseCapable = (1<<19),
- ApiFib = (1<<20) // Its an API Fib.
+ ApiFib = (1<<20), /* It's an API Fib */
+ /* PMC NEW COMM: There is no more AIF data pending */
+ NoMoreAifDataAvailable = (1<<21)
};
/*
@@ -404,6 +416,7 @@ enum fib_xfer_state {
#define ADAPTER_INIT_STRUCT_REVISION 3
#define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science
+#define ADAPTER_INIT_STRUCT_REVISION_6 6 /* PMC src */
struct aac_init
{
@@ -428,9 +441,15 @@ struct aac_init
#define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001
#define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010
#define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020
+#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000041
__le32 MaxIoCommands; /* max outstanding commands */
__le32 MaxIoSize; /* largest I/O command */
__le32 MaxFibSize; /* largest FIB to adapter */
+ /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */
+ __le32 MaxNumAif; /* max number of aif */
+ /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */
+ __le32 HostRRQ_AddrLow;
+ __le32 HostRRQ_AddrHigh; /* Host RRQ (response queue) for SRC */
};
enum aac_log_level {
@@ -685,7 +704,7 @@ struct rx_inbound {
#define OutboundDoorbellReg MUnit.ODR
struct rx_registers {
- struct rx_mu_registers MUnit; /* 1300h - 1344h */
+ struct rx_mu_registers MUnit; /* 1300h - 1347h */
__le32 reserved1[2]; /* 1348h - 134ch */
struct rx_inbound IndexRegs;
};
@@ -703,7 +722,7 @@ struct rx_registers {
#define rkt_inbound rx_inbound
struct rkt_registers {
- struct rkt_mu_registers MUnit; /* 1300h - 1344h */
+ struct rkt_mu_registers MUnit; /* 1300h - 1347h */
__le32 reserved1[1006]; /* 1348h - 22fch */
struct rkt_inbound IndexRegs; /* 2300h - */
};
@@ -713,6 +732,44 @@ struct rkt_registers {
#define rkt_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rkt->CSR))
#define rkt_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rkt->CSR))
+/*
+ * PMC SRC message unit registers
+ */
+
+#define src_inbound rx_inbound
+
+struct src_mu_registers {
+ /* PCI*| Name */
+ __le32 reserved0[8]; /* 00h | Reserved */
+ __le32 IDR; /* 20h | Inbound Doorbell Register */
+ __le32 IISR; /* 24h | Inbound Int. Status Register */
+ __le32 reserved1[3]; /* 28h | Reserved */
+ __le32 OIMR; /* 34h | Outbound Int. Mask Register */
+ __le32 reserved2[25]; /* 38h | Reserved */
+ __le32 ODR_R; /* 9ch | Outbound Doorbell Read */
+ __le32 ODR_C; /* a0h | Outbound Doorbell Clear */
+ __le32 reserved3[6]; /* a4h | Reserved */
+ __le32 OMR; /* bch | Outbound Message Register */
+ __le32 IQ_L; /* c0h | Inbound Queue (Low address) */
+ __le32 IQ_H; /* c4h | Inbound Queue (High address) */
+};
+
+struct src_registers {
+ struct src_mu_registers MUnit; /* 00h - c7h */
+ __le32 reserved1[130790]; /* c8h - 7fc5fh */
+ struct src_inbound IndexRegs; /* 7fc60h */
+};
+
+#define src_readb(AEP, CSR) readb(&((AEP)->regs.src.bar0->CSR))
+#define src_readl(AEP, CSR) readl(&((AEP)->regs.src.bar0->CSR))
+#define src_writeb(AEP, CSR, value) writeb(value, \
+ &((AEP)->regs.src.bar0->CSR))
+#define src_writel(AEP, CSR, value) writel(value, \
+ &((AEP)->regs.src.bar0->CSR))
+
+#define SRC_ODR_SHIFT 12
+#define SRC_IDR_SHIFT 9
+
typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);
struct aac_fib_context {
@@ -879,6 +936,7 @@ struct aac_supplement_adapter_info
#define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001)
#define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002)
#define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004)
+#define AAC_OPTION_DOORBELL_RESET cpu_to_le32(0x00004000)
#define AAC_SIS_VERSION_V3 3
#define AAC_SIS_SLOT_UNKNOWN 0xFF
@@ -940,6 +998,7 @@ struct aac_bus_info_response {
#define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16)
#define AAC_OPT_NEW_COMM cpu_to_le32(1<<17)
#define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18)
+#define AAC_OPT_NEW_COMM_TYPE1 cpu_to_le32(1<<28)
struct aac_dev
{
@@ -952,6 +1011,7 @@ struct aac_dev
*/
unsigned max_fib_size;
unsigned sg_tablesize;
+ unsigned max_num_aif;
/*
* Map for 128 fib objects (64k)
@@ -980,10 +1040,21 @@ struct aac_dev
struct adapter_ops a_ops;
unsigned long fsrev; /* Main driver's revision number */
- unsigned base_size; /* Size of mapped in region */
+ unsigned long dbg_base; /* address of UART
+ * debug buffer */
+
+ unsigned base_size, dbg_size; /* Size of
+ * mapped in region */
+
struct aac_init *init; /* Holds initialization info to communicate with adapter */
dma_addr_t init_pa; /* Holds physical address of the init struct */
+ u32 *host_rrq; /* response queue
+ * if AAC_COMM_MESSAGE_TYPE1 */
+
+ dma_addr_t host_rrq_pa; /* phys. address */
+ u32 host_rrq_idx; /* index into rrq buffer */
+
struct pci_dev *pdev; /* Our PCI interface */
void * printfbuf; /* pointer to buffer used for printf's from the adapter */
void * comm_addr; /* Base address of Comm area */
@@ -1003,14 +1074,20 @@ struct aac_dev
*/
#ifndef AAC_MIN_FOOTPRINT_SIZE
# define AAC_MIN_FOOTPRINT_SIZE 8192
+# define AAC_MIN_SRC_BAR0_SIZE 0x400000
+# define AAC_MIN_SRC_BAR1_SIZE 0x800
#endif
union
{
struct sa_registers __iomem *sa;
struct rx_registers __iomem *rx;
struct rkt_registers __iomem *rkt;
+ struct {
+ struct src_registers __iomem *bar0;
+ char __iomem *bar1;
+ } src;
} regs;
- volatile void __iomem *base;
+ volatile void __iomem *base, *dbg_base_mapped;
volatile struct rx_inbound __iomem *IndexRegs;
u32 OIMR; /* Mask Register Cache */
/*
@@ -1031,9 +1108,8 @@ struct aac_dev
u8 comm_interface;
# define AAC_COMM_PRODUCER 0
# define AAC_COMM_MESSAGE 1
- /* macro side-effects BEWARE */
-# define raw_io_interface \
- init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
+# define AAC_COMM_MESSAGE_TYPE1 3
+ u8 raw_io_interface;
u8 raw_io_64;
u8 printf_enabled;
u8 in_reset;
@@ -1789,6 +1865,10 @@ extern struct aac_common aac_config;
#define DoorBellAdapterNormCmdNotFull (1<<3) /* Adapter -> Host */
#define DoorBellAdapterNormRespNotFull (1<<4) /* Adapter -> Host */
#define DoorBellPrintfReady (1<<5) /* Adapter -> Host */
+#define DoorBellAifPending (1<<6) /* Adapter -> Host */
+
+/* PMC specific outbound doorbell bits */
+#define PmDoorBellResponseSent (1<<1) /* Adapter -> Host */
/*
* For FIB communication, we need all of the following things
@@ -1831,6 +1911,9 @@ extern struct aac_common aac_config;
#define AifReqAPIJobUpdate 109 /* Update a job report from the API */
#define AifReqAPIJobFinish 110 /* Finish a job from the API */
+/* PMC NEW COMM: Request the event data */
+#define AifReqEvent 200
+
/*
* Adapter Initiated FIB command structures. Start with the adapter
* initiated FIBs that really come from the adapter, and get responded
@@ -1886,10 +1969,13 @@ int aac_rx_init(struct aac_dev *dev);
int aac_rkt_init(struct aac_dev *dev);
int aac_nark_init(struct aac_dev *dev);
int aac_sa_init(struct aac_dev *dev);
+int aac_src_init(struct aac_dev *dev);
int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify);
unsigned int aac_response_normal(struct aac_queue * q);
unsigned int aac_command_normal(struct aac_queue * q);
-unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
+unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index,
+ int isAif, int isFastResponse,
+ struct hw_fib *aif_fib);
int aac_reset_adapter(struct aac_dev * dev, int forced);
int aac_check_health(struct aac_dev * dev);
int aac_command_thread(void *data);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 645ddd9d9b9e..8a0b33033177 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -5,7 +5,8 @@
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index a7261486ccd4..7ac8fdb5577b 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -5,7 +5,8 @@
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -52,12 +53,16 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
unsigned long size, align;
const unsigned long fibsize = 4096;
const unsigned long printfbufsiz = 256;
+ unsigned long host_rrq_size = 0;
struct aac_init *init;
dma_addr_t phys;
unsigned long aac_max_hostphysmempages;
- size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;
-
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1)
+ host_rrq_size = (dev->scsi_host_ptr->can_queue
+ + AAC_NUM_MGT_FIB) * sizeof(u32);
+ size = fibsize + sizeof(struct aac_init) + commsize +
+ commalign + printfbufsiz + host_rrq_size;
base = pci_alloc_consistent(dev->pdev, size, &phys);
@@ -70,8 +75,14 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
dev->comm_phys = phys;
dev->comm_size = size;
- dev->init = (struct aac_init *)(base + fibsize);
- dev->init_pa = phys + fibsize;
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+ dev->host_rrq = (u32 *)(base + fibsize);
+ dev->host_rrq_pa = phys + fibsize;
+ memset(dev->host_rrq, 0, host_rrq_size);
+ }
+
+ dev->init = (struct aac_init *)(base + fibsize + host_rrq_size);
+ dev->init_pa = phys + fibsize + host_rrq_size;
init = dev->init;
@@ -106,8 +117,13 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
init->InitFlags = 0;
if (dev->comm_interface == AAC_COMM_MESSAGE) {
- init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
+ init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
+ } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+ init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
+ init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_TYPE1_SUPPORTED);
+ dprintk((KERN_WARNING
+ "aacraid: New Comm Interface type1 enabled\n"));
}
init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
INITFLAGS_DRIVER_SUPPORTS_PM);
@@ -115,11 +131,18 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
+ init->MaxNumAif = cpu_to_le32(dev->max_num_aif);
+ init->HostRRQ_AddrHigh = (u32)((u64)dev->host_rrq_pa >> 32);
+ init->HostRRQ_AddrLow = (u32)(dev->host_rrq_pa & 0xffffffff);
+
+
/*
* Increment the base address by the amount already used
*/
- base = base + fibsize + sizeof(struct aac_init);
- phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init));
+ base = base + fibsize + host_rrq_size + sizeof(struct aac_init);
+ phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size +
+ sizeof(struct aac_init));
+
/*
* Align the beginning of Headers to commalign
*/
@@ -314,15 +337,22 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
- sizeof(struct aac_write) + sizeof(struct sgentry))
/ sizeof(struct sgentry);
dev->comm_interface = AAC_COMM_PRODUCER;
- dev->raw_io_64 = 0;
+ dev->raw_io_interface = dev->raw_io_64 = 0;
+
if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
(status[0] == 0x00000001)) {
if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
dev->raw_io_64 = 1;
- if (dev->a_ops.adapter_comm &&
- (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)))
- dev->comm_interface = AAC_COMM_MESSAGE;
+ if (dev->a_ops.adapter_comm) {
+ if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1)) {
+ dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
+ dev->raw_io_interface = 1;
+ } else if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)) {
+ dev->comm_interface = AAC_COMM_MESSAGE;
+ dev->raw_io_interface = 1;
+ }
+ }
if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
(status[2] > dev->base_size)) {
aac_adapter_ioremap(dev, 0);
@@ -350,10 +380,12 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
* status[3] & 0xFFFF maximum number FIBs outstanding
*/
host->max_sectors = (status[1] >> 16) << 1;
- dev->max_fib_size = status[1] & 0xFFFF;
+ /* Multiple of 32 for PMC */
+ dev->max_fib_size = status[1] & 0xFFE0;
host->sg_tablesize = status[2] >> 16;
dev->sg_tablesize = status[2] & 0xFFFF;
host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
+ dev->max_num_aif = status[4] & 0xFFFF;
/*
* NOTE:
* All these overrides are based on a fixed internal
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 060ac4bd5a14..dd7ad3ba2dad 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -5,7 +5,8 @@
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -63,9 +64,11 @@ static int fib_map_alloc(struct aac_dev *dev)
"allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
- if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
- * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
- &dev->hw_fib_pa))==NULL)
+ dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
+ (dev->max_fib_size + sizeof(struct aac_fib_xporthdr))
+ * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
+ &dev->hw_fib_pa);
+ if (dev->hw_fib_va == NULL)
return -ENOMEM;
return 0;
}
@@ -110,9 +113,22 @@ int aac_fib_setup(struct aac_dev * dev)
if (i<0)
return -ENOMEM;
+ /* 32 byte alignment for PMC */
+ hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
+ dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
+ (hw_fib_pa - dev->hw_fib_pa));
+ dev->hw_fib_pa = hw_fib_pa;
+ memset(dev->hw_fib_va, 0,
+ (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) *
+ (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
+
+ /* add Xport header */
+ dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
+ sizeof(struct aac_fib_xporthdr));
+ dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr);
+
hw_fib = dev->hw_fib_va;
hw_fib_pa = dev->hw_fib_pa;
- memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
/*
* Initialise the fibs
*/
@@ -129,8 +145,10 @@ int aac_fib_setup(struct aac_dev * dev)
hw_fib->header.XferState = cpu_to_le32(0xffffffff);
hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
fibptr->hw_fib_pa = hw_fib_pa;
- hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size);
- hw_fib_pa = hw_fib_pa + dev->max_fib_size;
+ hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
+ dev->max_fib_size + sizeof(struct aac_fib_xporthdr));
+ hw_fib_pa = hw_fib_pa +
+ dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
}
/*
* Add the fib chain to the free list
@@ -664,9 +682,14 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
unsigned long nointr = 0;
unsigned long qflags;
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+ kfree(hw_fib);
+ return 0;
+ }
+
if (hw_fib->header.XferState == 0) {
if (dev->comm_interface == AAC_COMM_MESSAGE)
- kfree (hw_fib);
+ kfree(hw_fib);
return 0;
}
/*
@@ -674,7 +697,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
*/
if (hw_fib->header.StructType != FIB_MAGIC) {
if (dev->comm_interface == AAC_COMM_MESSAGE)
- kfree (hw_fib);
+ kfree(hw_fib);
return -EINVAL;
}
/*
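
The commsup.c changes above grow each FIB slot by a transport header and force 32-byte alignment for the PMC (SRC) interface; hw_fib_va/hw_fib_pa are then advanced past the first header so existing code keeps addressing the FIB itself. Roughly, the resulting layout looks like this (a sketch, not to scale):

	/* Sketch only:
	 *
	 *   | xporthdr | hw_fib (max_fib_size) | xporthdr | hw_fib | ...
	 *   ^ 32-byte aligned base             ^ next slot
	 *              ^ dev->hw_fib_va / dev->hw_fib_pa point here
	 */
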
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 9c7408fe8c7d..f0c66a80ad13 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -5,7 +5,8 @@
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -228,6 +229,48 @@ unsigned int aac_command_normal(struct aac_queue *q)
return 0;
}
+/*
+ *
+ * aac_aif_callback
+ * @context: the context set in the fib - here it is the fib itself
+ * @fibptr: pointer to the fib
+ *
+ * Handles the AIFs - new method (SRC)
+ *
+ */
+
+static void aac_aif_callback(void *context, struct fib * fibptr)
+{
+ struct fib *fibctx;
+ struct aac_dev *dev;
+ struct aac_aifcmd *cmd;
+ int status;
+
+ fibctx = (struct fib *)context;
+ BUG_ON(fibptr == NULL);
+ dev = fibptr->dev;
+
+ if (fibptr->hw_fib_va->header.XferState &
+ cpu_to_le32(NoMoreAifDataAvailable)) {
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ return;
+ }
+
+ aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);
+
+ aac_fib_init(fibctx);
+ cmd = (struct aac_aifcmd *) fib_data(fibctx);
+ cmd->command = cpu_to_le32(AifReqEvent);
+
+ status = aac_fib_send(AifRequest,
+ fibctx,
+ sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
+ FsaNormal,
+ 0, 1,
+ (fib_callback)aac_aif_callback, fibctx);
+}
+
/**
* aac_intr_normal - Handle command replies
@@ -238,19 +281,17 @@ unsigned int aac_command_normal(struct aac_queue *q)
* know there is a response on our normal priority queue. We will pull off
* all QE there are and wake up all the waiters before exiting.
*/
-
-unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
+unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
+ int isAif, int isFastResponse, struct hw_fib *aif_fib)
{
unsigned long mflags;
dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
- if ((index & 0x00000002L)) {
+ if (isAif == 1) { /* AIF - common */
struct hw_fib * hw_fib;
struct fib * fib;
struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
unsigned long flags;
- if (index == 0xFFFFFFFEL) /* Special Case */
- return 0; /* Do nothing */
/*
* Allocate a FIB. For non queued stuff we can just use
* the stack so we are happy. We need a fib object in order to
@@ -263,8 +304,13 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
kfree (fib);
return 1;
}
- memcpy(hw_fib, (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
- (index & ~0x00000002L)), sizeof(struct hw_fib));
+ if (aif_fib != NULL) {
+ memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
+ } else {
+ memcpy(hw_fib,
+ (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
+ index), sizeof(struct hw_fib));
+ }
INIT_LIST_HEAD(&fib->fiblink);
fib->type = FSAFS_NTC_FIB_CONTEXT;
fib->size = sizeof(struct fib);
@@ -277,9 +323,26 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
wake_up_interruptible(&q->cmdready);
spin_unlock_irqrestore(q->lock, flags);
return 1;
+ } else if (isAif == 2) { /* AIF - new (SRC) */
+ struct fib *fibctx;
+ struct aac_aifcmd *cmd;
+
+ fibctx = aac_fib_alloc(dev);
+ if (!fibctx)
+ return 1;
+ aac_fib_init(fibctx);
+
+ cmd = (struct aac_aifcmd *) fib_data(fibctx);
+ cmd->command = cpu_to_le32(AifReqEvent);
+
+ return aac_fib_send(AifRequest,
+ fibctx,
+ sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
+ FsaNormal,
+ 0, 1,
+ (fib_callback)aac_aif_callback, fibctx);
} else {
- int fast = index & 0x01;
- struct fib * fib = &dev->fibs[index >> 2];
+ struct fib *fib = &dev->fibs[index];
struct hw_fib * hwfib = fib->hw_fib_va;
/*
@@ -298,7 +361,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
return 0;
}
- if (fast) {
+ if (isFastResponse) {
/*
* Doctor the fib
*/
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 2c93d9496d62..4ff26521d75f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -5,7 +5,8 @@
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -54,7 +55,7 @@
#include "aacraid.h"
-#define AAC_DRIVER_VERSION "1.1-5"
+#define AAC_DRIVER_VERSION "1.1-7"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
@@ -161,6 +162,7 @@ static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
+ { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Catch All */
{ 0,}
};
MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -235,7 +237,8 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
- { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */
+ { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
+ { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Catch All */
};
/**
@@ -653,8 +656,10 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
* This adapter needs a blind reset, only do so for Adapters that
* support a register, instead of a commanded, reset.
*/
- if ((aac->supplement_adapter_info.SupportedOptions2 &
- AAC_OPTION_MU_RESET) &&
+ if (((aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_MU_RESET) ||
+ (aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_DOORBELL_RESET)) &&
aac_check_reset &&
((aac_check_reset != 1) ||
!(aac->supplement_adapter_info.SupportedOptions2 &
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
index c55f7c862f0e..f397d21a0c06 100644
--- a/drivers/scsi/aacraid/nark.c
+++ b/drivers/scsi/aacraid/nark.c
@@ -4,7 +4,8 @@
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2006-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 16d8db550027..be44de92429a 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -5,7 +5,8 @@
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 84d77fd86e5b..ce530f113fdb 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -5,7 +5,8 @@
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -84,15 +85,35 @@ static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
{
+ int isAif, isFastResponse, isSpecial;
struct aac_dev *dev = dev_id;
u32 Index = rx_readl(dev, MUnit.OutboundQueue);
if (unlikely(Index == 0xFFFFFFFFL))
Index = rx_readl(dev, MUnit.OutboundQueue);
if (likely(Index != 0xFFFFFFFFL)) {
do {
- if (unlikely(aac_intr_normal(dev, Index))) {
- rx_writel(dev, MUnit.OutboundQueue, Index);
- rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
+ isAif = isFastResponse = isSpecial = 0;
+ if (Index & 0x00000002L) {
+ isAif = 1;
+ if (Index == 0xFFFFFFFEL)
+ isSpecial = 1;
+ Index &= ~0x00000002L;
+ } else {
+ if (Index & 0x00000001L)
+ isFastResponse = 1;
+ Index >>= 2;
+ }
+ if (!isSpecial) {
+ if (unlikely(aac_intr_normal(dev,
+ Index, isAif,
+ isFastResponse, NULL))) {
+ rx_writel(dev,
+ MUnit.OutboundQueue,
+ Index);
+ rx_writel(dev,
+ MUnit.ODR,
+ DoorBellAdapterNormRespReady);
+ }
}
Index = rx_readl(dev, MUnit.OutboundQueue);
} while (Index != 0xFFFFFFFFL);
@@ -631,6 +652,10 @@ int _aac_rx_init(struct aac_dev *dev)
name, instance);
goto error_iounmap;
}
+ dev->dbg_base = dev->scsi_host_ptr->base;
+ dev->dbg_base_mapped = dev->base;
+ dev->dbg_size = dev->base_size;
+
aac_adapter_enable_int(dev);
/*
* Tell the adapter that all is configured, and it can
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 622c21c68e65..e5d4457121ea 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -5,7 +5,8 @@
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -391,6 +392,10 @@ int aac_sa_init(struct aac_dev *dev)
name, instance);
goto error_iounmap;
}
+ dev->dbg_base = dev->scsi_host_ptr->base;
+ dev->dbg_base_mapped = dev->base;
+ dev->dbg_size = dev->base_size;
+
aac_adapter_enable_int(dev);
/*
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
new file mode 100644
index 000000000000..c20494660603
--- /dev/null
+++ b/drivers/scsi/aacraid/src.c
@@ -0,0 +1,594 @@
+/*
+ * Adaptec AAC series RAID controller driver
+ * (c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ * src.c
+ *
+ * Abstract: Hardware Device Interface for PMC SRC based controllers
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/completion.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
+{
+ struct aac_dev *dev = dev_id;
+ unsigned long bellbits, bellbits_shifted;
+ int our_interrupt = 0;
+ int isFastResponse;
+ u32 index, handle;
+
+ bellbits = src_readl(dev, MUnit.ODR_R);
+ if (bellbits & PmDoorBellResponseSent) {
+ bellbits = PmDoorBellResponseSent;
+ /* handle async. status */
+ our_interrupt = 1;
+ index = dev->host_rrq_idx;
+ if (dev->host_rrq[index] == 0) {
+ u32 old_index = index;
+ /* adjust index */
+ do {
+ index++;
+ if (index == dev->scsi_host_ptr->can_queue +
+ AAC_NUM_MGT_FIB)
+ index = 0;
+ if (dev->host_rrq[index] != 0)
+ break;
+ } while (index != old_index);
+ dev->host_rrq_idx = index;
+ }
+ for (;;) {
+ isFastResponse = 0;
+ /* remove toggle bit (31) */
+ handle = (dev->host_rrq[index] & 0x7fffffff);
+ /* check fast response bit (30) */
+ if (handle & 0x40000000)
+ isFastResponse = 1;
+ handle &= 0x0000ffff;
+ if (handle == 0)
+ break;
+
+ aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
+
+ dev->host_rrq[index++] = 0;
+ if (index == dev->scsi_host_ptr->can_queue +
+ AAC_NUM_MGT_FIB)
+ index = 0;
+ dev->host_rrq_idx = index;
+ }
+ } else {
+ bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
+ if (bellbits_shifted & DoorBellAifPending) {
+ our_interrupt = 1;
+ /* handle AIF */
+ aac_intr_normal(dev, 0, 2, 0, NULL);
+ }
+ }
+
+ if (our_interrupt) {
+ src_writel(dev, MUnit.ODR_C, bellbits);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/**
+ * aac_src_disable_interrupt - Disable interrupts
+ * @dev: Adapter
+ */
+
+static void aac_src_disable_interrupt(struct aac_dev *dev)
+{
+ src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
+}
+
+/**
+ * aac_src_enable_interrupt_message - Enable interrupts
+ * @dev: Adapter
+ */
+
+static void aac_src_enable_interrupt_message(struct aac_dev *dev)
+{
+ src_writel(dev, MUnit.OIMR, dev->OIMR = 0xfffffff8);
+}
+
+/**
+ * src_sync_cmd - send a command and wait
+ * @dev: Adapter
+ * @command: Command to execute
+ * @p1: first parameter
+ * @ret: adapter status
+ *
+ * This routine will send a synchronous command to the adapter and wait
+ * for its completion.
+ */
+
+static int src_sync_cmd(struct aac_dev *dev, u32 command,
+ u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
+ u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
+{
+ unsigned long start;
+ int ok;
+
+ /*
+ * Write the command into Mailbox 0
+ */
+ writel(command, &dev->IndexRegs->Mailbox[0]);
+ /*
+ * Write the parameters into Mailboxes 1 - 6
+ */
+ writel(p1, &dev->IndexRegs->Mailbox[1]);
+ writel(p2, &dev->IndexRegs->Mailbox[2]);
+ writel(p3, &dev->IndexRegs->Mailbox[3]);
+ writel(p4, &dev->IndexRegs->Mailbox[4]);
+
+ /*
+ * Clear the synch command doorbell to start on a clean slate.
+ */
+ src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+
+ /*
+ * Disable doorbell interrupts
+ */
+ src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
+
+ /*
+ * Force the completion of the mask register write before issuing
+ * the interrupt.
+ */
+ src_readl(dev, MUnit.OIMR);
+
+ /*
+ * Signal that there is a new synch command
+ */
+ src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);
+
+ ok = 0;
+ start = jiffies;
+
+ /*
+ * Wait up to 30 seconds
+ */
+ while (time_before(jiffies, start+30*HZ)) {
+ /* Delay 5 microseconds to let Mon960 get info. */
+ udelay(5);
+
+ /* Mon960 will set doorbell0 bit
+ * when it has completed the command
+ */
+ if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) {
+ /* Clear the doorbell */
+ src_writel(dev,
+ MUnit.ODR_C,
+ OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+ ok = 1;
+ break;
+ }
+
+ /* Yield the processor in case we are slow */
+ msleep(1);
+ }
+ if (unlikely(ok != 1)) {
+ /* Restore interrupt mask even though we timed out */
+ aac_adapter_enable_int(dev);
+ return -ETIMEDOUT;
+ }
+
+ /* Pull the synch status from Mailbox 0 */
+ if (status)
+ *status = readl(&dev->IndexRegs->Mailbox[0]);
+ if (r1)
+ *r1 = readl(&dev->IndexRegs->Mailbox[1]);
+ if (r2)
+ *r2 = readl(&dev->IndexRegs->Mailbox[2]);
+ if (r3)
+ *r3 = readl(&dev->IndexRegs->Mailbox[3]);
+ if (r4)
+ *r4 = readl(&dev->IndexRegs->Mailbox[4]);
+
+ /* Clear the synch command doorbell */
+ src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+
+ /* Restore interrupt mask */
+ aac_adapter_enable_int(dev);
+ return 0;
+
+}
+
+/**
+ * aac_src_interrupt_adapter - interrupt adapter
+ * @dev: Adapter
+ *
+ * Send an interrupt to the i960 and breakpoint it.
+ */
+
+static void aac_src_interrupt_adapter(struct aac_dev *dev)
+{
+ src_sync_cmd(dev, BREAKPOINT_REQUEST,
+ 0, 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+}
+
+/**
+ * aac_src_notify_adapter - send an event to the adapter
+ * @dev: Adapter
+ * @event: Event to send
+ *
+ * Notify the i960 that something it probably cares about has
+ * happened.
+ */
+
+static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
+{
+ switch (event) {
+
+ case AdapNormCmdQue:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
+ break;
+ case HostNormRespNotFull:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
+ break;
+ case AdapNormRespQue:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
+ break;
+ case HostNormCmdNotFull:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
+ break;
+ case FastIo:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
+ break;
+ case AdapPrintfDone:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+/**
+ * aac_src_start_adapter - activate adapter
+ * @dev: Adapter
+ *
+ * Start up processing on an i960 based AAC adapter
+ */
+
+static void aac_src_start_adapter(struct aac_dev *dev)
+{
+ struct aac_init *init;
+
+ init = dev->init;
+ init->HostElapsedSeconds = cpu_to_le32(get_seconds());
+
+ /* We can only use a 32 bit address here */
+ src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
+ 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
+/**
+ * aac_src_check_health
+ * @dev: device to check if healthy
+ *
+ * Will attempt to determine if the specified adapter is alive and
+ * capable of handling requests, returning 0 if alive.
+ */
+static int aac_src_check_health(struct aac_dev *dev)
+{
+ u32 status = src_readl(dev, MUnit.OMR);
+
+ /*
+ * Check to see if the board failed any self tests.
+ */
+ if (unlikely(status & SELF_TEST_FAILED))
+ return -1;
+
+ /*
+ * Check to see if the board panic'd.
+ */
+ if (unlikely(status & KERNEL_PANIC))
+ return (status >> 16) & 0xFF;
+ /*
+ * Wait for the adapter to be up and running.
+ */
+ if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
+ return -3;
+ /*
+ * Everything is OK
+ */
+ return 0;
+}
+
+/**
+ * aac_src_deliver_message
+ * @fib: fib to issue
+ *
+ * Will send a fib, returning 0 if successful.
+ */
+static int aac_src_deliver_message(struct fib *fib)
+{
+ struct aac_dev *dev = fib->dev;
+ struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
+ unsigned long qflags;
+ u32 fibsize;
+ u64 address;
+ struct aac_fib_xporthdr *pFibX;
+
+ spin_lock_irqsave(q->lock, qflags);
+ q->numpending++;
+ spin_unlock_irqrestore(q->lock, qflags);
+
+ /* Calculate the encoded fibsize (128-byte units, minus one) */
+ fibsize = (sizeof(struct aac_fib_xporthdr) +
+ fib->hw_fib_va->header.Size + 127) / 128 - 1;
+ if (fibsize > (ALIGN32 - 1))
+ fibsize = ALIGN32 - 1;
+
+ /* Fill XPORT header */
+ pFibX = (struct aac_fib_xporthdr *)
+ ((unsigned char *)fib->hw_fib_va -
+ sizeof(struct aac_fib_xporthdr));
+ pFibX->Handle = fib->hw_fib_va->header.SenderData + 1;
+ pFibX->HostAddress = fib->hw_fib_pa;
+ pFibX->Size = fib->hw_fib_va->header.Size;
+ address = fib->hw_fib_pa - (u64)sizeof(struct aac_fib_xporthdr);
+
+ src_writel(dev, MUnit.IQ_H, (u32)(address >> 32));
+ src_writel(dev, MUnit.IQ_L, (u32)(address & 0xffffffff) + fibsize);
+ return 0;
+}
+
+/**
+ * aac_src_ioremap
+ * @size: mapping resize request
+ *
+ */
+static int aac_src_ioremap(struct aac_dev *dev, u32 size)
+{
+ if (!size) {
+ iounmap(dev->regs.src.bar0);
+ dev->regs.src.bar0 = NULL;
+ iounmap(dev->base);
+ dev->base = NULL;
+ return 0;
+ }
+ dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
+ AAC_MIN_SRC_BAR1_SIZE);
+ dev->base = NULL;
+ if (dev->regs.src.bar1 == NULL)
+ return -1;
+ dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base,
+ size);
+ if (dev->base == NULL) {
+ iounmap(dev->regs.src.bar1);
+ dev->regs.src.bar1 = NULL;
+ return -1;
+ }
+ dev->IndexRegs = &((struct src_registers __iomem *)
+ dev->base)->IndexRegs;
+ return 0;
+}
+
+static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
+{
+ u32 var, reset_mask;
+
+ if (bled >= 0) {
+ if (bled)
+ printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
+ dev->name, dev->id, bled);
+ bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
+ 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
+ if (bled || (var != 0x00000001))
+ bled = -EINVAL;
+ if (dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_DOORBELL_RESET) {
+ src_writel(dev, MUnit.IDR, reset_mask);
+ msleep(5000); /* Delay 5 seconds */
+ }
+ }
+
+ if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
+ return -ENODEV;
+
+ if (startup_timeout < 300)
+ startup_timeout = 300;
+
+ return 0;
+}
+
+/**
+ * aac_src_select_comm - Select communications method
+ * @dev: Adapter
+ * @comm: communications method
+ */
+int aac_src_select_comm(struct aac_dev *dev, int comm)
+{
+ switch (comm) {
+ case AAC_COMM_MESSAGE:
+ dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
+ dev->a_ops.adapter_intr = aac_src_intr_message;
+ dev->a_ops.adapter_deliver = aac_src_deliver_message;
+ break;
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * aac_src_init - initialize a Cardinal Frey Bar card
+ * @dev: device to configure
+ *
+ */
+
+int aac_src_init(struct aac_dev *dev)
+{
+ unsigned long start;
+ unsigned long status;
+ int restart = 0;
+ int instance = dev->id;
+ const char *name = dev->name;
+
+ dev->a_ops.adapter_ioremap = aac_src_ioremap;
+ dev->a_ops.adapter_comm = aac_src_select_comm;
+
+ dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
+ if (aac_adapter_ioremap(dev, dev->base_size)) {
+ printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+ goto error_iounmap;
+ }
+
+ /* Failure to reset here is an option ... */
+ dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+ if ((aac_reset_devices || reset_devices) &&
+ !aac_src_restart_adapter(dev, 0))
+ ++restart;
+ /*
+ * Check to see if the board panic'd while booting.
+ */
+ status = src_readl(dev, MUnit.OMR);
+ if (status & KERNEL_PANIC) {
+ if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
+ goto error_iounmap;
+ ++restart;
+ }
+ /*
+ * Check to see if the board failed any self tests.
+ */
+ status = src_readl(dev, MUnit.OMR);
+ if (status & SELF_TEST_FAILED) {
+ printk(KERN_ERR "%s%d: adapter self-test failed.\n",
+ dev->name, instance);
+ goto error_iounmap;
+ }
+ /*
+ * Check to see if the monitor panic'd while booting.
+ */
+ if (status & MONITOR_PANIC) {
+ printk(KERN_ERR "%s%d: adapter monitor panic.\n",
+ dev->name, instance);
+ goto error_iounmap;
+ }
+ start = jiffies;
+ /*
+ * Wait for the adapter to be up and running. Wait up to 3 minutes
+ */
+ while (!((status = src_readl(dev, MUnit.OMR)) &
+ KERNEL_UP_AND_RUNNING)) {
+ if ((restart &&
+ (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
+ time_after(jiffies, start+HZ*startup_timeout)) {
+ printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
+ dev->name, instance, status);
+ goto error_iounmap;
+ }
+ if (!restart &&
+ ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
+ time_after(jiffies, start + HZ *
+ ((startup_timeout > 60)
+ ? (startup_timeout - 60)
+ : (startup_timeout / 2))))) {
+ if (likely(!aac_src_restart_adapter(dev,
+ aac_src_check_health(dev))))
+ start = jiffies;
+ ++restart;
+ }
+ msleep(1);
+ }
+ if (restart && aac_commit)
+ aac_commit = 1;
+ /*
+ * Fill in the common function dispatch table.
+ */
+ dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
+ dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
+ dev->a_ops.adapter_notify = aac_src_notify_adapter;
+ dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+ dev->a_ops.adapter_check_health = aac_src_check_health;
+ dev->a_ops.adapter_restart = aac_src_restart_adapter;
+
+ /*
+ * First clear out all interrupts. Then enable the one's that we
+ * can handle.
+ */
+ aac_adapter_comm(dev, AAC_COMM_MESSAGE);
+ aac_adapter_disable_int(dev);
+ src_writel(dev, MUnit.ODR_C, 0xffffffff);
+ aac_adapter_enable_int(dev);
+
+ if (aac_init_adapter(dev) == NULL)
+ goto error_iounmap;
+ if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
+ goto error_iounmap;
+
+ dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
+
+ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+ IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+
+ printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+ name, instance);
+ goto error_iounmap;
+ }
+ dev->dbg_base = pci_resource_start(dev->pdev, 2);
+ dev->dbg_base_mapped = dev->regs.src.bar1;
+ dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
+
+ aac_adapter_enable_int(dev);
+ /*
+ * Tell the adapter that all is configured, and it can
+ * start accepting requests
+ */
+ aac_src_start_adapter(dev);
+
+ return 0;
+
+error_iounmap:
+
+ return -1;
+}
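
In the new src.c above, aac_src_deliver_message() encodes the FIB size into the low bits of the 32-byte-aligned transport-header address before writing it to the inbound queue. A worked example under the structure sizes defined earlier in this diff (a 32-byte aac_fib_xporthdr and a hypothetical 512-byte FIB):

	/* Sketch only: size is expressed in 128-byte units, minus one,
	 * and capped at ALIGN32 - 1 (31):
	 *
	 *   fibsize = (32 + 512 + 127) / 128 - 1 = 4;
	 *
	 * IQ_L then receives (address & 0xffffffff) + 4, and IQ_H the
	 * upper 32 bits of the transport-header address.
	 */
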
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index df2fc09ba479..b6d350ac4288 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "1.0.0"
+#define BNX2FC_VERSION "1.0.1"
#define PFX "bnx2fc: "
@@ -84,9 +84,15 @@
#define BNX2FC_NUM_MAX_SESS 128
#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
-#define BNX2FC_MAX_OUTSTANDING_CMNDS 4096
+#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048
+#define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS
+#define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE
#define BNX2FC_MIN_PAYLOAD 256
#define BNX2FC_MAX_PAYLOAD 2048
+#define BNX2FC_MFS \
+ (BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header))
+#define BNX2FC_MINI_JUMBO_MTU 2500
+
#define BNX2FC_RQ_BUF_SZ 256
#define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ))
@@ -98,7 +104,8 @@
#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
#define BNX2FC_5771X_DB_PAGE_SIZE 128
-#define BNX2FC_MAX_TASKS BNX2FC_MAX_OUTSTANDING_CMNDS
+#define BNX2FC_MAX_TASKS \
+ (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS)
#define BNX2FC_TASK_SIZE 128
#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
#define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE)
@@ -112,10 +119,10 @@
#define BNX2FC_WRITE (1 << 0)
#define BNX2FC_MIN_XID 0
-#define BNX2FC_MAX_XID (BNX2FC_MAX_OUTSTANDING_CMNDS - 1)
-#define FCOE_MIN_XID (BNX2FC_MAX_OUTSTANDING_CMNDS)
-#define FCOE_MAX_XID \
- (BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256))
+#define BNX2FC_MAX_XID \
+ (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1)
+#define FCOE_MIN_XID (BNX2FC_MAX_XID + 1)
+#define FCOE_MAX_XID (FCOE_MIN_XID + 4095)
#define BNX2FC_MAX_LUN 0xFFFF
#define BNX2FC_MAX_FCP_TGT 256
#define BNX2FC_MAX_CMD_LEN 16
@@ -125,7 +132,6 @@
#define BNX2FC_WAIT_CNT 120
#define BNX2FC_FW_TIMEOUT (3 * HZ)
-
#define PORT_MAX 2
#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
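
With the reworked constants, driver-managed SCSI and ELS/TM exchanges occupy the low XID range and the libfc-managed FCoE exchanges start directly above it. A stand-alone check of the resulting ranges (values copied from the hunk above, not the driver header itself):

/* Sketch: reproduce the XID layout implied by the new bnx2fc.h constants. */
#include <stdio.h>

#define BNX2FC_MAX_OUTSTANDING_CMNDS	2048
#define BNX2FC_CAN_QUEUE		BNX2FC_MAX_OUTSTANDING_CMNDS
#define BNX2FC_ELSTM_XIDS		BNX2FC_CAN_QUEUE
#define BNX2FC_MIN_XID			0
#define BNX2FC_MAX_XID \
	(BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1)
#define FCOE_MIN_XID			(BNX2FC_MAX_XID + 1)
#define FCOE_MAX_XID			(FCOE_MIN_XID + 4095)

int main(void)
{
	printf("driver XIDs : %d..%d\n", BNX2FC_MIN_XID, BNX2FC_MAX_XID);
	printf("libfc  XIDs : %d..%d\n", FCOE_MIN_XID, FCOE_MAX_XID);
	return 0;
}
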
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e476e8753079..e2e647509a73 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -21,7 +21,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Jan 25, 2011"
+#define DRV_MODULE_RELDATE "Mar 17, 2011"
static char version[] __devinitdata =
@@ -437,17 +437,16 @@ static int bnx2fc_l2_rcv_thread(void *arg)
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
schedule();
- set_current_state(TASK_RUNNING);
spin_lock_bh(&bg->fcoe_rx_list.lock);
while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) {
spin_unlock_bh(&bg->fcoe_rx_list.lock);
bnx2fc_recv_frame(skb);
spin_lock_bh(&bg->fcoe_rx_list.lock);
}
+ __set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_bh(&bg->fcoe_rx_list.lock);
- set_current_state(TASK_INTERRUPTIBLE);
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
return 0;
}
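
The reordering above puts the thread back into TASK_INTERRUPTIBLE while the rx-list lock is still held, so a producer that enqueues and wakes the thread after the final emptiness check can no longer be lost. The same rule in user space is "drain, then decide whether to sleep while still holding the lock"; a minimal pthread analogue (illustrative only, not driver code):

/* Toy consumer that cannot lose a wakeup: the queue and the exit flag are
 * re-checked under the mutex, and pthread_cond_wait() releases the mutex
 * and sleeps atomically. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int queued;	/* stand-in for the skb list length */
static int done;

static void *consumer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		while (queued) {		/* drain everything pending */
			queued--;
			printf("consumed one item\n");
		}
		if (done)			/* decide under the lock ... */
			break;
		pthread_cond_wait(&cond, &lock);	/* ... then sleep atomically */
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, consumer, NULL);
	pthread_mutex_lock(&lock);
	queued = 3;			/* producer: enqueue work ... */
	done = 1;			/* ... then ask the thread to exit */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
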
@@ -569,7 +568,6 @@ int bnx2fc_percpu_io_thread(void *arg)
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
schedule();
- set_current_state(TASK_RUNNING);
spin_lock_bh(&p->fp_work_lock);
while (!list_empty(&p->work_list)) {
list_splice_init(&p->work_list, &work_list);
@@ -583,10 +581,10 @@ int bnx2fc_percpu_io_thread(void *arg)
spin_lock_bh(&p->fp_work_lock);
}
+ __set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_bh(&p->fp_work_lock);
- set_current_state(TASK_INTERRUPTIBLE);
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
return 0;
}
@@ -661,31 +659,6 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
return 0;
}
-static int bnx2fc_mfs_update(struct fc_lport *lport)
-{
- struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
- struct net_device *netdev = hba->netdev;
- u32 mfs;
- u32 max_mfs;
-
- mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
- sizeof(struct fcoe_crc_eof));
- max_mfs = BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header);
- BNX2FC_HBA_DBG(lport, "mfs = %d, max_mfs = %d\n", mfs, max_mfs);
- if (mfs > max_mfs)
- mfs = max_mfs;
-
- /* Adjust mfs to be a multiple of 256 bytes */
- mfs = (((mfs - sizeof(struct fc_frame_header)) / BNX2FC_MIN_PAYLOAD) *
- BNX2FC_MIN_PAYLOAD);
- mfs = mfs + sizeof(struct fc_frame_header);
-
- BNX2FC_HBA_DBG(lport, "Set MFS = %d\n", mfs);
- if (fc_set_mfs(lport, mfs))
- return -EINVAL;
- return 0;
-}
static void bnx2fc_link_speed_update(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
@@ -754,7 +727,7 @@ static int bnx2fc_net_config(struct fc_lport *lport)
!hba->phys_dev->ethtool_ops->get_pauseparam)
return -EOPNOTSUPP;
- if (bnx2fc_mfs_update(lport))
+ if (fc_set_mfs(lport, BNX2FC_MFS))
return -EINVAL;
skb_queue_head_init(&port->fcoe_pending_queue);
@@ -825,14 +798,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event)
if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
printk(KERN_ERR "indicate_netevent: "\
"adapter is not UP!!\n");
- /* fall thru to update mfs if MTU has changed */
- case NETDEV_CHANGEMTU:
- BNX2FC_HBA_DBG(lport, "NETDEV_CHANGEMTU event\n");
- bnx2fc_mfs_update(lport);
- mutex_lock(&lport->lp_mutex);
- list_for_each_entry(vport, &lport->vports, list)
- bnx2fc_mfs_update(vport);
- mutex_unlock(&lport->lp_mutex);
break;
case NETDEV_DOWN:
@@ -1095,13 +1060,6 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
struct netdev_hw_addr *ha;
int sel_san_mac = 0;
- /* Do not support for bonding device */
- if ((netdev->priv_flags & IFF_MASTER_ALB) ||
- (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
- (netdev->priv_flags & IFF_MASTER_8023AD)) {
- return -EOPNOTSUPP;
- }
-
/* setup Source MAC Address */
rcu_read_lock();
for_each_dev_addr(physdev, ha) {
@@ -1432,16 +1390,9 @@ static int bnx2fc_destroy(struct net_device *netdev)
struct net_device *phys_dev;
int rc = 0;
- if (!rtnl_trylock())
- return restart_syscall();
+ rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
-#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
- if (THIS_MODULE->state != MODULE_STATE_LIVE) {
- rc = -ENODEV;
- goto netdev_err;
- }
-#endif
/* obtain physical netdev */
if (netdev->priv_flags & IFF_802_1Q_VLAN)
phys_dev = vlan_dev_real_dev(netdev);
@@ -1805,18 +1756,10 @@ static int bnx2fc_disable(struct net_device *netdev)
struct ethtool_drvinfo drvinfo;
int rc = 0;
- if (!rtnl_trylock()) {
- printk(KERN_ERR PFX "retrying for rtnl_lock\n");
- return -EIO;
- }
+ rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
- if (THIS_MODULE->state != MODULE_STATE_LIVE) {
- rc = -ENODEV;
- goto nodev;
- }
-
/* obtain physical netdev */
if (netdev->priv_flags & IFF_802_1Q_VLAN)
phys_dev = vlan_dev_real_dev(netdev);
@@ -1867,19 +1810,11 @@ static int bnx2fc_enable(struct net_device *netdev)
struct ethtool_drvinfo drvinfo;
int rc = 0;
- if (!rtnl_trylock()) {
- printk(KERN_ERR PFX "retrying for rtnl_lock\n");
- return -EIO;
- }
+ rtnl_lock();
BNX2FC_MISC_DBG("Entered %s\n", __func__);
mutex_lock(&bnx2fc_dev_lock);
- if (THIS_MODULE->state != MODULE_STATE_LIVE) {
- rc = -ENODEV;
- goto nodev;
- }
-
/* obtain physical netdev */
if (netdev->priv_flags & IFF_802_1Q_VLAN)
phys_dev = vlan_dev_real_dev(netdev);
@@ -1942,18 +1877,9 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
return -EIO;
}
- if (!rtnl_trylock()) {
- printk(KERN_ERR "trying for rtnl_lock\n");
- return -EIO;
- }
- mutex_lock(&bnx2fc_dev_lock);
+ rtnl_lock();
-#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
- if (THIS_MODULE->state != MODULE_STATE_LIVE) {
- rc = -ENODEV;
- goto mod_err;
- }
-#endif
+ mutex_lock(&bnx2fc_dev_lock);
if (!try_module_get(THIS_MODULE)) {
rc = -EINVAL;
@@ -2506,7 +2432,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.change_queue_type = fc_change_queue_type,
.this_id = -1,
.cmd_per_lun = 3,
- .can_queue = (BNX2FC_MAX_OUTSTANDING_CMNDS/2),
+ .can_queue = BNX2FC_CAN_QUEUE,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
.max_sectors = 512,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 4f4096836742..1b680e288c56 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -87,7 +87,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
fcoe_init1.task_list_pbl_addr_hi =
(u32) ((u64) hba->task_ctx_bd_dma >> 32);
- fcoe_init1.mtu = hba->netdev->mtu;
+ fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;
fcoe_init1.flags = (PAGE_SHIFT <<
FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
@@ -590,7 +590,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
+ spin_lock_bh(&tgt->tgt_lock);
rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
+ spin_unlock_bh(&tgt->tgt_lock);
+
if (rq_data) {
buf = rq_data;
} else {
@@ -603,8 +606,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
}
for (i = 0; i < num_rq; i++) {
+ spin_lock_bh(&tgt->tgt_lock);
rq_data = (unsigned char *)
bnx2fc_get_next_rqe(tgt, 1);
+ spin_unlock_bh(&tgt->tgt_lock);
len = BNX2FC_RQ_BUF_SZ;
memcpy(buf1, rq_data, len);
buf1 += len;
@@ -615,13 +620,15 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
if (buf != rq_data)
kfree(buf);
+ spin_lock_bh(&tgt->tgt_lock);
bnx2fc_return_rqe(tgt, num_rq);
+ spin_unlock_bh(&tgt->tgt_lock);
break;
case FCOE_ERROR_DETECTION_CQE_TYPE:
/*
- *In case of error reporting CQE a single RQ entry
- * is consumes.
+ * In case of error reporting CQE a single RQ entry
+ * is consumed.
*/
spin_lock_bh(&tgt->tgt_lock);
num_rq = 1;
@@ -705,6 +712,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
*In case of warning reporting CQE a single RQ entry
* is consumes.
*/
+ spin_lock_bh(&tgt->tgt_lock);
num_rq = 1;
err_entry = (struct fcoe_err_report_entry *)
bnx2fc_get_next_rqe(tgt, 1);
@@ -717,6 +725,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
err_entry->tx_buf_off, err_entry->rx_buf_off);
bnx2fc_return_rqe(tgt, 1);
+ spin_unlock_bh(&tgt->tgt_lock);
break;
default:
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 0f1dd23730db..d3fc302c241a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -11,6 +11,9 @@
*/
#include "bnx2fc.h"
+
+#define RESERVE_FREE_LIST_INDEX num_possible_cpus()
+
static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
@@ -242,8 +245,9 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
u32 mem_size;
u16 xid;
int i;
- int num_ios;
+ int num_ios, num_pri_ios;
size_t bd_tbl_sz;
+ int arr_sz = num_possible_cpus() + 1;
if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \
@@ -263,14 +267,14 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
}
cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
- num_possible_cpus(), GFP_KERNEL);
+ arr_sz, GFP_KERNEL);
if (!cmgr->free_list) {
printk(KERN_ERR PFX "failed to alloc free_list\n");
goto mem_err;
}
cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
- num_possible_cpus(), GFP_KERNEL);
+ arr_sz, GFP_KERNEL);
if (!cmgr->free_list_lock) {
printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
goto mem_err;
@@ -279,13 +283,18 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
cmgr->hba = hba;
cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
- for (i = 0; i < num_possible_cpus(); i++) {
+ for (i = 0; i < arr_sz; i++) {
INIT_LIST_HEAD(&cmgr->free_list[i]);
spin_lock_init(&cmgr->free_list_lock[i]);
}
- /* Pre-allocated pool of bnx2fc_cmds */
+ /*
+ * Pre-allocated pool of bnx2fc_cmds.
+ * Last entry in the free list array is the free list
+ * of slow path requests.
+ */
xid = BNX2FC_MIN_XID;
+ num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS;
for (i = 0; i < num_ios; i++) {
io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
@@ -298,11 +307,13 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);
io_req->xid = xid++;
- if (io_req->xid >= BNX2FC_MAX_OUTSTANDING_CMNDS)
- printk(KERN_ERR PFX "ERROR allocating xids - 0x%x\n",
- io_req->xid);
- list_add_tail(&io_req->link,
- &cmgr->free_list[io_req->xid % num_possible_cpus()]);
+ if (i < num_pri_ios)
+ list_add_tail(&io_req->link,
+ &cmgr->free_list[io_req->xid %
+ num_possible_cpus()]);
+ else
+ list_add_tail(&io_req->link,
+ &cmgr->free_list[num_possible_cpus()]);
io_req++;
}
@@ -389,7 +400,7 @@ free_cmd_pool:
if (!cmgr->free_list)
goto free_cmgr;
- for (i = 0; i < num_possible_cpus(); i++) {
+ for (i = 0; i < num_possible_cpus() + 1; i++) {
struct list_head *list;
struct list_head *tmp;
@@ -413,6 +424,7 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
struct bnx2fc_cmd *io_req;
struct list_head *listp;
struct io_bdt *bd_tbl;
+ int index = RESERVE_FREE_LIST_INDEX;
u32 max_sqes;
u16 xid;
@@ -432,26 +444,26 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
* NOTE: Free list insertions and deletions are protected with
* cmgr lock
*/
- spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
- if ((list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) ||
+ spin_lock_bh(&cmd_mgr->free_list_lock[index]);
+ if ((list_empty(&(cmd_mgr->free_list[index]))) ||
(tgt->num_active_ios.counter >= max_sqes)) {
BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
"ios(%d):sqes(%d)\n",
tgt->num_active_ios.counter, tgt->max_sqes);
- if (list_empty(&(cmd_mgr->free_list[smp_processor_id()])))
+ if (list_empty(&(cmd_mgr->free_list[index])))
printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
- spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
return NULL;
}
listp = (struct list_head *)
- cmd_mgr->free_list[smp_processor_id()].next;
+ cmd_mgr->free_list[index].next;
list_del_init(listp);
io_req = (struct bnx2fc_cmd *) listp;
xid = io_req->xid;
cmd_mgr->cmds[xid] = io_req;
atomic_inc(&tgt->num_active_ios);
- spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
INIT_LIST_HEAD(&io_req->link);
@@ -479,27 +491,30 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
struct io_bdt *bd_tbl;
u32 max_sqes;
u16 xid;
+ int index = get_cpu();
max_sqes = BNX2FC_SCSI_MAX_SQES;
/*
* NOTE: Free list insertions and deletions are protected with
* cmgr lock
*/
- spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
- if ((list_empty(&cmd_mgr->free_list[smp_processor_id()])) ||
+ spin_lock_bh(&cmd_mgr->free_list_lock[index]);
+ if ((list_empty(&cmd_mgr->free_list[index])) ||
(tgt->num_active_ios.counter >= max_sqes)) {
- spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
+ put_cpu();
return NULL;
}
listp = (struct list_head *)
- cmd_mgr->free_list[smp_processor_id()].next;
+ cmd_mgr->free_list[index].next;
list_del_init(listp);
io_req = (struct bnx2fc_cmd *) listp;
xid = io_req->xid;
cmd_mgr->cmds[xid] = io_req;
atomic_inc(&tgt->num_active_ios);
- spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
+ put_cpu();
INIT_LIST_HEAD(&io_req->link);
@@ -522,8 +537,15 @@ void bnx2fc_cmd_release(struct kref *ref)
struct bnx2fc_cmd *io_req = container_of(ref,
struct bnx2fc_cmd, refcount);
struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
+ int index;
+
+ if (io_req->cmd_type == BNX2FC_SCSI_CMD)
+ index = io_req->xid % num_possible_cpus();
+ else
+ index = RESERVE_FREE_LIST_INDEX;
- spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+
+ spin_lock_bh(&cmd_mgr->free_list_lock[index]);
if (io_req->cmd_type != BNX2FC_SCSI_CMD)
bnx2fc_free_mp_resc(io_req);
cmd_mgr->cmds[io_req->xid] = NULL;
@@ -531,9 +553,10 @@ void bnx2fc_cmd_release(struct kref *ref)
list_del_init(&io_req->link);
/* Add it to the free list */
list_add(&io_req->link,
- &cmd_mgr->free_list[smp_processor_id()]);
+ &cmd_mgr->free_list[index]);
atomic_dec(&io_req->tgt->num_active_ios);
- spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
+
}
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
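
The command manager now keeps num_possible_cpus() + 1 free lists: SCSI commands hash onto a per-CPU list by XID, while ELS/TM requests always come from the reserved last list. A stand-alone sketch of the index selection (the CPU count and command types are made up):

/* Sketch of the free-list index selection added in bnx2fc_io.c: fast-path
 * SCSI I/O is spread across per-CPU lists, slow-path ELS/TM requests use
 * one reserved list at the end of the array. */
#include <stdio.h>

#define NUM_CPUS		4		/* stand-in for num_possible_cpus() */
#define RESERVE_FREE_LIST_INDEX	NUM_CPUS	/* last slot of a NUM_CPUS + 1 array */

enum cmd_type { SCSI_CMD, ELS_CMD, TM_CMD };

static int free_list_index(enum cmd_type type, unsigned int xid)
{
	if (type == SCSI_CMD)
		return xid % NUM_CPUS;		/* per-CPU fast path */
	return RESERVE_FREE_LIST_INDEX;		/* reserved slow path */
}

int main(void)
{
	printf("SCSI xid 10   -> list %d\n", free_list_index(SCSI_CMD, 10));
	printf("ELS  xid 4096 -> list %d\n", free_list_index(ELS_CMD, 4096));
	return 0;
}
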
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 7ea93af60260..7cc05e4e82d5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -304,10 +304,8 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
" not sent to FW\n");
/* Free session resources */
- spin_lock_bh(&tgt->cq_lock);
bnx2fc_free_session_resc(hba, tgt);
bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
- spin_unlock_bh(&tgt->cq_lock);
}
static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
@@ -830,11 +828,13 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
tgt->rq = NULL;
}
/* Free CQ */
+ spin_lock_bh(&tgt->cq_lock);
if (tgt->cq) {
dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
tgt->cq, tgt->cq_dma);
tgt->cq = NULL;
}
+ spin_unlock_bh(&tgt->cq_lock);
/* Free SQ */
if (tgt->sq) {
dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 8eeb39ffa37f..e98ae33f1295 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -132,14 +132,25 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
if (page_count(sg_page(sg)) >= 1 && !recv)
return;
- segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+ if (recv) {
+ segment->atomic_mapped = true;
+ segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+ } else {
+ segment->atomic_mapped = false;
+ /* the xmit path can sleep with the page mapped so use kmap */
+ segment->sg_mapped = kmap(sg_page(sg));
+ }
+
segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
}
void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
{
if (segment->sg_mapped) {
- kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
+ if (segment->atomic_mapped)
+ kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
+ else
+ kunmap(sg_page(segment->sg));
segment->sg_mapped = NULL;
segment->data = NULL;
}
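
The receive path keeps kmap_atomic() because it runs in softirq context and must not sleep, while the transmit path switches to kmap() since it can sleep with the page mapped; the new atomic_mapped flag records which call was used so the unmap mirrors it. A toy user-space version of that record-and-release-symmetrically bookkeeping (the pool/heap stand-ins and all names are illustrative):

/* Sketch: remember which acquisition path was taken so teardown matches it,
 * the essential idea behind segment->atomic_mapped. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct segment {
	char *data;
	bool  from_pool;	/* analogue of atomic_mapped */
};

static char pool[64];		/* stand-in for the fixed atomic-kmap slot */

static void segment_map(struct segment *s, bool atomic_ctx)
{
	if (atomic_ctx) {		/* cannot sleep: use the fixed slot */
		s->from_pool = true;
		s->data = pool;
	} else {			/* may sleep: allocate normally */
		s->from_pool = false;
		s->data = malloc(64);
	}
}

static void segment_unmap(struct segment *s)
{
	if (!s->from_pool)		/* release mirrors the acquisition path */
		free(s->data);
	s->data = NULL;
}

int main(void)
{
	struct segment rx, tx;

	segment_map(&rx, true);
	segment_map(&tx, false);
	if (tx.data) {
		strcpy(tx.data, "xmit payload");
		printf("%s\n", tx.data);
	}
	segment_unmap(&rx);
	segment_unmap(&tx);
	return 0;
}
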
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 14de249917f8..88928f00aa2d 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,7 +1,7 @@
#/*******************************************************************
# * This file is part of the Emulex Linux Device Driver for *
# * Fibre Channel Host Bus Adapters. *
-# * Copyright (C) 2004-2006 Emulex. All rights reserved. *
+# * Copyright (C) 2004-2011 Emulex. All rights reserved. *
# * EMULEX and SLI are trademarks of Emulex. *
# * www.emulex.com *
# * *
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index b64c6da870d3..60e98a62f308 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -539,6 +539,8 @@ struct lpfc_hba {
(struct lpfc_hba *, uint32_t);
int (*lpfc_hba_down_link)
(struct lpfc_hba *, uint32_t);
+ int (*lpfc_selective_reset)
+ (struct lpfc_hba *);
/* SLI4 specific HBA data structure */
struct lpfc_sli4_hba sli4_hba;
@@ -895,7 +897,18 @@ lpfc_worker_wake_up(struct lpfc_hba *phba)
return;
}
-static inline void
+static inline int
+lpfc_readl(void __iomem *addr, uint32_t *data)
+{
+ uint32_t temp;
+ temp = readl(addr);
+ if (temp == 0xffffffff)
+ return -EIO;
+ *data = temp;
+ return 0;
+}
+
+static inline int
lpfc_sli_read_hs(struct lpfc_hba *phba)
{
/*
@@ -904,15 +917,17 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
*/
phba->sli.slistat.err_attn_event++;
- /* Save status info */
- phba->work_hs = readl(phba->HSregaddr);
- phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
- phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
+ /* Save status info and check for unplug error */
+ if (lpfc_readl(phba->HSregaddr, &phba->work_hs) ||
+ lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) ||
+ lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) {
+ return -EIO;
+ }
/* Clear chip Host Attention error bit */
writel(HA_ERATT, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
phba->pport->stopped = 1;
- return;
+ return 0;
}
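
lpfc_readl() treats an all-ones readback as a surprise-removed PCI device (reads from an unplugged device return 0xFFFFFFFF) and returns -EIO, letting the register-polling loops below bail out instead of spinning on garbage. A user-space sketch of the same convention (the "register" here is just a variable):

/* Sketch of the all-ones hot-unplug convention behind lpfc_readl(). */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0xFFFFFFFF;	/* pretend the device was unplugged */

static int reg_read32(const volatile uint32_t *addr, uint32_t *data)
{
	uint32_t val = *addr;

	if (val == 0xFFFFFFFF)
		return -EIO;		/* device likely gone */
	*data = val;
	return 0;
}

int main(void)
{
	uint32_t status;

	if (reg_read32(&fake_reg, &status))
		printf("register read failed, treating adapter as unplugged\n");
	else
		printf("status = 0x%08x\n", (unsigned)status);
	return 0;
}
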
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index e7c020df12fa..4e0faa00b96f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -685,7 +685,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
* -EIO reset not configured or error posting the event
* zero for success
**/
-static int
+int
lpfc_selective_reset(struct lpfc_hba *phba)
{
struct completion online_compl;
@@ -746,7 +746,7 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
int status = -EINVAL;
if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
- status = lpfc_selective_reset(phba);
+ status = phba->lpfc_selective_reset(phba);
if (status == 0)
return strlen(buf);
@@ -1224,7 +1224,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
if (val & ENABLE_FCP_RING_POLLING) {
if ((val & DISABLE_FCP_RING_INT) &&
!(old_val & DISABLE_FCP_RING_INT)) {
- creg_val = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EINVAL;
+ }
creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -1242,7 +1245,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
spin_unlock_irq(&phba->hbalock);
del_timer(&phba->fcp_poll_timer);
spin_lock_irq(&phba->hbalock);
- creg_val = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EINVAL;
+ }
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
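
lpfc_issue_reset() now dispatches through phba->lpfc_selective_reset, a per-HBA hook bound at init time alongside the existing lpfc_hba_init_link/lpfc_hba_down_link pointers. A minimal stand-alone sketch of that init-time binding (the struct and function names are made up):

/* Sketch of a per-adapter function-pointer dispatch: the method is chosen
 * once during init and callers only ever use the pointer. */
#include <stdio.h>

struct hba {
	const char *model;
	int (*selective_reset)(struct hba *);
};

static int generic_selective_reset(struct hba *h)
{
	printf("%s: issuing selective reset\n", h->model);
	return 0;
}

static void hba_init_api(struct hba *h)
{
	/* init-time binding, as lpfc_init_api_table_setup() does for lpfc */
	h->selective_reset = generic_selective_reset;
}

int main(void)
{
	struct hba h = { .model = "example-hba" };

	hba_init_api(&h);
	return h.selective_reset(&h);	/* call site mirrors lpfc_issue_reset() */
}
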
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 0dd43bb91618..793b9f1131fb 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2010 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -348,7 +348,10 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
dd_data->context_un.iocb.bmp = bmp;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
- creg_val = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+ rc = -EIO;
+ goto free_cmdiocbq;
+ }
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -599,7 +602,10 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
dd_data->context_un.iocb.ndlp = ndlp;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
- creg_val = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+ rc = -EIO;
+ goto linkdown_err;
+ }
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -613,6 +619,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
else
rc = -EIO;
+linkdown_err:
pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
job->request_payload.sg_cnt, DMA_TO_DEVICE);
pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
@@ -1357,7 +1364,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
dd_data->context_un.iocb.ndlp = ndlp;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
- creg_val = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+ rc = -IOCB_ERROR;
+ goto issue_ct_rsp_exit;
+ }
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -2479,16 +2489,18 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
from = (uint8_t *)dd_data->context_un.mbox.mb;
job = dd_data->context_un.mbox.set_job;
- size = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len =
- sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
- from, size);
- job->reply->result = 0;
+ if (job) {
+ size = job->reply_payload.payload_len;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ from, size);
+ job->reply->result = 0;
+ job->dd_data = NULL;
+ job->job_done(job);
+ }
dd_data->context_un.mbox.set_job = NULL;
- job->dd_data = NULL;
- job->job_done(job);
/* need to hold the lock until we call job done to hold off
* the timeout handler returning to the midlayer while
* we are stillprocessing the job
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 3d40023f4804..f0b332f4eedb 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2010 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -254,8 +254,8 @@ uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
uint32_t);
void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
-
-void lpfc_reset_barrier(struct lpfc_hba * phba);
+int lpfc_selective_reset(struct lpfc_hba *);
+void lpfc_reset_barrier(struct lpfc_hba *);
int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
int lpfc_sli_brdkill(struct lpfc_hba *);
int lpfc_sli_brdreset(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 8e28edf9801e..735028fedda5 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -89,7 +89,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
return 0;
/* Read the HBA Host Attention Register */
- ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return 1;
if (!(ha_copy & HA_LATT))
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 94ae37c5111a..95f11ed79463 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1344,7 +1344,7 @@ typedef struct { /* FireFly BIU registers */
#define HS_FFER1 0x80000000 /* Bit 31 */
#define HS_CRIT_TEMP 0x00000100 /* Bit 8 */
#define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */
-
+#define UNPLUG_ERR 0x00000001 /* Indicate pci hot unplug */
/* Host Control Register */
#define HC_REG_OFFSET 12 /* Byte offset from register base address */
@@ -1713,6 +1713,17 @@ struct lpfc_pde6 {
#define pde6_apptagval_WORD word2
};
+struct lpfc_pde7 {
+ uint32_t word0;
+#define pde7_type_SHIFT 24
+#define pde7_type_MASK 0x000000ff
+#define pde7_type_WORD word0
+#define pde7_rsvd0_SHIFT 0
+#define pde7_rsvd0_MASK 0x00ffffff
+#define pde7_rsvd0_WORD word0
+ uint32_t addrHigh;
+ uint32_t addrLow;
+};
/* Structure for MB Command LOAD_SM and DOWN_LOAD */
@@ -3621,7 +3632,7 @@ typedef struct _IOCB { /* IOCB structure */
ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */
QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */
struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */
- struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */
+ struct sli4_bls_rsp bls_rsp; /* UNSOL ABTS BLS_RSP params */
uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */
} un;
union {
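
lpfc_pde7 packs its descriptor type into bits 31:24 of word0, addressed through the driver's SHIFT/MASK/WORD triples. A stand-alone demo of that shift-and-mask accessor style (the macro bodies below follow the usual pattern and are not copied from lpfc; the type value is hypothetical):

/* Sketch of SHIFT/MASK field accessors in the style used by lpfc_pde7. */
#include <stdint.h>
#include <stdio.h>

struct pde7 {
	uint32_t word0;
	uint32_t addrHigh;
	uint32_t addrLow;
};

#define pde7_type_SHIFT	24
#define pde7_type_MASK	0x000000ffU

#define FIELD_GET(name, word) \
	(((word) >> name##_SHIFT) & name##_MASK)
#define FIELD_SET(name, word, val) \
	((word) = ((word) & ~(name##_MASK << name##_SHIFT)) | \
		  (((val) & name##_MASK) << name##_SHIFT))

int main(void)
{
	struct pde7 pde = { 0 };

	FIELD_SET(pde7_type, pde.word0, 0x87);	/* hypothetical descriptor type */
	printf("word0 = 0x%08x, type = 0x%02x\n",
	       (unsigned)pde.word0, (unsigned)FIELD_GET(pde7_type, pde.word0));
	return 0;
}
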
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index c7178d60c7bf..8433ac0d9fb4 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -215,7 +215,7 @@ struct lpfc_sli4_flags {
#define lpfc_fip_flag_WORD word0
};
-struct sli4_bls_acc {
+struct sli4_bls_rsp {
uint32_t word0_rsvd; /* Word0 must be reserved */
uint32_t word1;
#define lpfc_abts_orig_SHIFT 0
@@ -231,6 +231,16 @@ struct sli4_bls_acc {
#define lpfc_abts_oxid_MASK 0x0000FFFF
#define lpfc_abts_oxid_WORD word2
uint32_t word3;
+#define lpfc_vndr_code_SHIFT 0
+#define lpfc_vndr_code_MASK 0x000000FF
+#define lpfc_vndr_code_WORD word3
+#define lpfc_rsn_expln_SHIFT 8
+#define lpfc_rsn_expln_MASK 0x000000FF
+#define lpfc_rsn_expln_WORD word3
+#define lpfc_rsn_code_SHIFT 16
+#define lpfc_rsn_code_MASK 0x000000FF
+#define lpfc_rsn_code_WORD word3
+
uint32_t word4;
uint32_t word5_rsvd; /* Word5 must be reserved */
};
@@ -711,21 +721,27 @@ struct lpfc_sli4_cfg_mhdr {
union lpfc_sli4_cfg_shdr {
struct {
uint32_t word6;
-#define lpfc_mbox_hdr_opcode_SHIFT 0
-#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
-#define lpfc_mbox_hdr_opcode_WORD word6
-#define lpfc_mbox_hdr_subsystem_SHIFT 8
-#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
-#define lpfc_mbox_hdr_subsystem_WORD word6
-#define lpfc_mbox_hdr_port_number_SHIFT 16
-#define lpfc_mbox_hdr_port_number_MASK 0x000000FF
-#define lpfc_mbox_hdr_port_number_WORD word6
-#define lpfc_mbox_hdr_domain_SHIFT 24
-#define lpfc_mbox_hdr_domain_MASK 0x000000FF
-#define lpfc_mbox_hdr_domain_WORD word6
+#define lpfc_mbox_hdr_opcode_SHIFT 0
+#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD word6
+#define lpfc_mbox_hdr_subsystem_SHIFT 8
+#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD word6
+#define lpfc_mbox_hdr_port_number_SHIFT 16
+#define lpfc_mbox_hdr_port_number_MASK 0x000000FF
+#define lpfc_mbox_hdr_port_number_WORD word6
+#define lpfc_mbox_hdr_domain_SHIFT 24
+#define lpfc_mbox_hdr_domain_MASK 0x000000FF
+#define lpfc_mbox_hdr_domain_WORD word6
uint32_t timeout;
uint32_t request_length;
- uint32_t reserved9;
+ uint32_t word9;
+#define lpfc_mbox_hdr_version_SHIFT 0
+#define lpfc_mbox_hdr_version_MASK 0x000000FF
+#define lpfc_mbox_hdr_version_WORD word9
+#define LPFC_Q_CREATE_VERSION_2 2
+#define LPFC_Q_CREATE_VERSION_1 1
+#define LPFC_Q_CREATE_VERSION_0 0
} request;
struct {
uint32_t word6;
@@ -917,9 +933,12 @@ struct cq_context {
#define LPFC_CQ_CNT_512 0x1
#define LPFC_CQ_CNT_1024 0x2
uint32_t word1;
-#define lpfc_cq_eq_id_SHIFT 22
+#define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */
#define lpfc_cq_eq_id_MASK 0x000000FF
#define lpfc_cq_eq_id_WORD word1
+#define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */
+#define lpfc_cq_eq_id_2_MASK 0x0000FFFF
+#define lpfc_cq_eq_id_2_WORD word1
uint32_t reserved0;
uint32_t reserved1;
};
@@ -929,6 +948,9 @@ struct lpfc_mbx_cq_create {
union {
struct {
uint32_t word0;
+#define lpfc_mbx_cq_create_page_size_SHIFT 16 /* Version 2 Only */
+#define lpfc_mbx_cq_create_page_size_MASK 0x000000FF
+#define lpfc_mbx_cq_create_page_size_WORD word0
#define lpfc_mbx_cq_create_num_pages_SHIFT 0
#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_num_pages_WORD word0
@@ -969,7 +991,7 @@ struct wq_context {
struct lpfc_mbx_wq_create {
struct mbox_header header;
union {
- struct {
+ struct { /* Version 0 Request */
uint32_t word0;
#define lpfc_mbx_wq_create_num_pages_SHIFT 0
#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF
@@ -979,6 +1001,23 @@ struct lpfc_mbx_wq_create {
#define lpfc_mbx_wq_create_cq_id_WORD word0
struct dma_address page[LPFC_MAX_WQ_PAGE];
} request;
+ struct { /* Version 1 Request */
+ uint32_t word0; /* Word 0 is the same as in v0 */
+ uint32_t word1;
+#define lpfc_mbx_wq_create_page_size_SHIFT 0
+#define lpfc_mbx_wq_create_page_size_MASK 0x000000FF
+#define lpfc_mbx_wq_create_page_size_WORD word1
+#define lpfc_mbx_wq_create_wqe_size_SHIFT 8
+#define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F
+#define lpfc_mbx_wq_create_wqe_size_WORD word1
+#define LPFC_WQ_WQE_SIZE_64 0x5
+#define LPFC_WQ_WQE_SIZE_128 0x6
+#define lpfc_mbx_wq_create_wqe_count_SHIFT 16
+#define lpfc_mbx_wq_create_wqe_count_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_wqe_count_WORD word1
+ uint32_t word2;
+ struct dma_address page[LPFC_MAX_WQ_PAGE-1];
+ } request_1;
struct {
uint32_t word0;
#define lpfc_mbx_wq_create_q_id_SHIFT 0
@@ -1007,13 +1046,22 @@ struct lpfc_mbx_wq_destroy {
#define LPFC_DATA_BUF_SIZE 2048
struct rq_context {
uint32_t word0;
-#define lpfc_rq_context_rq_size_SHIFT 16
-#define lpfc_rq_context_rq_size_MASK 0x0000000F
-#define lpfc_rq_context_rq_size_WORD word0
+#define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */
+#define lpfc_rq_context_rqe_count_MASK 0x0000000F
+#define lpfc_rq_context_rqe_count_WORD word0
#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */
#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */
#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */
#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */
+#define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1 Only */
+#define lpfc_rq_context_rqe_count_1_MASK 0x0000FFFF
+#define lpfc_rq_context_rqe_count_1_WORD word0
+#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */
+#define lpfc_rq_context_rqe_size_MASK 0x0000000F
+#define lpfc_rq_context_rqe_size_WORD word0
+#define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */
+#define lpfc_rq_context_page_size_MASK 0x000000FF
+#define lpfc_rq_context_page_size_WORD word0
uint32_t reserved1;
uint32_t word2;
#define lpfc_rq_context_cq_id_SHIFT 16
@@ -1022,7 +1070,7 @@ struct rq_context {
#define lpfc_rq_context_buf_size_SHIFT 0
#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
#define lpfc_rq_context_buf_size_WORD word2
- uint32_t reserved3;
+ uint32_t buffer_size; /* Version 1 Only */
};
struct lpfc_mbx_rq_create {
@@ -1062,16 +1110,16 @@ struct lpfc_mbx_rq_destroy {
struct mq_context {
uint32_t word0;
-#define lpfc_mq_context_cq_id_SHIFT 22
+#define lpfc_mq_context_cq_id_SHIFT 22 /* Version 0 Only */
#define lpfc_mq_context_cq_id_MASK 0x000003FF
#define lpfc_mq_context_cq_id_WORD word0
-#define lpfc_mq_context_count_SHIFT 16
-#define lpfc_mq_context_count_MASK 0x0000000F
-#define lpfc_mq_context_count_WORD word0
-#define LPFC_MQ_CNT_16 0x5
-#define LPFC_MQ_CNT_32 0x6
-#define LPFC_MQ_CNT_64 0x7
-#define LPFC_MQ_CNT_128 0x8
+#define lpfc_mq_context_ring_size_SHIFT 16
+#define lpfc_mq_context_ring_size_MASK 0x0000000F
+#define lpfc_mq_context_ring_size_WORD word0
+#define LPFC_MQ_RING_SIZE_16 0x5
+#define LPFC_MQ_RING_SIZE_32 0x6
+#define LPFC_MQ_RING_SIZE_64 0x7
+#define LPFC_MQ_RING_SIZE_128 0x8
uint32_t word1;
#define lpfc_mq_context_valid_SHIFT 31
#define lpfc_mq_context_valid_MASK 0x00000001
@@ -1105,9 +1153,12 @@ struct lpfc_mbx_mq_create_ext {
union {
struct {
uint32_t word0;
-#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0
-#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF
-#define lpfc_mbx_mq_create_ext_num_pages_WORD word0
+#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0
+#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_ext_num_pages_WORD word0
+#define lpfc_mbx_mq_create_ext_cq_id_SHIFT 16 /* Version 1 Only */
+#define lpfc_mbx_mq_create_ext_cq_id_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_ext_cq_id_WORD word0
uint32_t async_evt_bmap;
#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK
#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 35665cfb5689..e6ebe516cfbb 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2010 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -507,7 +507,10 @@ lpfc_config_port_post(struct lpfc_hba *phba)
phba->hba_flag &= ~HBA_ERATT_HANDLED;
/* Enable appropriate host interrupts */
- status = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &status)) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EIO;
+ }
status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
if (psli->num_rings > 0)
status |= HC_R0INT_ENA;
@@ -1222,7 +1225,10 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
/* Wait for the ER1 bit to clear.*/
while (phba->work_hs & HS_FFER1) {
msleep(100);
- phba->work_hs = readl(phba->HSregaddr);
+ if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
+ phba->work_hs = UNPLUG_ERR;
+ break;
+ }
/* If driver is unloading let the worker thread continue */
if (phba->pport->load_flag & FC_UNLOADING) {
phba->work_hs = 0;
@@ -4474,6 +4480,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
phba->lpfc_hba_init_link = lpfc_hba_init_link;
phba->lpfc_hba_down_link = lpfc_hba_down_link;
+ phba->lpfc_selective_reset = lpfc_selective_reset;
switch (dev_grp) {
case LPFC_PCI_DEV_LP:
phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
@@ -5385,13 +5392,16 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
int i, port_error = 0;
uint32_t if_type;
+ memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
+ memset(&reg_data, 0, sizeof(reg_data));
if (!phba->sli4_hba.PSMPHRregaddr)
return -ENODEV;
/* Wait up to 30 seconds for the SLI Port POST done and ready */
for (i = 0; i < 3000; i++) {
- portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr);
- if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) {
+ if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+ &portsmphr_reg.word0) ||
+ (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
/* Port has a fatal POST error, break out */
port_error = -ENODEV;
break;
@@ -5472,9 +5482,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
break;
case LPFC_SLI_INTF_IF_TYPE_2:
/* Final checks. The port status should be clean. */
- reg_data.word0 =
- readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
- if (bf_get(lpfc_sliport_status_err, &reg_data)) {
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &reg_data.word0) ||
+ bf_get(lpfc_sliport_status_err, &reg_data)) {
phba->work_status[0] =
readl(phba->sli4_hba.u.if_type2.
ERR1regaddr);
@@ -6760,9 +6770,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
* the loop again.
*/
for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
- reg_data.word0 =
- readl(phba->sli4_hba.u.if_type2.
- STATUSregaddr);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.
+ STATUSregaddr, &reg_data.word0)) {
+ rc = -ENODEV;
+ break;
+ }
if (bf_get(lpfc_sliport_status_rdy, &reg_data))
break;
if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
@@ -6783,8 +6795,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
}
/* Detect any port errors. */
- reg_data.word0 = readl(phba->sli4_hba.u.if_type2.
- STATUSregaddr);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &reg_data.word0)) {
+ rc = -ENODEV;
+ break;
+ }
if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
(rdy_chk >= 1000)) {
phba->work_status[0] = readl(
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index bf34178b80bf..2b962b020cfb 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -1514,10 +1514,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct scatterlist *sgpe = NULL; /* s/g prot entry */
struct lpfc_pde5 *pde5 = NULL;
struct lpfc_pde6 *pde6 = NULL;
- struct ulp_bde64 *prot_bde = NULL;
+ struct lpfc_pde7 *pde7 = NULL;
dma_addr_t dataphysaddr, protphysaddr;
unsigned short curr_data = 0, curr_prot = 0;
- unsigned int split_offset, protgroup_len;
+ unsigned int split_offset;
+ unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
unsigned int protgrp_blks, protgrp_bytes;
unsigned int remainder, subtotal;
int status;
@@ -1585,23 +1586,33 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bpl++;
/* setup the first BDE that points to protection buffer */
- prot_bde = (struct ulp_bde64 *) bpl;
- protphysaddr = sg_dma_address(sgpe);
- prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr));
- prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
- protgroup_len = sg_dma_len(sgpe);
+ protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
+ protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
/* must be integer multiple of the DIF block length */
BUG_ON(protgroup_len % 8);
+ pde7 = (struct lpfc_pde7 *) bpl;
+ memset(pde7, 0, sizeof(struct lpfc_pde7));
+ bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
+
+ pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr));
+ pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
+
protgrp_blks = protgroup_len / 8;
protgrp_bytes = protgrp_blks * blksize;
- prot_bde->tus.f.bdeSize = protgroup_len;
- prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR;
- prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
+ /* check if this pde is crossing the 4K boundary; if so split */
+ if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
+ protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
+ protgroup_offset += protgroup_remainder;
+ protgrp_blks = protgroup_remainder / 8;
+ protgrp_bytes = protgroup_remainder * blksize;
+ } else {
+ protgroup_offset = 0;
+ curr_prot++;
+ }
- curr_prot++;
num_bde++;
/* setup BDE's for data blocks associated with DIF data */
@@ -1653,6 +1664,13 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
+ if (protgroup_offset) {
+ /* update the reference tag */
+ reftag += protgrp_blks;
+ bpl++;
+ continue;
+ }
+
/* are we done ? */
if (curr_prot == protcnt) {
alldone = 1;
@@ -1675,6 +1693,7 @@ out:
return num_bde;
}
+
/*
* Given a SCSI command that supports DIF, determine composition of protection
* groups involved in setting up buffer lists
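
The protection-group BDE setup above now checks whether addrLow plus the group length would cross a 4 KB boundary and, if so, describes only the bytes up to the boundary, carrying the remainder into the next pass of the loop via protgroup_offset. A stand-alone sketch of just the boundary arithmetic (addresses and lengths are made up; the driver's blksize/reftag bookkeeping is omitted):

/* Sketch of the 4K-crossing split: describe at most up to the next 4 KB
 * boundary, then continue from the carried offset. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x12345E00;	/* hypothetical DMA address of the group */
	uint32_t len  = 0x400;		/* 1 KiB of protection data */
	uint32_t off  = 0;

	while (len) {
		uint32_t chunk = len;

		/* split if [addr + off, addr + off + chunk) crosses a 4K page */
		if (((addr + off) & 0xfff) + chunk > 0x1000)
			chunk = 0x1000 - ((addr + off) & 0xfff);

		printf("BDE: addr 0x%llx len 0x%x\n",
		       (unsigned long long)(addr + off), chunk);
		off += chunk;
		len -= chunk;
	}
	return 0;
}
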
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 2ee0374a9908..4746dcd756dd 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -3477,7 +3477,8 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
int retval = 0;
/* Read the HBA Host Status Register */
- status = readl(phba->HSregaddr);
+ if (lpfc_readl(phba->HSregaddr, &status))
+ return 1;
/*
* Check status register every 100ms for 5 retries, then every
@@ -3502,7 +3503,10 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
lpfc_sli_brdrestart(phba);
}
/* Read the HBA Host Status Register */
- status = readl(phba->HSregaddr);
+ if (lpfc_readl(phba->HSregaddr, &status)) {
+ retval = 1;
+ break;
+ }
}
/* Check to see if any errors occurred during init */
@@ -3584,7 +3588,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
uint32_t __iomem *resp_buf;
uint32_t __iomem *mbox_buf;
volatile uint32_t mbox;
- uint32_t hc_copy;
+ uint32_t hc_copy, ha_copy, resp_data;
int i;
uint8_t hdrtype;
@@ -3601,12 +3605,15 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
resp_buf = phba->MBslimaddr;
/* Disable the error attention */
- hc_copy = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &hc_copy))
+ return;
writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
phba->link_flag |= LS_IGNORE_ERATT;
- if (readl(phba->HAregaddr) & HA_ERATT) {
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return;
+ if (ha_copy & HA_ERATT) {
/* Clear Chip error bit */
writel(HA_ERATT, phba->HAregaddr);
phba->pport->stopped = 1;
@@ -3620,11 +3627,18 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
mbox_buf = phba->MBslimaddr;
writel(mbox, mbox_buf);
- for (i = 0;
- readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
- mdelay(1);
-
- if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
+ for (i = 0; i < 50; i++) {
+ if (lpfc_readl((resp_buf + 1), &resp_data))
+ return;
+ if (resp_data != ~(BARRIER_TEST_PATTERN))
+ mdelay(1);
+ else
+ break;
+ }
+ resp_data = 0;
+ if (lpfc_readl((resp_buf + 1), &resp_data))
+ return;
+ if (resp_data != ~(BARRIER_TEST_PATTERN)) {
if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
phba->pport->stopped)
goto restore_hc;
@@ -3633,13 +3647,26 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
}
((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
- for (i = 0; readl(resp_buf) != mbox && i < 500; i++)
- mdelay(1);
+ resp_data = 0;
+ for (i = 0; i < 500; i++) {
+ if (lpfc_readl(resp_buf, &resp_data))
+ return;
+ if (resp_data != mbox)
+ mdelay(1);
+ else
+ break;
+ }
clear_errat:
- while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
- mdelay(1);
+ while (++i < 500) {
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return;
+ if (!(ha_copy & HA_ERATT))
+ mdelay(1);
+ else
+ break;
+ }
if (readl(phba->HAregaddr) & HA_ERATT) {
writel(HA_ERATT, phba->HAregaddr);
@@ -3686,7 +3713,11 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
/* Disable the error attention */
spin_lock_irq(&phba->hbalock);
- status = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &status)) {
+ spin_unlock_irq(&phba->hbalock);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return 1;
+ }
status &= ~HC_ERINT_ENA;
writel(status, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -3720,11 +3751,12 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
* 3 seconds we still set HBA_ERROR state because the status of the
* board is now undefined.
*/
- ha_copy = readl(phba->HAregaddr);
-
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return 1;
while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
mdelay(100);
- ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return 1;
}
del_timer_sync(&psli->mbox_tmo);
@@ -4018,7 +4050,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
uint32_t status, i = 0;
/* Read the HBA Host Status Register */
- status = readl(phba->HSregaddr);
+ if (lpfc_readl(phba->HSregaddr, &status))
+ return -EIO;
/* Check status register to see what current state is */
i = 0;
@@ -4073,7 +4106,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
lpfc_sli_brdrestart(phba);
}
/* Read the HBA Host Status Register */
- status = readl(phba->HSregaddr);
+ if (lpfc_readl(phba->HSregaddr, &status))
+ return -EIO;
}
/* Check to see if any errors occurred during init */
@@ -5136,7 +5170,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
MAILBOX_t *mb;
struct lpfc_sli *psli = &phba->sli;
uint32_t status, evtctr;
- uint32_t ha_copy;
+ uint32_t ha_copy, hc_copy;
int i;
unsigned long timeout;
unsigned long drvr_flag = 0;
@@ -5202,15 +5236,17 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
goto out_not_finished;
}
- if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
- !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
- spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
+ if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
+ !(hc_copy & HC_MBINT_ENA)) {
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):2528 Mailbox command x%x cannot "
"issue Data: x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
- goto out_not_finished;
+ goto out_not_finished;
+ }
}
if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
@@ -5408,11 +5444,19 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
word0 = le32_to_cpu(word0);
} else {
/* First read mbox status word */
- word0 = readl(phba->MBslimaddr);
+ if (lpfc_readl(phba->MBslimaddr, &word0)) {
+ spin_unlock_irqrestore(&phba->hbalock,
+ drvr_flag);
+ goto out_not_finished;
+ }
}
/* Read the HBA Host Attention Register */
- ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
+ spin_unlock_irqrestore(&phba->hbalock,
+ drvr_flag);
+ goto out_not_finished;
+ }
timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
mb->mbxCommand) *
1000) + jiffies;
@@ -5463,7 +5507,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
word0 = readl(phba->MBslimaddr);
}
/* Read the HBA Host Attention Register */
- ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
+ spin_unlock_irqrestore(&phba->hbalock,
+ drvr_flag);
+ goto out_not_finished;
+ }
}
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
@@ -6263,7 +6311,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
bf_set(lpfc_sli4_sge_last, sgl, 0);
- sgl->word2 = cpu_to_le32(sgl->word2);
/* swap the size field back to the cpu so we
* can assign it to the sgl.
*/
@@ -6283,6 +6330,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
bf_set(lpfc_sli4_sge_offset, sgl, offset);
offset += bde.tus.f.bdeSize;
}
+ sgl->word2 = cpu_to_le32(sgl->word2);
bpl++;
sgl++;
}
@@ -6528,9 +6576,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
sizeof(struct ulp_bde64);
for (i = 0; i < numBdes; i++) {
- if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64)
- break;
bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+ if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+ break;
xmit_len += bde.tus.f.bdeSize;
}
/* word3 iocb=IO_TAG wqe=request_payload_len */
@@ -6620,15 +6668,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
xritag = 0;
break;
case CMD_XMIT_BLS_RSP64_CX:
- /* As BLS ABTS-ACC WQE is very different from other WQEs,
+ /* As BLS ABTS RSP WQE is very different from other WQEs,
* we re-construct this WQE here based on information in
* iocbq from scratch.
*/
memset(wqe, 0, sizeof(union lpfc_wqe));
/* OX_ID is invariable to who sent ABTS to CT exchange */
bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
- bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc));
- if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) ==
+ bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
+ if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
LPFC_ABTS_UNSOL_INT) {
/* ABTS sent by initiator to CT exchange, the
* RX_ID field will be filled with the newly
@@ -6642,7 +6690,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* RX_ID from ABTS.
*/
bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
- bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc));
+ bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
}
bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
@@ -6653,6 +6701,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
LPFC_WQE_LENLOC_NONE);
/* Overwrite the pre-set comnd type with OTHER_COMMAND */
command_type = OTHER_COMMAND;
+ if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
+ bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
+ bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
+ bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
+ bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
+ bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
+ bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
+ }
+
break;
case CMD_XRI_ABORTED_CX:
case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
@@ -6701,7 +6758,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
if (piocb->sli4_xritag == NO_XRI) {
if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
- piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
+ piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX)
sglq = NULL;
else {
if (pring->txq_cnt) {
@@ -8194,7 +8252,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
piocb->iocb_flag &= ~LPFC_IO_WAKE;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
- creg_val = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &creg_val))
+ return IOCB_ERROR;
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -8236,7 +8295,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
}
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
- creg_val = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &creg_val))
+ return IOCB_ERROR;
creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -8387,10 +8447,13 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
uint32_t ha_copy;
/* Read chip Host Attention (HA) register */
- ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ goto unplug_err;
+
if (ha_copy & HA_ERATT) {
/* Read host status register to retrieve error event */
- lpfc_sli_read_hs(phba);
+ if (lpfc_sli_read_hs(phba))
+ goto unplug_err;
/* Check if there is a deferred error condition is active */
if ((HS_FFER1 & phba->work_hs) &&
@@ -8409,6 +8472,15 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
return 1;
}
return 0;
+
+unplug_err:
+ /* Set the driver HS work bitmap */
+ phba->work_hs |= UNPLUG_ERR;
+ /* Set the driver HA work bitmap */
+ phba->work_ha |= HA_ERATT;
+ /* Indicate polling handles this ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ return 1;
}
/**
@@ -8436,8 +8508,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
switch (if_type) {
case LPFC_SLI_INTF_IF_TYPE_0:
- uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
- uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
+ if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
+ &uerr_sta_lo) ||
+ lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
+ &uerr_sta_hi)) {
+ phba->work_hs |= UNPLUG_ERR;
+ phba->work_ha |= HA_ERATT;
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ return 1;
+ }
if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
(~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8456,9 +8535,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
}
break;
case LPFC_SLI_INTF_IF_TYPE_2:
- portstat_reg.word0 =
- readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
- portsmphr = readl(phba->sli4_hba.PSMPHRregaddr);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0) ||
+ lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+ &portsmphr)){
+ phba->work_hs |= UNPLUG_ERR;
+ phba->work_ha |= HA_ERATT;
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ return 1;
+ }
if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
phba->work_status[0] =
readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
@@ -8639,7 +8724,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
/* Need to read HA REG for slow-path events */
spin_lock_irqsave(&phba->hbalock, iflag);
- ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ goto unplug_error;
/* If somebody is waiting to handle an eratt don't process it
* here. The brdkill function will do this.
*/
@@ -8665,7 +8751,9 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
}
/* Clear up only attention source related to slow-path */
- hc_copy = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &hc_copy))
+ goto unplug_error;
+
writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
HC_LAINT_ENA | HC_ERINT_ENA),
phba->HCregaddr);
@@ -8688,7 +8776,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
*/
spin_lock_irqsave(&phba->hbalock, iflag);
phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
- control = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &control))
+ goto unplug_error;
control &= ~HC_LAINT_ENA;
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -8708,7 +8797,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
status >>= (4*LPFC_ELS_RING);
if (status & HA_RXMASK) {
spin_lock_irqsave(&phba->hbalock, iflag);
- control = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &control))
+ goto unplug_error;
lpfc_debugfs_slow_ring_trc(phba,
"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
@@ -8741,7 +8831,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
}
spin_lock_irqsave(&phba->hbalock, iflag);
if (work_ha_copy & HA_ERATT) {
- lpfc_sli_read_hs(phba);
+ if (lpfc_sli_read_hs(phba))
+ goto unplug_error;
/*
* Check if there is a deferred error condition
* is active
@@ -8872,6 +8963,9 @@ send_current_mbox:
lpfc_worker_wake_up(phba);
}
return IRQ_HANDLED;
+unplug_error:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return IRQ_HANDLED;
} /* lpfc_sli_sp_intr_handler */
@@ -8919,7 +9013,8 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id)
if (lpfc_intr_state_check(phba))
return IRQ_NONE;
/* Need to read HA REG for FCP ring and other ring events */
- ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return IRQ_HANDLED;
/* Clear up only attention source related to fast-path */
spin_lock_irqsave(&phba->hbalock, iflag);
/*
@@ -9004,7 +9099,11 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
spin_lock(&phba->hbalock);
- phba->ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
+ spin_unlock(&phba->hbalock);
+ return IRQ_HANDLED;
+ }
+
if (unlikely(!phba->ha_copy)) {
spin_unlock(&phba->hbalock);
return IRQ_NONE;
@@ -9026,7 +9125,10 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
}
/* Clear attention sources except link and error attentions */
- hc_copy = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
+ spin_unlock(&phba->hbalock);
+ return IRQ_HANDLED;
+ }
writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
phba->HCregaddr);
@@ -10403,7 +10505,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
-
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
@@ -10413,11 +10514,22 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
LPFC_MBOX_OPCODE_CQ_CREATE,
length, LPFC_SLI4_MBX_EMBED);
cq_create = &mbox->u.mqe.un.cq_create;
+ shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
cq->page_count);
bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
- bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ phba->sli4_hba.pc_sli4_params.cqv);
+ if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
+ bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
+ (PAGE_SIZE/SLI4_PAGE_SIZE));
+ bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
+ eq->queue_id);
+ } else {
+ bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
+ eq->queue_id);
+ }
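A minimal sketch of the version-gated field selection added above, with invented structure and constant names (the real layout lives in lpfc_hw4.h): the request is stamped with the queue-create version reported in pc_sli4_params, and the EQ association goes into whichever context field that version defines.

/* Sketch only; field and constant names are invented and do not match
 * lpfc_hw4.h.  It shows the shape of the logic: stamp the request with the
 * firmware-reported create version, then place the EQ id in whichever
 * context field that version defines. */
#include <stdint.h>
#include <stdio.h>

enum { Q_CREATE_V0 = 0, Q_CREATE_V2 = 2 };

struct demo_cq_create {
	uint8_t  version;	/* from pc_sli4_params in the real driver */
	uint16_t eq_id_v0;	/* context field used by the original layout */
	uint16_t eq_id_v2;	/* context field used by the version-2 layout */
	uint16_t page_size;	/* only meaningful for version 2 */
};

static void demo_fill_cq_create(struct demo_cq_create *req,
				uint8_t cqv, uint16_t eq_id)
{
	req->version = cqv;
	if (cqv == Q_CREATE_V2) {
		req->page_size = 1;	/* PAGE_SIZE / SLI4_PAGE_SIZE */
		req->eq_id_v2 = eq_id;
	} else {
		req->eq_id_v0 = eq_id;
	}
}

int main(void)
{
	struct demo_cq_create req = { 0 };

	demo_fill_cq_create(&req, Q_CREATE_V2, 7);
	printf("version %u, eq id %u\n", (unsigned)req.version,
	       (unsigned)req.eq_id_v2);
	return 0;
}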
switch (cq->entry_count) {
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -10449,7 +10561,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
- shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
@@ -10515,20 +10626,20 @@ lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
switch (mq->entry_count) {
case 16:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
- LPFC_MQ_CNT_16);
+ bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+ LPFC_MQ_RING_SIZE_16);
break;
case 32:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
- LPFC_MQ_CNT_32);
+ bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+ LPFC_MQ_RING_SIZE_32);
break;
case 64:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
- LPFC_MQ_CNT_64);
+ bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+ LPFC_MQ_RING_SIZE_64);
break;
case 128:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
- LPFC_MQ_CNT_128);
+ bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+ LPFC_MQ_RING_SIZE_128);
break;
}
list_for_each_entry(dmabuf, &mq->page_list, list) {
@@ -10586,6 +10697,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
length, LPFC_SLI4_MBX_EMBED);
mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
+ shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
bf_set(lpfc_mbx_mq_create_ext_num_pages,
&mq_create_ext->u.request, mq->page_count);
bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
@@ -10598,9 +10710,15 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
&mq_create_ext->u.request, 1);
bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
&mq_create_ext->u.request, 1);
- bf_set(lpfc_mq_context_cq_id,
- &mq_create_ext->u.request.context, cq->queue_id);
bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ phba->sli4_hba.pc_sli4_params.mqv);
+ if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
+ bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
+ cq->queue_id);
+ else
+ bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
+ cq->queue_id);
switch (mq->entry_count) {
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -10610,20 +10728,24 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
return -EINVAL;
/* otherwise default to smallest count (drop through) */
case 16:
- bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
- LPFC_MQ_CNT_16);
+ bf_set(lpfc_mq_context_ring_size,
+ &mq_create_ext->u.request.context,
+ LPFC_MQ_RING_SIZE_16);
break;
case 32:
- bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
- LPFC_MQ_CNT_32);
+ bf_set(lpfc_mq_context_ring_size,
+ &mq_create_ext->u.request.context,
+ LPFC_MQ_RING_SIZE_32);
break;
case 64:
- bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
- LPFC_MQ_CNT_64);
+ bf_set(lpfc_mq_context_ring_size,
+ &mq_create_ext->u.request.context,
+ LPFC_MQ_RING_SIZE_64);
break;
case 128:
- bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
- LPFC_MQ_CNT_128);
+ bf_set(lpfc_mq_context_ring_size,
+ &mq_create_ext->u.request.context,
+ LPFC_MQ_RING_SIZE_128);
break;
}
list_for_each_entry(dmabuf, &mq->page_list, list) {
@@ -10634,7 +10756,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
putPaddrHigh(dmabuf->phys);
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
- shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
&mq_create_ext->u.response);
if (rc != MBX_SUCCESS) {
@@ -10711,6 +10832,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ struct dma_address *page;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -10724,20 +10846,42 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
length, LPFC_SLI4_MBX_EMBED);
wq_create = &mbox->u.mqe.un.wq_create;
+ shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
wq->page_count);
bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
cq->queue_id);
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ phba->sli4_hba.pc_sli4_params.wqv);
+ if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
+ bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
+ wq->entry_count);
+ switch (wq->entry_size) {
+ default:
+ case 64:
+ bf_set(lpfc_mbx_wq_create_wqe_size,
+ &wq_create->u.request_1,
+ LPFC_WQ_WQE_SIZE_64);
+ break;
+ case 128:
+ bf_set(lpfc_mbx_wq_create_wqe_size,
+ &wq_create->u.request_1,
+ LPFC_WQ_WQE_SIZE_128);
+ break;
+ }
+ bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
+ (PAGE_SIZE/SLI4_PAGE_SIZE));
+ page = wq_create->u.request_1.page;
+ } else {
+ page = wq_create->u.request.page;
+ }
list_for_each_entry(dmabuf, &wq->page_list, list) {
memset(dmabuf->virt, 0, hw_page_size);
- wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
- putPaddrLow(dmabuf->phys);
- wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
- putPaddrHigh(dmabuf->phys);
+ page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
+ page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
- shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
@@ -10815,37 +10959,51 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
length, LPFC_SLI4_MBX_EMBED);
rq_create = &mbox->u.mqe.un.rq_create;
- switch (hrq->entry_count) {
- default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2535 Unsupported RQ count. (%d)\n",
- hrq->entry_count);
- if (hrq->entry_count < 512)
- return -EINVAL;
- /* otherwise default to smallest count (drop through) */
- case 512:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_512);
- break;
- case 1024:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_1024);
- break;
- case 2048:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_2048);
- break;
- case 4096:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_4096);
- break;
+ shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ phba->sli4_hba.pc_sli4_params.rqv);
+ if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
+ bf_set(lpfc_rq_context_rqe_count_1,
+ &rq_create->u.request.context,
+ hrq->entry_count);
+ rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
+ } else {
+ switch (hrq->entry_count) {
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2535 Unsupported RQ count. (%d)\n",
+ hrq->entry_count);
+ if (hrq->entry_count < 512)
+ return -EINVAL;
+ /* otherwise default to smallest count (drop through) */
+ case 512:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_512);
+ break;
+ case 1024:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_1024);
+ break;
+ case 2048:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_2048);
+ break;
+ case 4096:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_4096);
+ break;
+ }
+ bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+ LPFC_HDR_BUF_SIZE);
}
bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
cq->queue_id);
bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
hrq->page_count);
- bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
- LPFC_HDR_BUF_SIZE);
list_for_each_entry(dmabuf, &hrq->page_list, list) {
memset(dmabuf->virt, 0, hw_page_size);
rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
@@ -10855,7 +11013,6 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
- shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
@@ -10881,37 +11038,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
length, LPFC_SLI4_MBX_EMBED);
- switch (drq->entry_count) {
- default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2536 Unsupported RQ count. (%d)\n",
- drq->entry_count);
- if (drq->entry_count < 512)
- return -EINVAL;
- /* otherwise default to smallest count (drop through) */
- case 512:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_512);
- break;
- case 1024:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_1024);
- break;
- case 2048:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_2048);
- break;
- case 4096:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_4096);
- break;
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ phba->sli4_hba.pc_sli4_params.rqv);
+ if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
+ bf_set(lpfc_rq_context_rqe_count_1,
+ &rq_create->u.request.context,
+ hrq->entry_count);
+ rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
+ } else {
+ switch (drq->entry_count) {
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2536 Unsupported RQ count. (%d)\n",
+ drq->entry_count);
+ if (drq->entry_count < 512)
+ return -EINVAL;
+ /* otherwise default to smallest count (drop through) */
+ case 512:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_512);
+ break;
+ case 1024:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_1024);
+ break;
+ case 2048:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_2048);
+ break;
+ case 4096:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_4096);
+ break;
+ }
+ bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+ LPFC_DATA_BUF_SIZE);
}
bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
cq->queue_id);
bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
drq->page_count);
- bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
- LPFC_DATA_BUF_SIZE);
list_for_each_entry(dmabuf, &drq->page_list, list) {
rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
@@ -11580,6 +11750,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
static char *rctl_names[] = FC_RCTL_NAMES_INIT;
char *type_names[] = FC_TYPE_NAMES_INIT;
struct fc_vft_header *fc_vft_hdr;
+ uint32_t *header = (uint32_t *) fc_hdr;
switch (fc_hdr->fh_r_ctl) {
case FC_RCTL_DD_UNCAT: /* uncategorized information */
@@ -11628,10 +11799,15 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
default:
goto drop;
}
+
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2538 Received frame rctl:%s type:%s\n",
+ "2538 Received frame rctl:%s type:%s "
+ "Frame Data:%08x %08x %08x %08x %08x %08x\n",
rctl_names[fc_hdr->fh_r_ctl],
- type_names[fc_hdr->fh_type]);
+ type_names[fc_hdr->fh_type],
+ be32_to_cpu(header[0]), be32_to_cpu(header[1]),
+ be32_to_cpu(header[2]), be32_to_cpu(header[3]),
+ be32_to_cpu(header[4]), be32_to_cpu(header[5]));
return 0;
drop:
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
@@ -11928,17 +12104,17 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
}
/**
- * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler
+ * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
* @phba: Pointer to HBA context object.
* @cmd_iocbq: pointer to the command iocbq structure.
* @rsp_iocbq: pointer to the response iocbq structure.
*
- * This function handles the sequence abort accept iocb command complete
+ * This function handles the sequence abort response iocb command complete
* event. It properly releases the memory allocated to the sequence abort
* accept iocb.
**/
static void
-lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
+lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmd_iocbq,
struct lpfc_iocbq *rsp_iocbq)
{
@@ -11947,15 +12123,15 @@ lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_seq_abort_acc - Accept sequence abort
+ * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
* @phba: Pointer to HBA context object.
* @fc_hdr: pointer to a FC frame header.
*
- * This function sends a basic accept to a previous unsol sequence abort
+ * This function sends a basic response to a previous unsol sequence abort
* event after aborting the sequence handling.
**/
static void
-lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
+lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
struct fc_frame_header *fc_hdr)
{
struct lpfc_iocbq *ctiocb = NULL;
@@ -11963,6 +12139,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
uint16_t oxid, rxid;
uint32_t sid, fctl;
IOCB_t *icmd;
+ int rc;
if (!lpfc_is_link_up(phba))
return;
@@ -11983,7 +12160,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
+ phba->sli4_hba.max_cfg_param.xri_base))
lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
- /* Allocate buffer for acc iocb */
+ /* Allocate buffer for rsp iocb */
ctiocb = lpfc_sli_get_iocbq(phba);
if (!ctiocb)
return;
@@ -12008,32 +12185,54 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
ctiocb->iocb_cmpl = NULL;
ctiocb->vport = phba->pport;
- ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl;
+ ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+ ctiocb->sli4_xritag = NO_XRI;
+
+ /* If the oxid maps to the FCP XRI range or if it is out of range,
+ * send a BLS_RJT. The driver no longer has that exchange.
+ * Override the IOCB for a BA_RJT.
+ */
+ if (oxid > (phba->sli4_hba.max_cfg_param.max_xri +
+ phba->sli4_hba.max_cfg_param.xri_base) ||
+ oxid > (lpfc_sli4_get_els_iocb_cnt(phba) +
+ phba->sli4_hba.max_cfg_param.xri_base)) {
+ icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
+ bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
+ bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
+ bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ }
if (fctl & FC_FC_EX_CTX) {
/* ABTS sent by responder to CT exchange, construction
* of BA_ACC will use OX_ID from ABTS for the XRI_TAG
* field and RX_ID from ABTS for RX_ID field.
*/
- bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP);
- bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid);
- ctiocb->sli4_xritag = oxid;
+ bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
+ bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
} else {
/* ABTS sent by initiator to CT exchange, construction
* of BA_ACC will need to allocate a new XRI as for the
* XRI_TAG and RX_ID fields.
*/
- bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT);
- bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI);
- ctiocb->sli4_xritag = NO_XRI;
+ bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
+ bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI);
}
- bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid);
+ bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
- /* Xmit CT abts accept on exchange <xid> */
+ /* Xmit CT abts response on exchange <xid> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n",
- CMD_XMIT_BLS_RSP64_CX, phba->link_state);
- lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+ "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
+ icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "2925 Failed to issue CT ABTS RSP x%x on "
+ "xri x%x, Data x%x\n",
+ icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+ phba->link_state);
+ lpfc_sli_release_iocbq(phba, ctiocb);
+ }
}
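The added range test decides whether the driver still owns the exchange named by the incoming OX_ID; if not, the IOCB is overridden to carry a BA_RJT instead of a BA_ACC. A hedged sketch of that decision, with made-up limits standing in for xri_base, max_xri and the ELS IOCB count:

/* Sketch only: the limits below stand in for the xri_base/max_xri and ELS
 * IOCB count that the real driver reads from max_cfg_param; the point is
 * just the shape of the accept-vs-reject decision. */
#include <stdint.h>
#include <stdio.h>

static int demo_should_reject(uint16_t oxid, uint16_t xri_base,
			      uint16_t max_xri, uint16_t els_iocb_cnt)
{
	/* Reject when the OX_ID is outside the XRI range the driver owns
	 * or beyond the ELS exchanges it can still respond for. */
	return oxid > (uint16_t)(xri_base + max_xri) ||
	       oxid > (uint16_t)(xri_base + els_iocb_cnt);
}

int main(void)
{
	printf("oxid 0x0010 -> %s\n",
	       demo_should_reject(0x0010, 0, 0x100, 0x40) ? "BA_RJT" : "BA_ACC");
	printf("oxid 0x0200 -> %s\n",
	       demo_should_reject(0x0200, 0, 0x100, 0x40) ? "BA_RJT" : "BA_ACC");
	return 0;
}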
/**
@@ -12081,7 +12280,7 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
lpfc_in_buf_free(phba, &dmabuf->dbuf);
}
/* Send basic accept (BA_ACC) to the abort requester */
- lpfc_sli4_seq_abort_acc(phba, &fc_hdr);
+ lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 595056b89608..1a3cbf88f2ce 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 0a4d376dbca5..2404d1d65563 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.21"
+#define LPFC_DRIVER_VERSION "8.3.22"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index e8a6f1cf1e4b..5e001ffd4c13 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -1748,6 +1748,54 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
}
/**
+ * _base_display_hp_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_hp_branding(struct MPT2SAS_ADAPTER *ioc)
+{
+ if (ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID)
+ return;
+
+ switch (ioc->pdev->device) {
+ case MPI2_MFGPAGE_DEVID_SAS2004:
+ switch (ioc->pdev->subsystem_device) {
+ case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
+ break;
+ default:
+ break;
+ }
+ break;
+ case MPI2_MFGPAGE_DEVID_SAS2308_2:
+ switch (ioc->pdev->subsystem_device) {
+ case MPT2SAS_HP_2_4_INTERNAL_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_HP_2_4_INTERNAL_BRANDING);
+ break;
+ case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
+ break;
+ case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
+ break;
+ case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
 * _base_display_ioc_capabilities - Display IOC's capabilities.
* @ioc: per adapter object
*
@@ -1778,6 +1826,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
_base_display_dell_branding(ioc);
_base_display_intel_branding(ioc);
+ _base_display_hp_branding(ioc);
printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index a3f8aa9baea4..500328245f61 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -168,6 +168,26 @@
#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
+
+/*
+ * HP HBA branding
+ */
+#define MPT2SAS_HP_3PAR_SSVID 0x1590
+#define MPT2SAS_HP_2_4_INTERNAL_BRANDING "HP H220 Host Bus Adapter"
+#define MPT2SAS_HP_2_4_EXTERNAL_BRANDING "HP H221 Host Bus Adapter"
+#define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING "HP H222 Host Bus Adapter"
+#define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING "HP H220i Host Bus Adapter"
+#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING "HP H210i Host Bus Adapter"
+
+/*
+ * HP HBA SSDIDs
+ */
+#define MPT2SAS_HP_2_4_INTERNAL_SSDID 0x0041
+#define MPT2SAS_HP_2_4_EXTERNAL_SSDID 0x0042
+#define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID 0x0043
+#define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID 0x0044
+#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046
+
/*
* per target private data
*/
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 19ad34f381a5..938d045e4180 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -663,6 +663,13 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
+ { PCI_VDEVICE(TTI, 0x2710), chip_9480 },
+ { PCI_VDEVICE(TTI, 0x2720), chip_9480 },
+ { PCI_VDEVICE(TTI, 0x2721), chip_9480 },
+ { PCI_VDEVICE(TTI, 0x2722), chip_9480 },
+ { PCI_VDEVICE(TTI, 0x2740), chip_9480 },
+ { PCI_VDEVICE(TTI, 0x2744), chip_9480 },
+ { PCI_VDEVICE(TTI, 0x2760), chip_9480 },
{ } /* terminate list */
};
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 2fc0045b1a52..c1f8d1b150f7 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -53,6 +53,9 @@
#define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022
#endif
+#define ISP4XXX_PCI_FN_1 0x1
+#define ISP4XXX_PCI_FN_2 0x3
+
#define QLA_SUCCESS 0
#define QLA_ERROR 1
@@ -233,9 +236,6 @@ struct ddb_entry {
unsigned long flags; /* DDB Flags */
- unsigned long dev_scan_wait_to_start_relogin;
- unsigned long dev_scan_wait_to_complete_relogin;
-
uint16_t fw_ddb_index; /* DDB firmware index */
uint16_t options;
uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
@@ -289,8 +289,6 @@ struct ddb_entry {
* DDB flags.
*/
#define DF_RELOGIN 0 /* Relogin to device */
-#define DF_NO_RELOGIN 1 /* Do not relogin if IOCTL
- * logged it out */
#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
#define DF_FO_MASKED 3
@@ -376,7 +374,7 @@ struct scsi_qla_host {
#define AF_LINK_UP 8 /* 0x00000100 */
#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
-#define AF_HBA_GOING_AWAY 12 /* 0x00001000 */
+#define AF_HA_REMOVAL 12 /* 0x00001000 */
#define AF_INTx_ENABLED 15 /* 0x00008000 */
#define AF_MSI_ENABLED 16 /* 0x00010000 */
#define AF_MSIX_ENABLED 17 /* 0x00020000 */
@@ -479,7 +477,6 @@ struct scsi_qla_host {
uint32_t timer_active;
/* Recovery Timers */
- uint32_t discovery_wait;
atomic_t check_relogin_timeouts;
uint32_t retry_reset_ha_cnt;
uint32_t isp_reset_timer; /* reset test timer */
@@ -765,6 +762,5 @@ static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
/* Defines for process_aen() */
#define PROCESS_ALL_AENS 0
#define FLUSH_DDB_CHANGED_AENS 1
-#define RELOGIN_DDB_CHANGED_AENS 2
#endif /*_QLA4XXX_H */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index c1985792f034..31e2bf97198c 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -455,6 +455,7 @@ struct addr_ctrl_blk {
uint8_t res0; /* 07 */
uint16_t eth_mtu_size; /* 08-09 */
uint16_t add_fw_options; /* 0A-0B */
+#define SERIALIZE_TASK_MGMT 0x0400
uint8_t hb_interval; /* 0C */
uint8_t inst_num; /* 0D */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 8fad99b7eef4..cc53e3fbd78c 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -136,7 +136,6 @@ void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
-extern int ql4xdiscoverywait;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1629c48c35ef..bbb2e903d38a 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -723,13 +723,38 @@ int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err)
return relogin;
}
+static void qla4xxx_flush_AENS(struct scsi_qla_host *ha)
+{
+ unsigned long wtime;
+
+ /* Flush the 0x8014 AEN from the firmware as a result of
+ * Auto connect. We are basically doing get_firmware_ddb()
+ * to determine whether we need to log back in or not.
+ * Trying to do a set ddb before we have processed 0x8014
+ * will result in another set_ddb() for the same ddb. In other
+ * words there will be stale entries in the aen_q.
+ */
+ wtime = jiffies + (2 * HZ);
+ do {
+ if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS)
+ if (ha->firmware_state & (BIT_2 | BIT_0))
+ return;
+
+ if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
+ qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+
+ msleep(1000);
+ } while (!time_after_eq(jiffies, wtime));
+}
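The loop above is a bounded poll: return early once the firmware reports a ready state, flush any queued DDB-changed AENs on each pass, and give up after roughly two seconds. A self-contained sketch of the same shape, using wall-clock time in place of jiffies (the callbacks are placeholders, not qla4xxx functions):

/* Sketch only: poll a condition with a hard deadline, doing housekeeping on
 * every pass.  time()/sleep() stand in for jiffies/msleep(); the predicate
 * and flush callbacks are placeholders, not qla4xxx functions. */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int poll_with_deadline(int (*ready)(void), void (*flush)(void),
			      unsigned int timeout_secs)
{
	time_t deadline = time(NULL) + timeout_secs;

	do {
		if (ready())
			return 0;	/* firmware state good: stop early */
		flush();		/* e.g. drop stale DDB-changed AENs */
		sleep(1);
	} while (time(NULL) < deadline);

	return -1;			/* deadline expired */
}

static int demo_ready(void) { return 0; }
static void demo_flush(void) { puts("flushing stale AENs"); }

int main(void)
{
	if (poll_with_deadline(demo_ready, demo_flush, 2))
		puts("gave up after deadline");
	return 0;
}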
+
/**
- * qla4xxx_configure_ddbs - builds driver ddb list
+ * qla4xxx_build_ddb_list - builds driver ddb list
* @ha: Pointer to host adapter structure.
*
* This routine searches for all valid firmware ddb entries and builds
* an internal ddb list. Ddbs that are considered valid are those with
* a device state of SESSION_ACTIVE.
+ * A relogin (set_ddb) is issued for DDBs that are not online.
**/
static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
{
@@ -744,6 +769,8 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
uint32_t ipv6_device;
uint32_t new_tgt;
+ qla4xxx_flush_AENS(ha);
+
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (fw_ddb_entry == NULL) {
@@ -847,144 +874,6 @@ exit_build_ddb_list_no_free:
return status;
}
-struct qla4_relog_scan {
- int halt_wait;
- uint32_t conn_err;
- uint32_t fw_ddb_index;
- uint32_t next_fw_ddb_index;
- uint32_t fw_ddb_device_state;
-};
-
-static int qla4_test_rdy(struct scsi_qla_host *ha, struct qla4_relog_scan *rs)
-{
- struct ddb_entry *ddb_entry;
-
- if (qla4_is_relogin_allowed(ha, rs->conn_err)) {
- /* We either have a device that is in
- * the process of relogging in or a
- * device that is waiting to be
- * relogged in */
- rs->halt_wait = 0;
-
- ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
- rs->fw_ddb_index);
- if (ddb_entry == NULL)
- return QLA_ERROR;
-
- if (ddb_entry->dev_scan_wait_to_start_relogin != 0
- && time_after_eq(jiffies,
- ddb_entry->
- dev_scan_wait_to_start_relogin))
- {
- ddb_entry->dev_scan_wait_to_start_relogin = 0;
- qla4xxx_set_ddb_entry(ha, rs->fw_ddb_index, 0);
- }
- }
- return QLA_SUCCESS;
-}
-
-static int qla4_scan_for_relogin(struct scsi_qla_host *ha,
- struct qla4_relog_scan *rs)
-{
- int error;
-
- /* scan for relogins
- * ----------------- */
- for (rs->fw_ddb_index = 0; rs->fw_ddb_index < MAX_DDB_ENTRIES;
- rs->fw_ddb_index = rs->next_fw_ddb_index) {
- if (qla4xxx_get_fwddb_entry(ha, rs->fw_ddb_index, NULL, 0,
- NULL, &rs->next_fw_ddb_index,
- &rs->fw_ddb_device_state,
- &rs->conn_err, NULL, NULL)
- == QLA_ERROR)
- return QLA_ERROR;
-
- if (rs->fw_ddb_device_state == DDB_DS_LOGIN_IN_PROCESS)
- rs->halt_wait = 0;
-
- if (rs->fw_ddb_device_state == DDB_DS_SESSION_FAILED ||
- rs->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) {
- error = qla4_test_rdy(ha, rs);
- if (error)
- return error;
- }
-
- /* We know we've reached the last device when
- * next_fw_ddb_index is 0 */
- if (rs->next_fw_ddb_index == 0)
- break;
- }
- return QLA_SUCCESS;
-}
-
-/**
- * qla4xxx_devices_ready - wait for target devices to be logged in
- * @ha: pointer to adapter structure
- *
- * This routine waits up to ql4xdiscoverywait seconds
- * F/W database during driver load time.
- **/
-static int qla4xxx_devices_ready(struct scsi_qla_host *ha)
-{
- int error;
- unsigned long discovery_wtime;
- struct qla4_relog_scan rs;
-
- discovery_wtime = jiffies + (ql4xdiscoverywait * HZ);
-
- DEBUG(printk("Waiting (%d) for devices ...\n", ql4xdiscoverywait));
- do {
- /* poll for AEN. */
- qla4xxx_get_firmware_state(ha);
- if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) {
- /* Set time-between-relogin timer */
- qla4xxx_process_aen(ha, RELOGIN_DDB_CHANGED_AENS);
- }
-
- /* if no relogins active or needed, halt discvery wait */
- rs.halt_wait = 1;
-
- error = qla4_scan_for_relogin(ha, &rs);
-
- if (rs.halt_wait) {
- DEBUG2(printk("scsi%ld: %s: Delay halted. Devices "
- "Ready.\n", ha->host_no, __func__));
- return QLA_SUCCESS;
- }
-
- msleep(2000);
- } while (!time_after_eq(jiffies, discovery_wtime));
-
- DEBUG3(qla4xxx_get_conn_event_log(ha));
-
- return QLA_SUCCESS;
-}
-
-static void qla4xxx_flush_AENS(struct scsi_qla_host *ha)
-{
- unsigned long wtime;
-
- /* Flush the 0x8014 AEN from the firmware as a result of
- * Auto connect. We are basically doing get_firmware_ddb()
- * to determine whether we need to log back in or not.
- * Trying to do a set ddb before we have processed 0x8014
- * will result in another set_ddb() for the same ddb. In other
- * words there will be stale entries in the aen_q.
- */
- wtime = jiffies + (2 * HZ);
- do {
- if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS)
- if (ha->firmware_state & (BIT_2 | BIT_0))
- return;
-
- if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
- qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
-
- msleep(1000);
- } while (!time_after_eq(jiffies, wtime));
-
-}
-
static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha)
{
uint16_t fw_ddb_index;
@@ -996,29 +885,12 @@ static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha)
for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++)
ha->fw_ddb_index_map[fw_ddb_index] =
- (struct ddb_entry *)INVALID_ENTRY;
+ (struct ddb_entry *)INVALID_ENTRY;
ha->tot_ddbs = 0;
- qla4xxx_flush_AENS(ha);
-
- /* Wait for an AEN */
- qla4xxx_devices_ready(ha);
-
- /*
- * First perform device discovery for active
- * fw ddb indexes and build
- * ddb list.
- */
- if ((status = qla4xxx_build_ddb_list(ha)) == QLA_ERROR)
- return status;
-
- /*
- * Targets can come online after the inital discovery, so processing
- * the aens here will catch them.
- */
- if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
- qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
+ /* Perform device discovery and build ddb list. */
+ status = qla4xxx_build_ddb_list(ha);
return status;
}
@@ -1537,7 +1409,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
uint32_t state, uint32_t conn_err)
{
struct ddb_entry * ddb_entry;
- uint32_t old_fw_ddb_device_state;
/* check for out of range index */
if (fw_ddb_index >= MAX_DDB_ENTRIES)
@@ -1553,27 +1424,18 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
}
/* Device already exists in our database. */
- old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for "
"index [%d]\n", ha->host_no, __func__,
ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
- if (old_fw_ddb_device_state == state &&
- state == DDB_DS_SESSION_ACTIVE) {
- if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
- atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
- iscsi_unblock_session(ddb_entry->sess);
- }
- return QLA_SUCCESS;
- }
ddb_entry->fw_ddb_device_state = state;
/* Device is back online. */
- if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
+ if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
+ (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
atomic_set(&ddb_entry->relogin_retry_count, 0);
atomic_set(&ddb_entry->relogin_timer, 0);
clear_bit(DF_RELOGIN, &ddb_entry->flags);
- clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
iscsi_unblock_session(ddb_entry->sess);
iscsi_session_event(ddb_entry->sess,
ISCSI_KEVENT_CREATE_SESSION);
@@ -1581,7 +1443,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
* Change the lun state to READY in case the lun TIMEOUT before
* the device came back.
*/
- } else {
+ } else if (ddb_entry->fw_ddb_device_state != DDB_DS_SESSION_ACTIVE) {
/* Device went away, mark device missing */
if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s mark missing "
@@ -1598,7 +1460,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
*/
if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED &&
!test_bit(DF_RELOGIN, &ddb_entry->flags) &&
- !test_bit(DF_NO_RELOGIN, &ddb_entry->flags) &&
qla4_is_relogin_allowed(ha, conn_err)) {
/*
* This triggers a relogin. After the relogin_timer
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 03e028e6e809..2f40ac761cd4 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -801,7 +801,7 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
&ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
- if (!test_bit(AF_HBA_GOING_AWAY, &ha->flags))
+ if (!test_bit(AF_HA_REMOVAL, &ha->flags))
set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
break;
@@ -1008,34 +1008,9 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
mbox_sts[0], mbox_sts[2],
mbox_sts[3]));
break;
- } else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
- /* for use during init time, we only want to
- * relogin non-active ddbs */
- struct ddb_entry *ddb_entry;
-
- ddb_entry =
- /* FIXME: name length? */
- qla4xxx_lookup_ddb_by_fw_index(ha,
- mbox_sts[2]);
- if (!ddb_entry)
- break;
-
- ddb_entry->dev_scan_wait_to_complete_relogin =
- 0;
- ddb_entry->dev_scan_wait_to_start_relogin =
- jiffies +
- ((ddb_entry->default_time2wait +
- 4) * HZ);
-
- DEBUG2(printk("scsi%ld: ddb [%d] initiate"
- " RELOGIN after %d seconds\n",
- ha->host_no,
- ddb_entry->fw_ddb_index,
- ddb_entry->default_time2wait +
- 4));
- break;
}
-
+ case PROCESS_ALL_AENS:
+ default:
if (mbox_sts[1] == 0) { /* Global DB change. */
qla4xxx_reinitialize_ddb_list(ha);
} else if (mbox_sts[1] == 1) { /* Specific device. */
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index f65626aec7c1..f9d81c8372c3 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -32,6 +32,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
u_long wait_count;
uint32_t intr_status;
unsigned long flags = 0;
+ uint32_t dev_state;
/* Make sure that pointers are valid */
if (!mbx_cmd || !mbx_sts) {
@@ -40,12 +41,23 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
return status;
}
- if (is_qla8022(ha) &&
- test_bit(AF_FW_RECOVERY, &ha->flags)) {
- DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: prematurely "
- "completing mbx cmd as firmware recovery detected\n",
- ha->host_no, __func__));
- return status;
+ if (is_qla8022(ha)) {
+ if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
+ "prematurely completing mbx cmd as firmware "
+ "recovery detected\n", ha->host_no, __func__));
+ return status;
+ }
+ /* Do not send any mbx cmd if h/w is in failed state */
+ qla4_8xxx_idc_lock(ha);
+ dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla4_8xxx_idc_unlock(ha);
+ if (dev_state == QLA82XX_DEV_FAILED) {
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in "
+ "failed state, do not send any mailbox commands\n",
+ ha->host_no, __func__);
+ return status;
+ }
}
if ((is_aer_supported(ha)) &&
@@ -139,7 +151,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
test_bit(AF_ONLINE, &ha->flags) &&
- !test_bit(AF_HBA_GOING_AWAY, &ha->flags)) {
+ !test_bit(AF_HA_REMOVAL, &ha->flags)) {
/* Do not poll for completion. Use completion queue */
set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
@@ -395,9 +407,6 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
/*memcpy(ha->alias, init_fw_cb->Alias,
min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
- /* Save Command Line Paramater info */
- ha->discovery_wait = ql4xdiscoverywait;
-
if (ha->acb_version == ACB_SUPPORTED) {
ha->ipv6_options = init_fw_cb->ipv6_opts;
ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts;
@@ -467,6 +476,11 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
+ /* Set bit for "serialize task mgmt" all other bits need to be zero */
+ init_fw_cb->add_fw_options = 0;
+ init_fw_cb->add_fw_options |=
+ __constant_cpu_to_le16(SERIALIZE_TASK_MGMT);
+
if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
!= QLA_SUCCESS) {
DEBUG2(printk(KERN_WARNING
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 3d5ef2df4134..35381cb0936e 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -2304,14 +2304,13 @@ qla4_8xxx_enable_intrs(struct scsi_qla_host *ha)
void
qla4_8xxx_disable_intrs(struct scsi_qla_host *ha)
{
- if (test_bit(AF_INTERRUPTS_ON, &ha->flags))
+ if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
qla4_8xxx_mbx_intr_disable(ha);
spin_lock_irq(&ha->hardware_lock);
/* BIT 10 - set */
qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
spin_unlock_irq(&ha->hardware_lock);
- clear_bit(AF_INTERRUPTS_ON, &ha->flags);
}
struct ql4_init_msix_entry {
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 967836ef5ab2..a4acb0dd7beb 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -29,10 +29,6 @@ static struct kmem_cache *srb_cachep;
/*
* Module parameter information and variables
*/
-int ql4xdiscoverywait = 60;
-module_param(ql4xdiscoverywait, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(ql4xdiscoverywait, "Discovery wait time");
-
int ql4xdontresethba = 0;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
@@ -55,6 +51,17 @@ MODULE_PARM_DESC(ql4xenablemsix,
" 2 = enable MSI interrupt mechanism.");
#define QL4_DEF_QDEPTH 32
+static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
+module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xmaxqdepth,
+ "Maximum queue depth to report for target devices.\n"
+ " Default: 32.");
+
+static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
+module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
+MODULE_PARM_DESC(ql4xsess_recovery_tmo,
+ "Target Session Recovery Timeout.\n"
+ " Default: 30 sec.");
/*
* SCSI host template entry points
@@ -165,7 +172,7 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout "
"of (%d) secs exhausted, marking device DEAD.\n",
ha->host_no, __func__, ddb_entry->fw_ddb_index,
- QL4_SESS_RECOVERY_TMO));
+ ddb_entry->sess->recovery_tmo));
}
}
@@ -295,7 +302,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
{
int err;
- ddb_entry->sess->recovery_tmo = QL4_SESS_RECOVERY_TMO;
+ ddb_entry->sess->recovery_tmo = ql4xsess_recovery_tmo;
err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index);
if (err) {
@@ -753,12 +760,6 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
if (!pci_channel_offline(ha->pdev))
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
- if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) {
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s exited. HBA GOING AWAY\n",
- __func__));
- return;
- }
-
if (is_qla8022(ha)) {
qla4_8xxx_watchdog(ha);
}
@@ -1067,7 +1068,6 @@ void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
/* Disable the board */
ql4_printk(KERN_INFO, ha, "Disabling the board\n");
- set_bit(AF_HBA_GOING_AWAY, &ha->flags);
qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
qla4xxx_mark_all_devices_missing(ha);
@@ -1218,6 +1218,27 @@ recover_ha_init_adapter:
return status;
}
+static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
+{
+ struct ddb_entry *ddb_entry, *dtemp;
+
+ list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) {
+ if ((atomic_read(&ddb_entry->state) == DDB_STATE_MISSING) ||
+ (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD)) {
+ if (ddb_entry->fw_ddb_device_state ==
+ DDB_DS_SESSION_ACTIVE) {
+ atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " marked ONLINE\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+
+ iscsi_unblock_session(ddb_entry->sess);
+ } else
+ qla4xxx_relogin_device(ha, ddb_entry);
+ }
+ }
+}
+
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
{
if (ha->dpc_thread &&
@@ -1259,11 +1280,6 @@ static void qla4xxx_do_dpc(struct work_struct *work)
goto do_dpc_exit;
}
- /* HBA is in the process of being permanently disabled.
- * Don't process anything */
- if (test_bit(AF_HBA_GOING_AWAY, &ha->flags))
- return;
-
if (is_qla8022(ha)) {
if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
qla4_8xxx_idc_lock(ha);
@@ -1331,13 +1347,7 @@ dpc_post_reset_ha:
if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
if (!test_bit(AF_LINK_UP, &ha->flags)) {
/* ---- link down? --- */
- list_for_each_entry_safe(ddb_entry, dtemp,
- &ha->ddb_list, list) {
- if (atomic_read(&ddb_entry->state) ==
- DDB_STATE_ONLINE)
- qla4xxx_mark_device_missing(ha,
- ddb_entry);
- }
+ qla4xxx_mark_all_devices_missing(ha);
} else {
/* ---- link up? --- *
* F/W will auto login to all devices ONLY ONCE after
@@ -1346,30 +1356,7 @@ dpc_post_reset_ha:
* manually relogin to devices when recovering from
* connection failures, logouts, expired KATO, etc. */
- list_for_each_entry_safe(ddb_entry, dtemp,
- &ha->ddb_list, list) {
- if ((atomic_read(&ddb_entry->state) ==
- DDB_STATE_MISSING) ||
- (atomic_read(&ddb_entry->state) ==
- DDB_STATE_DEAD)) {
- if (ddb_entry->fw_ddb_device_state ==
- DDB_DS_SESSION_ACTIVE) {
- atomic_set(&ddb_entry->state,
- DDB_STATE_ONLINE);
- ql4_printk(KERN_INFO, ha,
- "scsi%ld: %s: ddb[%d]"
- " marked ONLINE\n",
- ha->host_no, __func__,
- ddb_entry->fw_ddb_index);
-
- iscsi_unblock_session(
- ddb_entry->sess);
- } else
- qla4xxx_relogin_device(
- ha, ddb_entry);
- }
-
- }
+ qla4xxx_relogin_all_devices(ha);
}
}
@@ -1630,6 +1617,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
uint8_t init_retry_count = 0;
char buf[34];
struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
+ uint32_t dev_state;
if (pci_enable_device(pdev))
return -1;
@@ -1713,6 +1701,18 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
while ((!test_bit(AF_ONLINE, &ha->flags)) &&
init_retry_count++ < MAX_INIT_RETRIES) {
+
+ if (is_qla8022(ha)) {
+ qla4_8xxx_idc_lock(ha);
+ dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla4_8xxx_idc_unlock(ha);
+ if (dev_state == QLA82XX_DEV_FAILED) {
+ ql4_printk(KERN_WARNING, ha, "%s: don't retry "
+ "initialize adapter. H/W is in failed state\n",
+ __func__);
+ break;
+ }
+ }
DEBUG2(printk("scsi: %s: retrying adapter initialization "
"(%d)\n", __func__, init_retry_count));
@@ -1815,6 +1815,44 @@ probe_disable_device:
}
/**
+ * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
+ * @ha: pointer to adapter structure
+ *
+ * Mark the other ISP-4xxx port to indicate that the driver is being removed,
+ * so that the other port will not re-initialize while in the process of
+ * removing the ha due to driver unload or hba hotplug.
+ **/
+static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
+{
+ struct scsi_qla_host *other_ha = NULL;
+ struct pci_dev *other_pdev = NULL;
+ int fn = ISP4XXX_PCI_FN_2;
+
+ /* iSCSI function numbers for ISP4xxx are 1 and 3 */
+ if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
+ fn = ISP4XXX_PCI_FN_1;
+
+ other_pdev =
+ pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
+ ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
+ fn));
+
+ /* Get other_ha if other_pdev is valid and its state is enabled */
+ if (other_pdev) {
+ if (atomic_read(&other_pdev->enable_cnt)) {
+ other_ha = pci_get_drvdata(other_pdev);
+ if (other_ha) {
+ set_bit(AF_HA_REMOVAL, &other_ha->flags);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
+ "Prevent %s reinit\n", __func__,
+ dev_name(&other_ha->pdev->dev)));
+ }
+ }
+ pci_dev_put(other_pdev);
+ }
+}
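On ISP4xxx the two iSCSI functions sit at PCI function 1 and function 3, so the function above finds the sibling port by testing BIT_1 of the local function number. A minimal sketch of that lookup, with plain integers standing in for struct pci_dev and PCI_FUNC():

/* Sketch only: given this port's PCI function number, pick the other iSCSI
 * function on an ISP4xxx (functions 1 and 3).  Plain integers stand in for
 * struct pci_dev and PCI_FUNC(). */
#include <stdio.h>

#define DEMO_FN_1 0x1
#define DEMO_FN_2 0x3	/* mirrors the ISP4XXX_PCI_FN_* values above */

static int demo_other_iscsi_fn(int my_fn)
{
	/* If bit 1 of our function number is set we are function 3,
	 * so the sibling is function 1, and vice versa. */
	return (my_fn & 0x2) ? DEMO_FN_1 : DEMO_FN_2;
}

int main(void)
{
	printf("port at fn 1 -> sibling fn %d\n", demo_other_iscsi_fn(1));
	printf("port at fn 3 -> sibling fn %d\n", demo_other_iscsi_fn(3));
	return 0;
}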
+
+/**
 * qla4xxx_remove_adapter - callback function to remove adapter.
* @pci_dev: PCI device pointer
**/
@@ -1824,7 +1862,8 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
ha = pci_get_drvdata(pdev);
- set_bit(AF_HBA_GOING_AWAY, &ha->flags);
+ if (!is_qla8022(ha))
+ qla4xxx_prevent_other_port_reinit(ha);
/* remove devs from iscsi_sessions to scsi_devices */
qla4xxx_free_ddb_list(ha);
@@ -1868,10 +1907,15 @@ static int qla4xxx_slave_alloc(struct scsi_device *sdev)
{
struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target);
struct ddb_entry *ddb = sess->dd_data;
+ int queue_depth = QL4_DEF_QDEPTH;
sdev->hostdata = ddb;
sdev->tagged_supported = 1;
- scsi_activate_tcq(sdev, QL4_DEF_QDEPTH);
+
+ if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
+ queue_depth = ql4xmaxqdepth;
+
+ scsi_activate_tcq(sdev, queue_depth);
return 0;
}
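ql4xmaxqdepth is honored only when it is non-zero and fits in 16 bits; anything else falls back to QL4_DEF_QDEPTH. A one-function sketch of the same clamp (names are illustrative):

/* Sketch only: validate a module-parameter queue depth, falling back to the
 * built-in default when it is zero or does not fit in 16 bits. */
#include <stdio.h>

#define DEMO_DEF_QDEPTH 32

static int demo_pick_qdepth(int requested)
{
	if (requested != 0 && requested <= 0xffff)
		return requested;
	return DEMO_DEF_QDEPTH;
}

int main(void)
{
	printf("%d %d %d\n", demo_pick_qdepth(64), demo_pick_qdepth(0),
	       demo_pick_qdepth(1 << 20));
	return 0;
}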
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 8475b308e01b..603155769407 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k5"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k6"
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index b4218390941e..3fd16d7212de 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1917,7 +1917,7 @@ store_priv_session_##field(struct device *dev, \
#define iscsi_priv_session_rw_attr(field, format) \
iscsi_priv_session_attr_show(field, format) \
iscsi_priv_session_attr_store(field) \
-static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUGO, \
+static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \
show_priv_session_##field, \
store_priv_session_##field)
iscsi_priv_session_rw_attr(recovery_tmo, "%d");
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 7ff61d76b4c5..b61ebec6bca7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2027,14 +2027,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
int old_rcd = sdkp->RCD;
int old_dpofua = sdkp->DPOFUA;
- if (sdp->skip_ms_page_8) {
- if (sdp->type == TYPE_RBC)
- goto defaults;
- else {
- modepage = 0x3F;
- dbd = 0;
- }
- } else if (sdp->type == TYPE_RBC) {
+ if (sdp->skip_ms_page_8)
+ goto defaults;
+
+ if (sdp->type == TYPE_RBC) {
modepage = 6;
dbd = 8;
} else {
@@ -2062,11 +2058,13 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
*/
if (len < 3)
goto bad_sense;
- else if (len > SD_BUF_SIZE) {
- sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
- "data from %d to %d bytes\n", len, SD_BUF_SIZE);
- len = SD_BUF_SIZE;
- }
+ if (len > 20)
+ len = 20;
+
+ /* Take headers and block descriptors into account */
+ len += data.header_length + data.block_descriptor_length;
+ if (len > SD_BUF_SIZE)
+ goto bad_sense;
/* Get the data */
res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
@@ -2074,45 +2072,16 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
if (scsi_status_is_good(res)) {
int offset = data.header_length + data.block_descriptor_length;
- while (offset < len) {
- u8 page_code = buffer[offset] & 0x3F;
- u8 spf = buffer[offset] & 0x40;
-
- if (page_code == 8 || page_code == 6) {
- /* We're interested only in the first 3 bytes.
- */
- if (len - offset <= 2) {
- sd_printk(KERN_ERR, sdkp, "Incomplete "
- "mode parameter data\n");
- goto defaults;
- } else {
- modepage = page_code;
- goto Page_found;
- }
- } else {
- /* Go to the next page */
- if (spf && len - offset > 3)
- offset += 4 + (buffer[offset+2] << 8) +
- buffer[offset+3];
- else if (!spf && len - offset > 1)
- offset += 2 + buffer[offset+1];
- else {
- sd_printk(KERN_ERR, sdkp, "Incomplete "
- "mode parameter data\n");
- goto defaults;
- }
- }
+ if (offset >= SD_BUF_SIZE - 2) {
+ sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n");
+ goto defaults;
}
- if (modepage == 0x3F) {
- sd_printk(KERN_ERR, sdkp, "No Caching mode page "
- "present\n");
- goto defaults;
- } else if ((buffer[offset] & 0x3f) != modepage) {
+ if ((buffer[offset] & 0x3f) != modepage) {
sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
goto defaults;
}
- Page_found:
+
if (modepage == 8) {
sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
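The rewritten length handling caps the mode-page payload at 20 bytes, adds back the header and block-descriptor lengths, and verifies the total still fits in the buffer before any buffer[offset + 2] access. A sketch of that bounds math, with DEMO_BUF_SIZE standing in for SD_BUF_SIZE:

/* Sketch only: bound a MODE SENSE transfer so later buffer[offset + 2]
 * accesses cannot run past the end of the buffer.  DEMO_BUF_SIZE stands in
 * for SD_BUF_SIZE; the header and block-descriptor lengths would come from
 * struct scsi_mode_data in the real driver. */
#include <stdio.h>

#define DEMO_BUF_SIZE 512

static int demo_mode_sense_len(int payload_len, int header_len, int bd_len)
{
	if (payload_len < 3)
		return -1;			/* too short to be useful */
	if (payload_len > 20)
		payload_len = 20;		/* only the cache page matters */

	payload_len += header_len + bd_len;	/* account for the prefix */
	if (payload_len > DEMO_BUF_SIZE)
		return -1;			/* would overrun the buffer */
	return payload_len;
}

int main(void)
{
	printf("%d\n", demo_mode_sense_len(200, 8, 16));	/* clamped: 44 */
	printf("%d\n", demo_mode_sense_len(12, 8, 600));	/* rejected: -1 */
	return 0;
}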
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 7f5a6a86f820..eb7a3e85304f 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -35,9 +35,11 @@
struct ses_device {
unsigned char *page1;
+ unsigned char *page1_types;
unsigned char *page2;
unsigned char *page10;
short page1_len;
+ short page1_num_types;
short page2_len;
short page10_len;
};
@@ -110,12 +112,12 @@ static int ses_set_page2_descriptor(struct enclosure_device *edev,
int i, j, count = 0, descriptor = ecomp->number;
struct scsi_device *sdev = to_scsi_device(edev->edev.parent);
struct ses_device *ses_dev = edev->scratch;
- unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
+ unsigned char *type_ptr = ses_dev->page1_types;
unsigned char *desc_ptr = ses_dev->page2 + 8;
/* Clear everything */
memset(desc_ptr, 0, ses_dev->page2_len - 8);
- for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) {
+ for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) {
for (j = 0; j < type_ptr[1]; j++) {
desc_ptr += 4;
if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
@@ -140,12 +142,12 @@ static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev,
int i, j, count = 0, descriptor = ecomp->number;
struct scsi_device *sdev = to_scsi_device(edev->edev.parent);
struct ses_device *ses_dev = edev->scratch;
- unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
+ unsigned char *type_ptr = ses_dev->page1_types;
unsigned char *desc_ptr = ses_dev->page2 + 8;
ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
- for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) {
+ for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) {
for (j = 0; j < type_ptr[1]; j++) {
desc_ptr += 4;
if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
@@ -358,7 +360,7 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL;
int i, j, page7_len, len, components;
struct ses_device *ses_dev = edev->scratch;
- int types = ses_dev->page1[10];
+ int types = ses_dev->page1_num_types;
unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
if (!hdr_buf)
@@ -390,10 +392,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
len = (desc_ptr[2] << 8) + desc_ptr[3];
/* skip past overall descriptor */
desc_ptr += len + 4;
- if (ses_dev->page10)
- addl_desc_ptr = ses_dev->page10 + 8;
}
- type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
+ if (ses_dev->page10)
+ addl_desc_ptr = ses_dev->page10 + 8;
+ type_ptr = ses_dev->page1_types;
components = 0;
for (i = 0; i < types; i++, type_ptr += 4) {
for (j = 0; j < type_ptr[1]; j++) {
@@ -503,6 +505,7 @@ static int ses_intf_add(struct device *cdev,
u32 result;
int i, types, len, components = 0;
int err = -ENOMEM;
+ int num_enclosures;
struct enclosure_device *edev;
struct ses_component *scomp = NULL;
@@ -530,16 +533,6 @@ static int ses_intf_add(struct device *cdev,
if (result)
goto recv_failed;
- if (hdr_buf[1] != 0) {
- /* FIXME: need subenclosure support; I've just never
- * seen a device with subenclosures and it makes the
- * traversal routines more complex */
- sdev_printk(KERN_ERR, sdev,
- "FIXME driver has no support for subenclosures (%d)\n",
- hdr_buf[1]);
- goto err_free;
- }
-
len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
buf = kzalloc(len, GFP_KERNEL);
if (!buf)
@@ -549,11 +542,24 @@ static int ses_intf_add(struct device *cdev,
if (result)
goto recv_failed;
- types = buf[10];
+ types = 0;
- type_ptr = buf + 12 + buf[11];
+ /* we always have one main enclosure and the rest are referred
+ * to as secondary subenclosures */
+ num_enclosures = buf[1] + 1;
- for (i = 0; i < types; i++, type_ptr += 4) {
+ /* begin at the enclosure descriptor */
+ type_ptr = buf + 8;
+ /* skip all the enclosure descriptors */
+ for (i = 0; i < num_enclosures && type_ptr < buf + len; i++) {
+ types += type_ptr[2];
+ type_ptr += type_ptr[3] + 4;
+ }
+
+ ses_dev->page1_types = type_ptr;
+ ses_dev->page1_num_types = types;
+
+ for (i = 0; i < types && type_ptr < buf + len; i++, type_ptr += 4) {
if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
components += type_ptr[1];
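With subenclosures supported, SES page 1 starts with one enclosure descriptor for the primary enclosure plus buf[1] secondaries, and the type descriptor headers only begin after all of them, so the code walks each variable-length enclosure descriptor and sums its type-count byte. A sketch of that walk over a hand-built page (offsets follow the SES layout; the buffer contents are invented):

/* Sketch only: count SES type-descriptor headers across a primary enclosure
 * and its subenclosures.  Offsets follow the SES configuration page layout
 * (enclosure descriptors start at byte 8; byte 2 of each is its type count,
 * byte 3 its extra length), but the sample buffer below is invented. */
#include <stdio.h>

static int demo_count_types(const unsigned char *page1, int len)
{
	int num_enclosures = page1[1] + 1;	/* primary + secondaries */
	const unsigned char *p = page1 + 8;	/* first enclosure descriptor */
	int i, types = 0;

	for (i = 0; i < num_enclosures && p < page1 + len; i++) {
		types += p[2];			/* types this enclosure owns */
		p += p[3] + 4;			/* skip the whole descriptor */
	}
	return types;
}

int main(void)
{
	/* One secondary subenclosure; descriptors are 4 + extra-len bytes. */
	unsigned char page1[8 + 40 + 40] = { 0 };

	page1[1] = 1;		/* one secondary subenclosure */
	page1[8 + 2] = 3;	/* primary: 3 type descriptor headers */
	page1[8 + 3] = 36;	/* primary descriptor extra length */
	page1[48 + 2] = 2;	/* secondary: 2 type descriptor headers */
	page1[48 + 3] = 36;

	printf("type descriptor headers: %d\n",
	       demo_count_types(page1, (int)sizeof(page1)));
	return 0;
}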
diff --git a/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c b/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c
index 842cd9214a5e..289729daba80 100644
--- a/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c
+++ b/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c
@@ -1191,7 +1191,7 @@ static int cyasblkdev_add_disks(int bus_num,
bd->user_disk_1->first_minor = (devidx + 1) << CYASBLKDEV_SHIFT;
bd->user_disk_1->minors = 8;
bd->user_disk_1->fops = &cyasblkdev_bdops;
- bd->user_disk_0->events = DISK_EVENT_MEDIA_CHANGE;
+ bd->user_disk_1->events = DISK_EVENT_MEDIA_CHANGE;
bd->user_disk_1->private_data = bd;
bd->user_disk_1->queue = bd->queue.queue;
bd->dbgprn_flags = DBGPRN_RD_RQ;
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 2fac3be209ac..9ef2dbbfa62b 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -29,4 +29,6 @@ config TCM_PSCSI
Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
passthrough access to Linux/SCSI device
+source "drivers/target/loopback/Kconfig"
+
endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 973bb190ef57..1178bbfc68fe 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -1,4 +1,3 @@
-EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/drivers/scsi/
target_core_mod-y := target_core_configfs.o \
target_core_device.o \
@@ -13,7 +12,8 @@ target_core_mod-y := target_core_configfs.o \
target_core_transport.o \
target_core_cdb.o \
target_core_ua.o \
- target_core_rd.o
+ target_core_rd.o \
+ target_core_stat.o
obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
@@ -21,3 +21,6 @@ obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o
obj-$(CONFIG_TCM_FILEIO) += target_core_file.o
obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
+
+# Fabric modules
+obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig
new file mode 100644
index 000000000000..57dcbc2d711b
--- /dev/null
+++ b/drivers/target/loopback/Kconfig
@@ -0,0 +1,11 @@
+config LOOPBACK_TARGET
+ tristate "TCM Virtual SAS target and Linux/SCSI LDD fabric loopback module"
+ help
+ Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
+ fabric loopback module.
+
+config LOOPBACK_TARGET_CDB_DEBUG
+ bool "TCM loopback fabric module CDB debug code"
+ depends on LOOPBACK_TARGET
+ help
+ Say Y here to enable the TCM loopback fabric module CDB debug code
diff --git a/drivers/target/loopback/Makefile b/drivers/target/loopback/Makefile
new file mode 100644
index 000000000000..6abebdf95659
--- /dev/null
+++ b/drivers/target/loopback/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_LOOPBACK_TARGET) += tcm_loop.o
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
new file mode 100644
index 000000000000..aed4e464d31c
--- /dev/null
+++ b/drivers/target/loopback/tcm_loop.c
@@ -0,0 +1,1579 @@
+/*******************************************************************************
+ *
+ * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
+ * for emulated SAS initiator ports
+ *
+ * © Copyright 2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/libsas.h> /* For TASK_ATTR_* */
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_fabric_lib.h>
+#include <target/target_core_configfs.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_tmr.h>
+
+#include "tcm_loop.h"
+
+#define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
+
+/* Local pointer to allocated TCM configfs fabric module */
+static struct target_fabric_configfs *tcm_loop_fabric_configfs;
+
+static struct kmem_cache *tcm_loop_cmd_cache;
+
+static int tcm_loop_hba_no_cnt;
+
+/*
+ * Allocate a tcm_loop cmd descriptor from target_core_mod code
+ *
+ * Can be called from interrupt context in tcm_loop_queuecommand() below
+ */
+static struct se_cmd *tcm_loop_allocate_core_cmd(
+ struct tcm_loop_hba *tl_hba,
+ struct se_portal_group *se_tpg,
+ struct scsi_cmnd *sc)
+{
+ struct se_cmd *se_cmd;
+ struct se_session *se_sess;
+ struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus;
+ struct tcm_loop_cmd *tl_cmd;
+ int sam_task_attr;
+
+ if (!tl_nexus) {
+ scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
+ " does not exist\n");
+ set_host_byte(sc, DID_ERROR);
+ return NULL;
+ }
+ se_sess = tl_nexus->se_sess;
+
+ tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
+ if (!tl_cmd) {
+ printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n");
+ set_host_byte(sc, DID_ERROR);
+ return NULL;
+ }
+ se_cmd = &tl_cmd->tl_se_cmd;
+ /*
+ * Save the pointer to struct scsi_cmnd *sc
+ */
+ tl_cmd->sc = sc;
+ /*
+ * Locate the SAM Task Attr from struct scsi_cmnd *
+ */
+ if (sc->device->tagged_supported) {
+ switch (sc->tag) {
+ case HEAD_OF_QUEUE_TAG:
+ sam_task_attr = TASK_ATTR_HOQ;
+ break;
+ case ORDERED_QUEUE_TAG:
+ sam_task_attr = TASK_ATTR_ORDERED;
+ break;
+ default:
+ sam_task_attr = TASK_ATTR_SIMPLE;
+ break;
+ }
+ } else
+ sam_task_attr = TASK_ATTR_SIMPLE;
+
+ /*
+ * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+ */
+ transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+ scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
+ &tl_cmd->tl_sense_buf[0]);
+
+ /*
+ * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
+ */
+ if (scsi_bidi_cmnd(sc))
+ T_TASK(se_cmd)->t_tasks_bidi = 1;
+ /*
+ * Locate the struct se_lun pointer and attach it to struct se_cmd
+ */
+ if (transport_get_lun_for_cmd(se_cmd, NULL, tl_cmd->sc->device->lun) < 0) {
+ kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+ set_host_byte(sc, DID_NO_CONNECT);
+ return NULL;
+ }
+
+ transport_device_setup_cmd(se_cmd);
+ return se_cmd;
+}
+
+/*
+ * Called by struct target_core_fabric_ops->new_cmd_map()
+ *
+ * Always called in process context. A non-zero return value
+ * here signals the caller to handle an exception based on the return code.
+ */
+static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
+{
+ struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+ struct tcm_loop_cmd, tl_se_cmd);
+ struct scsi_cmnd *sc = tl_cmd->sc;
+ void *mem_ptr, *mem_bidi_ptr = NULL;
+ u32 sg_no_bidi = 0;
+ int ret;
+ /*
+ * Allocate the necessary tasks to complete the received CDB+data
+ */
+ ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd);
+ if (ret == -1) {
+ /* Out of Resources */
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ } else if (ret == -2) {
+ /*
+ * Handle case for SAM_STAT_RESERVATION_CONFLICT
+ */
+ if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ /*
+ * Otherwise, return SAM_STAT_CHECK_CONDITION and return
+ * sense data.
+ */
+ return PYX_TRANSPORT_USE_SENSE_REASON;
+ }
+ /*
+ * Setup the struct scatterlist memory from the received
+ * struct scsi_cmnd.
+ */
+ if (scsi_sg_count(sc)) {
+ se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM;
+ mem_ptr = (void *)scsi_sglist(sc);
+ /*
+ * For BIDI commands, pass in the extra READ buffer
+ * to transport_generic_map_mem_to_cmd() below..
+ */
+ if (T_TASK(se_cmd)->t_tasks_bidi) {
+ struct scsi_data_buffer *sdb = scsi_in(sc);
+
+ mem_bidi_ptr = (void *)sdb->table.sgl;
+ sg_no_bidi = sdb->table.nents;
+ }
+ } else {
+ /*
+ * Used for DMA_NONE
+ */
+ mem_ptr = NULL;
+ }
+ /*
+ * Map the SG memory into struct se_mem->page linked list using the same
+ * physical memory at sg->page_link.
+ */
+ ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr,
+ scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi);
+ if (ret < 0)
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+ return 0;
+}
+
+/*
+ * Called from struct target_core_fabric_ops->check_stop_free()
+ */
+static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
+{
+ /*
+ * Do not release struct se_cmd's containing a valid TMR
+ * pointer. These will be released directly in tcm_loop_device_reset()
+ * with transport_generic_free_cmd().
+ */
+ if (se_cmd->se_tmr_req)
+ return;
+ /*
+ * Release the struct se_cmd, which will make a callback to release
+ * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
+ */
+ transport_generic_free_cmd(se_cmd, 0, 1, 0);
+}
+
+/*
+ * Called from struct target_core_fabric_ops->release_cmd_to_pool()
+ */
+static void tcm_loop_deallocate_core_cmd(struct se_cmd *se_cmd)
+{
+ struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+ struct tcm_loop_cmd, tl_se_cmd);
+
+ kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+}
+
+static int tcm_loop_proc_info(struct Scsi_Host *host, char *buffer,
+ char **start, off_t offset,
+ int length, int inout)
+{
+ return sprintf(buffer, "tcm_loop_proc_info()\n");
+}
+
+static int tcm_loop_driver_probe(struct device *);
+static int tcm_loop_driver_remove(struct device *);
+
+static int pseudo_lld_bus_match(struct device *dev,
+ struct device_driver *dev_driver)
+{
+ return 1;
+}
+
+static struct bus_type tcm_loop_lld_bus = {
+ .name = "tcm_loop_bus",
+ .match = pseudo_lld_bus_match,
+ .probe = tcm_loop_driver_probe,
+ .remove = tcm_loop_driver_remove,
+};
+
+static struct device_driver tcm_loop_driverfs = {
+ .name = "tcm_loop",
+ .bus = &tcm_loop_lld_bus,
+};
+/*
+ * Used with root_device_register() in tcm_loop_alloc_core_bus() below
+ */
+struct device *tcm_loop_primary;
+
+/*
+ * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and
+ * drivers/scsi/libiscsi.c:iscsi_change_queue_depth()
+ */
+static int tcm_loop_change_queue_depth(
+ struct scsi_device *sdev,
+ int depth,
+ int reason)
+{
+ switch (reason) {
+ case SCSI_QDEPTH_DEFAULT:
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+ break;
+ case SCSI_QDEPTH_QFULL:
+ scsi_track_queue_full(sdev, depth);
+ break;
+ case SCSI_QDEPTH_RAMP_UP:
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return sdev->queue_depth;
+}
+
+/*
+ * Main entry point from struct scsi_host_template for incoming SCSI CDB+Data
+ * from Linux/SCSI subsystem for SCSI low level device drivers (LLDs)
+ */
+static int tcm_loop_queuecommand(
+ struct Scsi_Host *sh,
+ struct scsi_cmnd *sc)
+{
+ struct se_cmd *se_cmd;
+ struct se_portal_group *se_tpg;
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_tpg *tl_tpg;
+
+ TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
+ " scsi_buf_len: %u\n", sc->device->host->host_no,
+ sc->device->id, sc->device->channel, sc->device->lun,
+ sc->cmnd[0], scsi_bufflen(sc));
+ /*
+ * Locate the struct tcm_loop_hba pointer
+ */
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+ se_tpg = &tl_tpg->tl_se_tpg;
+ /*
+ * Determine the SAM Task Attribute and allocate tl_cmd and
+ * tl_cmd->tl_se_cmd from TCM infrastructure
+ */
+ se_cmd = tcm_loop_allocate_core_cmd(tl_hba, se_tpg, sc);
+ if (!se_cmd) {
+ sc->scsi_done(sc);
+ return 0;
+ }
+ /*
+ * Queue up the newly allocated command to be processed in TCM thread context.
+ */
+ transport_generic_handle_cdb_map(se_cmd);
+ return 0;
+}
+
+/*
+ * Called from SCSI EH process context to issue a LUN_RESET TMR
+ * to struct scsi_device
+ */
+static int tcm_loop_device_reset(struct scsi_cmnd *sc)
+{
+ struct se_cmd *se_cmd = NULL;
+ struct se_portal_group *se_tpg;
+ struct se_session *se_sess;
+ struct tcm_loop_cmd *tl_cmd = NULL;
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_nexus *tl_nexus;
+ struct tcm_loop_tmr *tl_tmr = NULL;
+ struct tcm_loop_tpg *tl_tpg;
+ int ret = FAILED;
+ /*
+ * Locate the tcm_loop_hba_t pointer
+ */
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ /*
+ * Locate the tl_nexus and se_sess pointers
+ */
+ tl_nexus = tl_hba->tl_nexus;
+ if (!tl_nexus) {
+ printk(KERN_ERR "Unable to perform device reset without"
+ " active I_T Nexus\n");
+ return FAILED;
+ }
+ se_sess = tl_nexus->se_sess;
+ /*
+ * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
+ */
+ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+ se_tpg = &tl_tpg->tl_se_tpg;
+
+ tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
+ if (!tl_cmd) {
+ printk(KERN_ERR "Unable to allocate memory for tl_cmd\n");
+ return FAILED;
+ }
+
+ tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
+ if (!tl_tmr) {
+ printk(KERN_ERR "Unable to allocate memory for tl_tmr\n");
+ goto release;
+ }
+ init_waitqueue_head(&tl_tmr->tl_tmr_wait);
+
+ se_cmd = &tl_cmd->tl_se_cmd;
+ /*
+ * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+ */
+ transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
+ DMA_NONE, TASK_ATTR_SIMPLE,
+ &tl_cmd->tl_sense_buf[0]);
+ /*
+ * Allocate the LUN_RESET TMR
+ */
+ se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
+ TMR_LUN_RESET);
+ if (!se_cmd->se_tmr_req)
+ goto release;
+ /*
+ * Locate the underlying TCM struct se_lun from sc->device->lun
+ */
+ if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0)
+ goto release;
+ /*
+ * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
+ * to wake us up.
+ */
+ transport_generic_handle_tmr(se_cmd);
+ wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
+ /*
+ * The TMR LUN_RESET has completed, check the response status and
+ * then release allocations.
+ */
+ ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
+ SUCCESS : FAILED;
+release:
+ if (se_cmd)
+ transport_generic_free_cmd(se_cmd, 1, 1, 0);
+ else
+ kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+ kfree(tl_tmr);
+ return ret;
+}
+
+static int tcm_loop_slave_alloc(struct scsi_device *sd)
+{
+ set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
+ return 0;
+}
+
+static int tcm_loop_slave_configure(struct scsi_device *sd)
+{
+ return 0;
+}
+
+static struct scsi_host_template tcm_loop_driver_template = {
+ .proc_info = tcm_loop_proc_info,
+ .proc_name = "tcm_loopback",
+ .name = "TCM_Loopback",
+ .queuecommand = tcm_loop_queuecommand,
+ .change_queue_depth = tcm_loop_change_queue_depth,
+ .eh_device_reset_handler = tcm_loop_device_reset,
+ .can_queue = TL_SCSI_CAN_QUEUE,
+ .this_id = -1,
+ .sg_tablesize = TL_SCSI_SG_TABLESIZE,
+ .cmd_per_lun = TL_SCSI_CMD_PER_LUN,
+ .max_sectors = TL_SCSI_MAX_SECTORS,
+ .use_clustering = DISABLE_CLUSTERING,
+ .slave_alloc = tcm_loop_slave_alloc,
+ .slave_configure = tcm_loop_slave_configure,
+ .module = THIS_MODULE,
+};
+
+static int tcm_loop_driver_probe(struct device *dev)
+{
+ struct tcm_loop_hba *tl_hba;
+ struct Scsi_Host *sh;
+ int error;
+
+ tl_hba = to_tcm_loop_hba(dev);
+
+ sh = scsi_host_alloc(&tcm_loop_driver_template,
+ sizeof(struct tcm_loop_hba));
+ if (!sh) {
+ printk(KERN_ERR "Unable to allocate struct scsi_host\n");
+ return -ENODEV;
+ }
+ tl_hba->sh = sh;
+
+ /*
+ * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
+ */
+ *((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
+ /*
+ * Setup single ID, Channel and LUN for now..
+ */
+ sh->max_id = 2;
+ sh->max_lun = 0;
+ sh->max_channel = 0;
+ sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
+
+ error = scsi_add_host(sh, &tl_hba->dev);
+ if (error) {
+ printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
+ scsi_host_put(sh);
+ return -ENODEV;
+ }
+ return 0;
+}
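A stand-alone illustration (not part of the patch, using simplified stand-in types): the probe above stashes the tcm_loop_hba pointer inside the Scsi_Host private hostdata area, and tcm_loop_queuecommand()/tcm_loop_device_reset() later recover it with a double-pointer cast. A minimal sketch of that idiom:

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for struct Scsi_Host and struct tcm_loop_hba */
    struct demo_host { unsigned char hostdata[64]; };
    struct demo_hba  { int host_id; };

    int main(void)
    {
        struct demo_hba hba = { .host_id = 7 };
        struct demo_host *sh = calloc(1, sizeof(*sh));

        /* Store the HBA pointer in the host private area (cf. tcm_loop_driver_probe) */
        *((struct demo_hba **)sh->hostdata) = &hba;

        /* Recover it later (cf. tcm_loop_queuecommand) */
        struct demo_hba *found = *(struct demo_hba **)sh->hostdata;
        printf("host_id: %d\n", found->host_id);

        free(sh);
        return 0;
    }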
+
+static int tcm_loop_driver_remove(struct device *dev)
+{
+ struct tcm_loop_hba *tl_hba;
+ struct Scsi_Host *sh;
+
+ tl_hba = to_tcm_loop_hba(dev);
+ sh = tl_hba->sh;
+
+ scsi_remove_host(sh);
+ scsi_host_put(sh);
+ return 0;
+}
+
+static void tcm_loop_release_adapter(struct device *dev)
+{
+ struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
+
+ kfree(tl_hba);
+}
+
+/*
+ * Called from tcm_loop_make_scsi_hba() below
+ */
+static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
+{
+ int ret;
+
+ tl_hba->dev.bus = &tcm_loop_lld_bus;
+ tl_hba->dev.parent = tcm_loop_primary;
+ tl_hba->dev.release = &tcm_loop_release_adapter;
+ dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
+
+ ret = device_register(&tl_hba->dev);
+ if (ret) {
+ printk(KERN_ERR "device_register() failed for"
+ " tl_hba->dev: %d\n", ret);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/*
+ * Called from tcm_loop_fabric_init() below to load the emulated
+ * tcm_loop SCSI bus.
+ */
+static int tcm_loop_alloc_core_bus(void)
+{
+ int ret;
+
+ tcm_loop_primary = root_device_register("tcm_loop_0");
+ if (IS_ERR(tcm_loop_primary)) {
+ printk(KERN_ERR "Unable to allocate tcm_loop_primary\n");
+ return PTR_ERR(tcm_loop_primary);
+ }
+
+ ret = bus_register(&tcm_loop_lld_bus);
+ if (ret) {
+ printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n");
+ goto dev_unreg;
+ }
+
+ ret = driver_register(&tcm_loop_driverfs);
+ if (ret) {
+ printk(KERN_ERR "driver_register() failed for"
+ "tcm_loop_driverfs\n");
+ goto bus_unreg;
+ }
+
+ printk(KERN_INFO "Initialized TCM Loop Core Bus\n");
+ return ret;
+
+bus_unreg:
+ bus_unregister(&tcm_loop_lld_bus);
+dev_unreg:
+ root_device_unregister(tcm_loop_primary);
+ return ret;
+}
+
+static void tcm_loop_release_core_bus(void)
+{
+ driver_unregister(&tcm_loop_driverfs);
+ bus_unregister(&tcm_loop_lld_bus);
+ root_device_unregister(tcm_loop_primary);
+
+ printk(KERN_INFO "Releasing TCM Loop Core BUS\n");
+}
+
+static char *tcm_loop_get_fabric_name(void)
+{
+ return "loopback";
+}
+
+static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ struct tcm_loop_tpg *tl_tpg =
+ (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+ /*
+ * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
+ * time based on the protocol dependent prefix of the passed configfs group.
+ *
+ * Based upon tl_proto_id, TCM_Loop emulates the requested fabric
+ * ProtocolID using target_core_fabric_lib.c symbols.
+ */
+ switch (tl_hba->tl_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_get_fabric_proto_ident(se_tpg);
+ case SCSI_PROTOCOL_FCP:
+ return fc_get_fabric_proto_ident(se_tpg);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_fabric_proto_ident(se_tpg);
+ default:
+ printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ " SAS emulation\n", tl_hba->tl_proto_id);
+ break;
+ }
+
+ return sas_get_fabric_proto_ident(se_tpg);
+}
+
+static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
+{
+ struct tcm_loop_tpg *tl_tpg =
+ (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ /*
+ * Return the passed NAA identifier for the SAS Target Port
+ */
+ return &tl_tpg->tl_hba->tl_wwn_address[0];
+}
+
+static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
+{
+ struct tcm_loop_tpg *tl_tpg =
+ (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ /*
+ * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
+ * to represent the SCSI Target Port.
+ */
+ return tl_tpg->tl_tpgt;
+}
+
+static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static u32 tcm_loop_get_pr_transport_id(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ struct tcm_loop_tpg *tl_tpg =
+ (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+
+ switch (tl_hba->tl_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ case SCSI_PROTOCOL_FCP:
+ return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ default:
+ printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ " SAS emulation\n", tl_hba->tl_proto_id);
+ break;
+ }
+
+ return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+}
+
+static u32 tcm_loop_get_pr_transport_id_len(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ struct tcm_loop_tpg *tl_tpg =
+ (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+
+ switch (tl_hba->tl_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ case SCSI_PROTOCOL_FCP:
+ return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ default:
+ printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ " SAS emulation\n", tl_hba->tl_proto_id);
+ break;
+ }
+
+ return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+}
+
+/*
+ * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
+ * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
+ */
+static char *tcm_loop_parse_pr_out_transport_id(
+ struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ struct tcm_loop_tpg *tl_tpg =
+ (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+
+ switch (tl_hba->tl_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ case SCSI_PROTOCOL_FCP:
+ return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ default:
+ printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ " SAS emulation\n", tl_hba->tl_proto_id);
+ break;
+ }
+
+ return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+}
+
+/*
+ * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
+ * based upon the incoming fabric dependent SCSI Initiator Port
+ */
+static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+/*
+ * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
+ * local virtual Linux/SCSI LLD passthrough into a VM hypervisor guest
+ */
+static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+/*
+ * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
+ * never be called for TCM_Loop by target_core_fabric_configfs.c code.
+ * It has been added here as a nop for target_fabric_tf_ops_check()
+ */
+static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
+ struct se_portal_group *se_tpg)
+{
+ struct tcm_loop_nacl *tl_nacl;
+
+ tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
+ if (!tl_nacl) {
+ printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n");
+ return NULL;
+ }
+
+ return &tl_nacl->se_node_acl;
+}
+
+static void tcm_loop_tpg_release_fabric_acl(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl)
+{
+ struct tcm_loop_nacl *tl_nacl = container_of(se_nacl,
+ struct tcm_loop_nacl, se_node_acl);
+
+ kfree(tl_nacl);
+}
+
+static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static void tcm_loop_new_cmd_failure(struct se_cmd *se_cmd)
+{
+ /*
+ * Since TCM_loop is already passing struct scatterlist data from
+ * struct scsi_cmnd, no more Linux/SCSI failure dependent state need
+ * to be handled here.
+ */
+ return;
+}
+
+static int tcm_loop_is_state_remove(struct se_cmd *se_cmd)
+{
+ /*
+ * Assume struct scsi_cmnd is not in remove state..
+ */
+ return 0;
+}
+
+static int tcm_loop_sess_logged_in(struct se_session *se_sess)
+{
+ /*
+ * Assume that TL Nexus is always active
+ */
+ return 1;
+}
+
+static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
+{
+ return 1;
+}
+
+static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
+{
+ return;
+}
+
+static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
+{
+ return 1;
+}
+
+static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
+{
+ struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+ struct tcm_loop_cmd, tl_se_cmd);
+
+ return tl_cmd->sc_cmd_state;
+}
+
+static int tcm_loop_shutdown_session(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static void tcm_loop_close_session(struct se_session *se_sess)
+{
+ return;
+};
+
+static void tcm_loop_stop_session(
+ struct se_session *se_sess,
+ int sess_sleep,
+ int conn_sleep)
+{
+ return;
+}
+
+static void tcm_loop_fall_back_to_erl0(struct se_session *se_sess)
+{
+ return;
+}
+
+static int tcm_loop_write_pending(struct se_cmd *se_cmd)
+{
+ /*
+ * Linux/SCSI has already sent down a struct scsi_cmnd with
+ * sc->sc_data_direction of DMA_TO_DEVICE and its struct scatterlist array
+ * memory, and that memory has already been mapped to struct se_cmd->t_mem_list
+ * format with transport_generic_map_mem_to_cmd().
+ *
+ * We now tell TCM to add this WRITE CDB directly into the TCM storage
+ * object execution queue.
+ */
+ transport_generic_process_write(se_cmd);
+ return 0;
+}
+
+static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
+{
+ struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+ struct tcm_loop_cmd, tl_se_cmd);
+ struct scsi_cmnd *sc = tl_cmd->sc;
+
+ TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
+ " cdb: 0x%02x\n", sc, sc->cmnd[0]);
+
+ sc->result = SAM_STAT_GOOD;
+ set_host_byte(sc, DID_OK);
+ sc->scsi_done(sc);
+ return 0;
+}
+
+static int tcm_loop_queue_status(struct se_cmd *se_cmd)
+{
+ struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+ struct tcm_loop_cmd, tl_se_cmd);
+ struct scsi_cmnd *sc = tl_cmd->sc;
+
+ TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p"
+ " cdb: 0x%02x\n", sc, sc->cmnd[0]);
+
+ if (se_cmd->sense_buffer &&
+ ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
+ (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
+
+ memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE);
+ sc->result = SAM_STAT_CHECK_CONDITION;
+ set_driver_byte(sc, DRIVER_SENSE);
+ } else
+ sc->result = se_cmd->scsi_status;
+
+ set_host_byte(sc, DID_OK);
+ sc->scsi_done(sc);
+ return 0;
+}
+
+static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+ struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
+ /*
+ * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, go ahead
+ * and wake up the wait_queue_head_t in tcm_loop_device_reset()
+ */
+ atomic_set(&tl_tmr->tmr_complete, 1);
+ wake_up(&tl_tmr->tl_tmr_wait);
+ return 0;
+}
+
+static u16 tcm_loop_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
+{
+ return 0;
+}
+
+static u16 tcm_loop_get_fabric_sense_len(void)
+{
+ return 0;
+}
+
+static u64 tcm_loop_pack_lun(unsigned int lun)
+{
+ u64 result;
+
+ /* LSB of lun into byte 1 big-endian */
+ result = ((lun & 0xff) << 8);
+ /* use flat space addressing method */
+ result |= 0x40 | ((lun >> 8) & 0x3f);
+
+ return cpu_to_le64(result);
+}
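A worked example of the flat space addressing math above (an illustration, not part of the patch, assuming a little-endian host and omitting the kernel cpu_to_le64() step):

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the packing logic of tcm_loop_pack_lun() above */
    static uint64_t pack_lun_flat(unsigned int lun)
    {
        uint64_t result = (lun & 0xff) << 8;      /* LSB of lun into byte 1 */
        result |= 0x40 | ((lun >> 8) & 0x3f);     /* flat space addressing method */
        return result;
    }

    int main(void)
    {
        /* lun 5 -> 0x0540, lun 300 -> 0x2c41 */
        printf("lun 5   -> 0x%04llx\n", (unsigned long long)pack_lun_flat(5));
        printf("lun 300 -> 0x%04llx\n", (unsigned long long)pack_lun_flat(300));
        return 0;
    }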
+
+static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
+{
+ switch (tl_hba->tl_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return "SAS";
+ case SCSI_PROTOCOL_FCP:
+ return "FCP";
+ case SCSI_PROTOCOL_ISCSI:
+ return "iSCSI";
+ default:
+ break;
+ }
+
+ return "Unknown";
+}
+
+/* Start items for tcm_loop_port_cit */
+
+static int tcm_loop_port_link(
+ struct se_portal_group *se_tpg,
+ struct se_lun *lun)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+ struct tcm_loop_tpg, tl_se_tpg);
+ struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+
+ atomic_inc(&tl_tpg->tl_tpg_port_count);
+ smp_mb__after_atomic_inc();
+ /*
+ * Add Linux/SCSI struct scsi_device by HCTL
+ */
+ scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
+
+ printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n");
+ return 0;
+}
+
+static void tcm_loop_port_unlink(
+ struct se_portal_group *se_tpg,
+ struct se_lun *se_lun)
+{
+ struct scsi_device *sd;
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_tpg *tl_tpg;
+
+ tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
+ tl_hba = tl_tpg->tl_hba;
+
+ sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
+ se_lun->unpacked_lun);
+ if (!sd) {
+ printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:"
+ "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
+ return;
+ }
+ /*
+ * Remove Linux/SCSI struct scsi_device by HCTL
+ */
+ scsi_remove_device(sd);
+ scsi_device_put(sd);
+
+ atomic_dec(&tl_tpg->tl_tpg_port_count);
+ smp_mb__after_atomic_dec();
+
+ printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n");
+}
+
+/* End items for tcm_loop_port_cit */
+
+/* Start items for tcm_loop_nexus_cit */
+
+static int tcm_loop_make_nexus(
+ struct tcm_loop_tpg *tl_tpg,
+ const char *name)
+{
+ struct se_portal_group *se_tpg;
+ struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+ struct tcm_loop_nexus *tl_nexus;
+
+ if (tl_tpg->tl_hba->tl_nexus) {
+ printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
+ return -EEXIST;
+ }
+ se_tpg = &tl_tpg->tl_se_tpg;
+
+ tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
+ if (!tl_nexus) {
+ printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n");
+ return -ENOMEM;
+ }
+ /*
+ * Initialize the struct se_session pointer
+ */
+ tl_nexus->se_sess = transport_init_session();
+ if (!tl_nexus->se_sess)
+ goto out;
+ /*
+ * Since we are running in 'demo mode', this call will generate a
+ * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
+ * Initiator port name of the passed configfs group 'name'.
+ */
+ tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+ se_tpg, (unsigned char *)name);
+ if (!tl_nexus->se_sess->se_node_acl) {
+ transport_free_session(tl_nexus->se_sess);
+ goto out;
+ }
+ /*
+ * Now, register the SAS I_T Nexus as active with the call to
+ * transport_register_session()
+ */
+ __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
+ tl_nexus->se_sess, (void *)tl_nexus);
+ tl_tpg->tl_hba->tl_nexus = tl_nexus;
+ printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+ " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+ name);
+ return 0;
+
+out:
+ kfree(tl_nexus);
+ return -ENOMEM;
+}
+
+static int tcm_loop_drop_nexus(
+ struct tcm_loop_tpg *tpg)
+{
+ struct se_session *se_sess;
+ struct tcm_loop_nexus *tl_nexus;
+ struct tcm_loop_hba *tl_hba = tpg->tl_hba;
+
+ tl_nexus = tpg->tl_hba->tl_nexus;
+ if (!tl_nexus)
+ return -ENODEV;
+
+ se_sess = tl_nexus->se_sess;
+ if (!se_sess)
+ return -ENODEV;
+
+ if (atomic_read(&tpg->tl_tpg_port_count)) {
+ printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with"
+ " active TPG port count: %d\n",
+ atomic_read(&tpg->tl_tpg_port_count));
+ return -EPERM;
+ }
+
+ printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+ " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+ tl_nexus->se_sess->se_node_acl->initiatorname);
+ /*
+ * Release the SCSI I_T Nexus to the emulated SAS Target Port
+ */
+ transport_deregister_session(tl_nexus->se_sess);
+ tpg->tl_hba->tl_nexus = NULL;
+ kfree(tl_nexus);
+ return 0;
+}
+
+/* End items for tcm_loop_nexus_cit */
+
+static ssize_t tcm_loop_tpg_show_nexus(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+ struct tcm_loop_tpg, tl_se_tpg);
+ struct tcm_loop_nexus *tl_nexus;
+ ssize_t ret;
+
+ tl_nexus = tl_tpg->tl_hba->tl_nexus;
+ if (!tl_nexus)
+ return -ENODEV;
+
+ ret = snprintf(page, PAGE_SIZE, "%s\n",
+ tl_nexus->se_sess->se_node_acl->initiatorname);
+
+ return ret;
+}
+
+static ssize_t tcm_loop_tpg_store_nexus(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+ struct tcm_loop_tpg, tl_se_tpg);
+ struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+ unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
+ int ret;
+ /*
+ * Shutdown the active I_T nexus if 'NULL' is passed..
+ */
+ if (!strncmp(page, "NULL", 4)) {
+ ret = tcm_loop_drop_nexus(tl_tpg);
+ return (!ret) ? count : ret;
+ }
+ /*
+ * Otherwise make sure the passed virtual Initiator port WWN matches
+ * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
+ * tcm_loop_make_nexus()
+ */
+ if (strlen(page) > TL_WWN_ADDR_LEN) {
+ printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
+ " max: %d\n", page, TL_WWN_ADDR_LEN);
+ return -EINVAL;
+ }
+ snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
+
+ ptr = strstr(i_port, "naa.");
+ if (ptr) {
+ if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
+ printk(KERN_ERR "Passed SAS Initiator Port %s does not"
+ " match target port protoid: %s\n", i_port,
+ tcm_loop_dump_proto_id(tl_hba));
+ return -EINVAL;
+ }
+ port_ptr = &i_port[0];
+ goto check_newline;
+ }
+ ptr = strstr(i_port, "fc.");
+ if (ptr) {
+ if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
+ printk(KERN_ERR "Passed FCP Initiator Port %s does not"
+ " match target port protoid: %s\n", i_port,
+ tcm_loop_dump_proto_id(tl_hba));
+ return -EINVAL;
+ }
+ port_ptr = &i_port[3]; /* Skip over "fc." */
+ goto check_newline;
+ }
+ ptr = strstr(i_port, "iqn.");
+ if (ptr) {
+ if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
+ printk(KERN_ERR "Passed iSCSI Initiator Port %s does not"
+ " match target port protoid: %s\n", i_port,
+ tcm_loop_dump_proto_id(tl_hba));
+ return -EINVAL;
+ }
+ port_ptr = &i_port[0];
+ goto check_newline;
+ }
+ printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:"
+ " %s\n", i_port);
+ return -EINVAL;
+ /*
+ * Clear any trailing newline for the NAA WWN
+ */
+check_newline:
+ if (i_port[strlen(i_port)-1] == '\n')
+ i_port[strlen(i_port)-1] = '\0';
+
+ ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
+ &tcm_loop_tpg_nexus.attr,
+ NULL,
+};
+
+/* Start items for tcm_loop_naa_cit */
+
+struct se_portal_group *tcm_loop_make_naa_tpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_loop_hba *tl_hba = container_of(wwn,
+ struct tcm_loop_hba, tl_hba_wwn);
+ struct tcm_loop_tpg *tl_tpg;
+ char *tpgt_str, *end_ptr;
+ int ret;
+ unsigned short int tpgt;
+
+ tpgt_str = strstr(name, "tpgt_");
+ if (!tpgt_str) {
+ printk(KERN_ERR "Unable to locate \"tpgt_#\" directory"
+ " group\n");
+ return ERR_PTR(-EINVAL);
+ }
+ tpgt_str += 5; /* Skip ahead of "tpgt_" */
+ tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
+
+ if (tpgt > TL_TPGS_PER_HBA) {
+ printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
+ " %u\n", tpgt, TL_TPGS_PER_HBA);
+ return ERR_PTR(-EINVAL);
+ }
+ tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
+ tl_tpg->tl_hba = tl_hba;
+ tl_tpg->tl_tpgt = tpgt;
+ /*
+ * Register the tl_tpg as an emulated SAS TCM Target Endpoint
+ */
+ ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
+ wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg,
+ TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0)
+ return ERR_PTR(-ENOMEM);
+
+ printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s"
+ " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
+ config_item_name(&wwn->wwn_group.cg_item), tpgt);
+
+ return &tl_tpg->tl_se_tpg;
+}
+
+void tcm_loop_drop_naa_tpg(
+ struct se_portal_group *se_tpg)
+{
+ struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+ struct tcm_loop_tpg, tl_se_tpg);
+ struct tcm_loop_hba *tl_hba;
+ unsigned short tpgt;
+
+ tl_hba = tl_tpg->tl_hba;
+ tpgt = tl_tpg->tl_tpgt;
+ /*
+ * Release the I_T Nexus for the Virtual SAS link if present
+ */
+ tcm_loop_drop_nexus(tl_tpg);
+ /*
+ * Deregister the tl_tpg as an emulated SAS TCM Target Endpoint
+ */
+ core_tpg_deregister(se_tpg);
+
+ printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s"
+ " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
+ config_item_name(&wwn->wwn_group.cg_item), tpgt);
+}
+
+/* End items for tcm_loop_naa_cit */
+
+/* Start items for tcm_loop_cit */
+
+struct se_wwn *tcm_loop_make_scsi_hba(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_loop_hba *tl_hba;
+ struct Scsi_Host *sh;
+ char *ptr;
+ int ret, off = 0;
+
+ tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
+ if (!tl_hba) {
+ printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ /*
+ * Determine the emulated Protocol Identifier and Target Port Name
+ * based on the incoming configfs directory name.
+ */
+ ptr = strstr(name, "naa.");
+ if (ptr) {
+ tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
+ goto check_len;
+ }
+ ptr = strstr(name, "fc.");
+ if (ptr) {
+ tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
+ off = 3; /* Skip over "fc." */
+ goto check_len;
+ }
+ ptr = strstr(name, "iqn.");
+ if (ptr) {
+ tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
+ goto check_len;
+ }
+
+ printk(KERN_ERR "Unable to locate prefix for emulated Target Port:"
+ " %s\n", name);
+ return ERR_PTR(-EINVAL);
+
+check_len:
+ if (strlen(name) > TL_WWN_ADDR_LEN) {
+ printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
+ " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
+ TL_WWN_ADDR_LEN);
+ kfree(tl_hba);
+ return ERR_PTR(-EINVAL);
+ }
+ snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
+
+ /*
+ * Call device_register(tl_hba->dev) to register the emulated
+ * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
+ * device_register() callbacks in tcm_loop_driver_probe()
+ */
+ ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
+ if (ret)
+ goto out;
+
+ sh = tl_hba->sh;
+ tcm_loop_hba_no_cnt++;
+ printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target"
+ " %s Address: %s at Linux/SCSI Host ID: %d\n",
+ tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
+
+ return &tl_hba->tl_hba_wwn;
+out:
+ kfree(tl_hba);
+ return ERR_PTR(ret);
+}
+
+void tcm_loop_drop_scsi_hba(
+ struct se_wwn *wwn)
+{
+ struct tcm_loop_hba *tl_hba = container_of(wwn,
+ struct tcm_loop_hba, tl_hba_wwn);
+ int host_no = tl_hba->sh->host_no;
+ /*
+ * Call device_unregister() on the original tl_hba->dev.
+ * The tcm_loop_release_adapter() release callback will
+ * release *tl_hba;
+ */
+ device_unregister(&tl_hba->dev);
+
+ printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target"
+ " SAS Address: %s at Linux/SCSI Host ID: %d\n",
+ config_item_name(&wwn->wwn_group.cg_item), host_no);
+}
+
+/* Start items for tcm_loop_cit */
+static ssize_t tcm_loop_wwn_show_attr_version(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
+}
+
+TF_WWN_ATTR_RO(tcm_loop, version);
+
+static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
+ &tcm_loop_wwn_version.attr,
+ NULL,
+};
+
+/* End items for tcm_loop_cit */
+
+static int tcm_loop_register_configfs(void)
+{
+ struct target_fabric_configfs *fabric;
+ struct config_group *tf_cg;
+ int ret;
+ /*
+ * Set the TCM Loop HBA counter to zero
+ */
+ tcm_loop_hba_no_cnt = 0;
+ /*
+ * Register the top level struct config_item_type with TCM core
+ */
+ fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
+ if (!fabric) {
+ printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
+ return -1;
+ }
+ /*
+ * Setup the fabric API of function pointers used by target_core_mod
+ */
+ fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name;
+ fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident;
+ fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn;
+ fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag;
+ fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth;
+ fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id;
+ fabric->tf_ops.tpg_get_pr_transport_id_len =
+ &tcm_loop_get_pr_transport_id_len;
+ fabric->tf_ops.tpg_parse_pr_out_transport_id =
+ &tcm_loop_parse_pr_out_transport_id;
+ fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode;
+ fabric->tf_ops.tpg_check_demo_mode_cache =
+ &tcm_loop_check_demo_mode_cache;
+ fabric->tf_ops.tpg_check_demo_mode_write_protect =
+ &tcm_loop_check_demo_mode_write_protect;
+ fabric->tf_ops.tpg_check_prod_mode_write_protect =
+ &tcm_loop_check_prod_mode_write_protect;
+ /*
+ * The TCM loopback fabric module runs in demo-mode to a local
+ * virtual SCSI device, so fabric dependent initiator ACLs are
+ * not required.
+ */
+ fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl;
+ fabric->tf_ops.tpg_release_fabric_acl =
+ &tcm_loop_tpg_release_fabric_acl;
+ fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
+ /*
+ * Since tcm_loop is mapping physical memory from Linux/SCSI
+ * struct scatterlist arrays for each struct scsi_cmnd I/O,
+ * we do not need TCM to allocate an iovec array for
+ * virtual memory address mappings
+ */
+ fabric->tf_ops.alloc_cmd_iovecs = NULL;
+ /*
+ * Used for setting up remaining TCM resources in process context
+ */
+ fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map;
+ fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
+ fabric->tf_ops.release_cmd_to_pool = &tcm_loop_deallocate_core_cmd;
+ fabric->tf_ops.release_cmd_direct = &tcm_loop_deallocate_core_cmd;
+ fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
+ fabric->tf_ops.close_session = &tcm_loop_close_session;
+ fabric->tf_ops.stop_session = &tcm_loop_stop_session;
+ fabric->tf_ops.fall_back_to_erl0 = &tcm_loop_fall_back_to_erl0;
+ fabric->tf_ops.sess_logged_in = &tcm_loop_sess_logged_in;
+ fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
+ fabric->tf_ops.sess_get_initiator_sid = NULL;
+ fabric->tf_ops.write_pending = &tcm_loop_write_pending;
+ fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status;
+ /*
+ * Not used for TCM loopback
+ */
+ fabric->tf_ops.set_default_node_attributes =
+ &tcm_loop_set_default_node_attributes;
+ fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
+ fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
+ fabric->tf_ops.new_cmd_failure = &tcm_loop_new_cmd_failure;
+ fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
+ fabric->tf_ops.queue_status = &tcm_loop_queue_status;
+ fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
+ fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
+ fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
+ fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove;
+ fabric->tf_ops.pack_lun = &tcm_loop_pack_lun;
+
+ tf_cg = &fabric->tf_group;
+ /*
+ * Setup function pointers for generic logic in target_core_fabric_configfs.c
+ */
+ fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba;
+ fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba;
+ fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg;
+ fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg;
+ /*
+ * fabric_post_link() and fabric_pre_unlink() are used for
+ * registration and release of TCM Loop Virtual SCSI LUNs.
+ */
+ fabric->tf_ops.fabric_post_link = &tcm_loop_port_link;
+ fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink;
+ fabric->tf_ops.fabric_make_np = NULL;
+ fabric->tf_ops.fabric_drop_np = NULL;
+ /*
+ * Setup default attribute lists for various fabric->tf_cit_tmpl
+ */
+ TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+ /*
+ * Once fabric->tf_ops has been setup, now register the fabric for
+ * use within TCM
+ */
+ ret = target_fabric_configfs_register(fabric);
+ if (ret < 0) {
+ printk(KERN_ERR "target_fabric_configfs_register() for"
+ " TCM_Loop failed!\n");
+ target_fabric_configfs_free(fabric);
+ return -1;
+ }
+ /*
+ * Setup our local pointer to *fabric.
+ */
+ tcm_loop_fabric_configfs = fabric;
+ printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->"
+ " tcm_loop_fabric_configfs\n");
+ return 0;
+}
+
+static void tcm_loop_deregister_configfs(void)
+{
+ if (!tcm_loop_fabric_configfs)
+ return;
+
+ target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
+ tcm_loop_fabric_configfs = NULL;
+ printk(KERN_INFO "TCM_LOOP[0] - Cleared"
+ " tcm_loop_fabric_configfs\n");
+}
+
+static int __init tcm_loop_fabric_init(void)
+{
+ int ret;
+
+ tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
+ sizeof(struct tcm_loop_cmd),
+ __alignof__(struct tcm_loop_cmd),
+ 0, NULL);
+ if (!tcm_loop_cmd_cache) {
+ printk(KERN_ERR "kmem_cache_create() for"
+ " tcm_loop_cmd_cache failed\n");
+ return -ENOMEM;
+ }
+
+ ret = tcm_loop_alloc_core_bus();
+ if (ret)
+ return ret;
+
+ ret = tcm_loop_register_configfs();
+ if (ret) {
+ tcm_loop_release_core_bus();
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit tcm_loop_fabric_exit(void)
+{
+ tcm_loop_deregister_configfs();
+ tcm_loop_release_core_bus();
+ kmem_cache_destroy(tcm_loop_cmd_cache);
+}
+
+MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
+MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
+MODULE_LICENSE("GPL");
+module_init(tcm_loop_fabric_init);
+module_exit(tcm_loop_fabric_exit);
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
new file mode 100644
index 000000000000..7e9f7ab45548
--- /dev/null
+++ b/drivers/target/loopback/tcm_loop.h
@@ -0,0 +1,77 @@
+#define TCM_LOOP_VERSION "v2.1-rc1"
+#define TL_WWN_ADDR_LEN 256
+#define TL_TPGS_PER_HBA 32
+/*
+ * Defaults for struct scsi_host_template tcm_loop_driver_template
+ *
+ * We use large can_queue and cmd_per_lun here and let TCM enforce
+ * the underlying se_device_t->queue_depth.
+ */
+#define TL_SCSI_CAN_QUEUE 1024
+#define TL_SCSI_CMD_PER_LUN 1024
+#define TL_SCSI_MAX_SECTORS 1024
+#define TL_SCSI_SG_TABLESIZE 256
+/*
+ * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len
+ */
+#define TL_SCSI_MAX_CMD_LEN 32
+
+#ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG
+# define TL_CDB_DEBUG(x...) printk(KERN_INFO x)
+#else
+# define TL_CDB_DEBUG(x...)
+#endif
+
+struct tcm_loop_cmd {
+ /* State of Linux/SCSI CDB+Data descriptor */
+ u32 sc_cmd_state;
+ /* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */
+ struct scsi_cmnd *sc;
+ struct list_head *tl_cmd_list;
+ /* The TCM I/O descriptor that is accessed via container_of() */
+ struct se_cmd tl_se_cmd;
+ /* Sense buffer that will be mapped into outgoing status */
+ unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER];
+};
+
+struct tcm_loop_tmr {
+ atomic_t tmr_complete;
+ wait_queue_head_t tl_tmr_wait;
+};
+
+struct tcm_loop_nexus {
+ int it_nexus_active;
+ /*
+ * Pointer to Linux/SCSI HBA from include/scsi/scsi_host.h
+ */
+ struct scsi_host *sh;
+ /*
+ * Pointer to TCM session for I_T Nexus
+ */
+ struct se_session *se_sess;
+};
+
+struct tcm_loop_nacl {
+ struct se_node_acl se_node_acl;
+};
+
+struct tcm_loop_tpg {
+ unsigned short tl_tpgt;
+ atomic_t tl_tpg_port_count;
+ struct se_portal_group tl_se_tpg;
+ struct tcm_loop_hba *tl_hba;
+};
+
+struct tcm_loop_hba {
+ u8 tl_proto_id;
+ unsigned char tl_wwn_address[TL_WWN_ADDR_LEN];
+ struct se_hba_s *se_hba;
+ struct se_lun *tl_hba_lun;
+ struct se_port *tl_hba_lun_sep;
+ struct se_device_s *se_dev_hba_ptr;
+ struct tcm_loop_nexus *tl_nexus;
+ struct device dev;
+ struct Scsi_Host *sh;
+ struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
+ struct se_wwn tl_hba_wwn;
+};
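Note that struct tcm_loop_cmd above embeds its struct se_cmd (tl_se_cmd) rather than pointing to it, so the fabric callbacks in tcm_loop.c recover the outer descriptor with container_of(). A minimal stand-alone sketch of that pattern with hypothetical demo types (not part of the patch):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct se_cmd_demo { int scsi_status; };

    struct tcm_loop_cmd_demo {
        int sc_cmd_state;
        struct se_cmd_demo tl_se_cmd;   /* embedded, as in struct tcm_loop_cmd */
    };

    int main(void)
    {
        struct tcm_loop_cmd_demo tl_cmd = { .sc_cmd_state = 1 };
        struct se_cmd_demo *se_cmd = &tl_cmd.tl_se_cmd;

        /* Recover the outer descriptor from the embedded member */
        struct tcm_loop_cmd_demo *outer =
            container_of(se_cmd, struct tcm_loop_cmd_demo, tl_se_cmd);

        printf("sc_cmd_state: %d\n", outer->sc_cmd_state);
        return 0;
    }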
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index caf8dc18ee0a..a5f44a6e6e1d 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -3,8 +3,8 @@
*
* This file contains ConfigFS logic for the Generic Target Engine project.
*
- * Copyright (c) 2008-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * Copyright (c) 2008-2011 Rising Tide Systems
+ * Copyright (c) 2008-2011 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -50,6 +50,7 @@
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
+#include "target_core_stat.h"
static struct list_head g_tf_list;
static struct mutex g_tf_lock;
@@ -1451,8 +1452,8 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
size_t count)
{
struct se_device *dev;
- unsigned char *i_fabric, *t_fabric, *i_port = NULL, *t_port = NULL;
- unsigned char *isid = NULL;
+ unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
+ unsigned char *t_fabric = NULL, *t_port = NULL;
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
unsigned long long tmp_ll;
@@ -1488,9 +1489,17 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
switch (token) {
case Opt_initiator_fabric:
i_fabric = match_strdup(&args[0]);
+ if (!i_fabric) {
+ ret = -ENOMEM;
+ goto out;
+ }
break;
case Opt_initiator_node:
i_port = match_strdup(&args[0]);
+ if (!i_port) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) {
printk(KERN_ERR "APTPL metadata initiator_node="
" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
@@ -1501,6 +1510,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
break;
case Opt_initiator_sid:
isid = match_strdup(&args[0]);
+ if (!isid) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (strlen(isid) > PR_REG_ISID_LEN) {
printk(KERN_ERR "APTPL metadata initiator_isid"
"= exceeds PR_REG_ISID_LEN: %d\n",
@@ -1511,6 +1524,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
break;
case Opt_sa_res_key:
arg_p = match_strdup(&args[0]);
+ if (!arg_p) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = strict_strtoull(arg_p, 0, &tmp_ll);
if (ret < 0) {
printk(KERN_ERR "strict_strtoull() failed for"
@@ -1547,9 +1564,17 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
*/
case Opt_target_fabric:
t_fabric = match_strdup(&args[0]);
+ if (!t_fabric) {
+ ret = -ENOMEM;
+ goto out;
+ }
break;
case Opt_target_node:
t_port = match_strdup(&args[0]);
+ if (!t_port) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) {
printk(KERN_ERR "APTPL metadata target_node="
" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
@@ -1592,6 +1617,11 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
i_port, isid, mapped_lun, t_port, tpgt, target_lun,
res_holder, all_tg_pt, type);
out:
+ kfree(i_fabric);
+ kfree(i_port);
+ kfree(isid);
+ kfree(t_fabric);
+ kfree(t_port);
kfree(orig);
return (ret == 0) ? count : ret;
}
@@ -1798,7 +1828,9 @@ static ssize_t target_core_store_dev_enable(
return -EINVAL;
dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
- if (!(dev) || IS_ERR(dev))
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+ else if (!dev)
return -EINVAL;
se_dev->se_dev_ptr = dev;
@@ -2678,6 +2710,34 @@ static struct config_item_type target_core_alua_cit = {
/* End functions for struct config_item_type target_core_alua_cit */
+/* Start functions for struct config_item_type target_core_stat_cit */
+
+static struct config_group *target_core_stat_mkdir(
+ struct config_group *group,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static void target_core_stat_rmdir(
+ struct config_group *group,
+ struct config_item *item)
+{
+ return;
+}
+
+static struct configfs_group_operations target_core_stat_group_ops = {
+ .make_group = &target_core_stat_mkdir,
+ .drop_item = &target_core_stat_rmdir,
+};
+
+static struct config_item_type target_core_stat_cit = {
+ .ct_group_ops = &target_core_stat_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_stat_cit */
+
/* Start functions for struct config_item_type target_core_hba_cit */
static struct config_group *target_core_make_subdev(
@@ -2690,10 +2750,12 @@ static struct config_group *target_core_make_subdev(
struct config_item *hba_ci = &group->cg_item;
struct se_hba *hba = item_to_hba(hba_ci);
struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
+ struct config_group *dev_stat_grp = NULL;
+ int errno = -ENOMEM, ret;
- if (mutex_lock_interruptible(&hba->hba_access_mutex))
- return NULL;
-
+ ret = mutex_lock_interruptible(&hba->hba_access_mutex);
+ if (ret)
+ return ERR_PTR(ret);
/*
* Locate the struct se_subsystem_api from parent's struct se_hba.
*/
@@ -2723,7 +2785,7 @@ static struct config_group *target_core_make_subdev(
se_dev->se_dev_hba = hba;
dev_cg = &se_dev->se_dev_group;
- dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
+ dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
GFP_KERNEL);
if (!(dev_cg->default_groups))
goto out;
@@ -2755,13 +2817,17 @@ static struct config_group *target_core_make_subdev(
&target_core_dev_wwn_cit);
config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
"alua", &target_core_alua_tg_pt_gps_cit);
+ config_group_init_type_name(&se_dev->dev_stat_grps.stat_group,
+ "statistics", &target_core_stat_cit);
+
dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
- dev_cg->default_groups[4] = NULL;
+ dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group;
+ dev_cg->default_groups[5] = NULL;
/*
- * Add core/$HBA/$DEV/alua/tg_pt_gps/default_tg_pt_gp
+ * Add core/$HBA/$DEV/alua/default_tg_pt_gp
*/
tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
if (!(tg_pt_gp))
@@ -2781,6 +2847,17 @@ static struct config_group *target_core_make_subdev(
tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
tg_pt_gp_cg->default_groups[1] = NULL;
T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp;
+ /*
+ * Add core/$HBA/$DEV/statistics/ default groups
+ */
+ dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group;
+ dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
+ GFP_KERNEL);
+ if (!dev_stat_grp->default_groups) {
+ printk(KERN_ERR "Unable to allocate dev_stat_grp->default_groups\n");
+ goto out;
+ }
+ target_stat_setup_dev_default_groups(se_dev);
printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
" %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
@@ -2792,6 +2869,8 @@ out:
core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
}
+ if (dev_stat_grp)
+ kfree(dev_stat_grp->default_groups);
if (tg_pt_gp_cg)
kfree(tg_pt_gp_cg->default_groups);
if (dev_cg)
@@ -2801,7 +2880,7 @@ out:
kfree(se_dev);
unlock:
mutex_unlock(&hba->hba_access_mutex);
- return NULL;
+ return ERR_PTR(errno);
}
static void target_core_drop_subdev(
@@ -2813,7 +2892,7 @@ static void target_core_drop_subdev(
struct se_hba *hba;
struct se_subsystem_api *t;
struct config_item *df_item;
- struct config_group *dev_cg, *tg_pt_gp_cg;
+ struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp;
int i;
hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
@@ -2825,6 +2904,14 @@ static void target_core_drop_subdev(
list_del(&se_dev->g_se_dev_list);
spin_unlock(&se_global->g_device_lock);
+ dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group;
+ for (i = 0; dev_stat_grp->default_groups[i]; i++) {
+ df_item = &dev_stat_grp->default_groups[i]->cg_item;
+ dev_stat_grp->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ kfree(dev_stat_grp->default_groups);
+
tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
@@ -3044,7 +3131,7 @@ static struct config_item_type target_core_cit = {
/* Stop functions for struct config_item_type target_core_hba_cit */
-static int target_core_init_configfs(void)
+static int __init target_core_init_configfs(void)
{
struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
struct config_group *lu_gp_cg = NULL;
@@ -3176,7 +3263,7 @@ out_global:
return -1;
}
-static void target_core_exit_configfs(void)
+static void __exit target_core_exit_configfs(void)
{
struct configfs_subsystem *subsys;
struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 350ed401544e..3fb8e32506ed 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -589,6 +589,7 @@ static void core_export_port(
* Called with struct se_device->se_port_lock spinlock held.
*/
static void core_release_port(struct se_device *dev, struct se_port *port)
+ __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
/*
* Wait for any port reference for PR ALL_TG_PT=1 operation
@@ -779,49 +780,14 @@ void se_release_vpd_for_dev(struct se_device *dev)
return;
}
-/*
- * Called with struct se_hba->device_lock held.
- */
-void se_clear_dev_ports(struct se_device *dev)
-{
- struct se_hba *hba = dev->se_hba;
- struct se_lun *lun;
- struct se_portal_group *tpg;
- struct se_port *sep, *sep_tmp;
-
- spin_lock(&dev->se_port_lock);
- list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
- spin_unlock(&dev->se_port_lock);
- spin_unlock(&hba->device_lock);
-
- lun = sep->sep_lun;
- tpg = sep->sep_tpg;
- spin_lock(&lun->lun_sep_lock);
- if (lun->lun_se_dev == NULL) {
- spin_unlock(&lun->lun_sep_lock);
- continue;
- }
- spin_unlock(&lun->lun_sep_lock);
-
- core_dev_del_lun(tpg, lun->unpacked_lun);
-
- spin_lock(&hba->device_lock);
- spin_lock(&dev->se_port_lock);
- }
- spin_unlock(&dev->se_port_lock);
-
- return;
-}
-
/* se_free_virtual_device():
*
* Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
*/
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
- spin_lock(&hba->device_lock);
- se_clear_dev_ports(dev);
- spin_unlock(&hba->device_lock);
+ if (!list_empty(&dev->dev_sep_list))
+ dump_stack();
core_alua_free_lu_gp_mem(dev);
se_release_device_for_hba(dev);
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index b65d1c8e7740..07ab5a3bb8e8 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -4,10 +4,10 @@
* This file contains generic fabric module configfs infrastructure for
* TCM v4.x code
*
- * Copyright (c) 2010 Rising Tide Systems
- * Copyright (c) 2010 Linux-iSCSI.org
+ * Copyright (c) 2010,2011 Rising Tide Systems
+ * Copyright (c) 2010,2011 Linux-iSCSI.org
*
- * Copyright (c) 2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
+ * Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -48,6 +48,7 @@
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
+#include "target_core_stat.h"
#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
@@ -241,6 +242,32 @@ TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
/* End of tfc_tpg_mappedlun_cit */
+/* Start of tfc_tpg_mappedlun_stat_cit */
+
+static struct config_group *target_core_mappedlun_stat_mkdir(
+ struct config_group *group,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static void target_core_mappedlun_stat_rmdir(
+ struct config_group *group,
+ struct config_item *item)
+{
+ return;
+}
+
+static struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = {
+ .make_group = target_core_mappedlun_stat_mkdir,
+ .drop_item = target_core_mappedlun_stat_rmdir,
+};
+
+TF_CIT_SETUP(tpg_mappedlun_stat, NULL, &target_fabric_mappedlun_stat_group_ops,
+ NULL);
+
+/* End of tfc_tpg_mappedlun_stat_cit */
+
/* Start of tfc_tpg_nacl_attrib_cit */
CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group);
@@ -294,6 +321,7 @@ static struct config_group *target_fabric_make_mappedlun(
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_lun_acl *lacl;
struct config_item *acl_ci;
+ struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
char *buf;
unsigned long mapped_lun;
int ret = 0;
@@ -330,15 +358,42 @@ static struct config_group *target_fabric_make_mappedlun(
lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
config_item_name(acl_ci), &ret);
- if (!(lacl))
+ if (!(lacl)) {
+ ret = -EINVAL;
goto out;
+ }
+
+ lacl_cg = &lacl->se_lun_group;
+ lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!lacl_cg->default_groups) {
+ printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n");
+ ret = -ENOMEM;
+ goto out;
+ }
config_group_init_type_name(&lacl->se_lun_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
+ config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
+ "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit);
+ lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
+ lacl_cg->default_groups[1] = NULL;
+
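+ /* Allocate the default_groups array for the mapped LUN "statistics" group and populate it */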
+ ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+ ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
+ GFP_KERNEL);
+ if (!ml_stat_grp->default_groups) {
+ printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ target_stat_setup_mappedlun_default_groups(lacl);
kfree(buf);
return &lacl->se_lun_group;
out:
+ if (lacl_cg)
+ kfree(lacl_cg->default_groups);
kfree(buf);
return ERR_PTR(ret);
}
@@ -347,6 +402,28 @@ static void target_fabric_drop_mappedlun(
struct config_group *group,
struct config_item *item)
{
+ struct se_lun_acl *lacl = container_of(to_config_group(item),
+ struct se_lun_acl, se_lun_group);
+ struct config_item *df_item;
+ struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
+ int i;
+
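+ /* Release the mapped LUN statistics default groups before dropping the LUN ACL group itself */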
+ ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+ for (i = 0; ml_stat_grp->default_groups[i]; i++) {
+ df_item = &ml_stat_grp->default_groups[i]->cg_item;
+ ml_stat_grp->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ kfree(ml_stat_grp->default_groups);
+
+ lacl_cg = &lacl->se_lun_group;
+ for (i = 0; lacl_cg->default_groups[i]; i++) {
+ df_item = &lacl_cg->default_groups[i]->cg_item;
+ lacl_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ kfree(lacl_cg->default_groups);
+
config_item_put(item);
}
@@ -376,6 +453,15 @@ TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
/* End of tfc_tpg_nacl_base_cit */
+/* Start of tfc_node_fabric_stats_cit */
+/*
+ * This is used as a placeholder for struct se_node_acl->acl_fabric_stat_group
+ * to allow fabrics access to ->acl_fabric_stat_group->default_groups[]
+ */
+TF_CIT_SETUP(tpg_nacl_stat, NULL, NULL, NULL);
+
+/* End of tfc_node_fabric_stats_cit */
+
/* Start of tfc_tpg_nacl_cit */
static struct config_group *target_fabric_make_nodeacl(
@@ -402,7 +488,8 @@ static struct config_group *target_fabric_make_nodeacl(
nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
- nacl_cg->default_groups[3] = NULL;
+ nacl_cg->default_groups[3] = &se_nacl->acl_fabric_stat_group;
+ nacl_cg->default_groups[4] = NULL;
config_group_init_type_name(&se_nacl->acl_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
@@ -412,6 +499,9 @@ static struct config_group *target_fabric_make_nodeacl(
&TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
config_group_init_type_name(&se_nacl->acl_param_group, "param",
&TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
+ config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
+ "fabric_statistics",
+ &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit);
return &se_nacl->acl_group;
}
@@ -758,6 +848,31 @@ TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_at
/* End of tfc_tpg_port_cit */
+/* Start of tfc_tpg_port_stat_cit */
+
+static struct config_group *target_core_port_stat_mkdir(
+ struct config_group *group,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static void target_core_port_stat_rmdir(
+ struct config_group *group,
+ struct config_item *item)
+{
+ return;
+}
+
+static struct configfs_group_operations target_fabric_port_stat_group_ops = {
+ .make_group = target_core_port_stat_mkdir,
+ .drop_item = target_core_port_stat_rmdir,
+};
+
+TF_CIT_SETUP(tpg_port_stat, NULL, &target_fabric_port_stat_group_ops, NULL);
+
+/* End of tfc_tpg_port_stat_cit */
+
/* Start of tfc_tpg_lun_cit */
static struct config_group *target_fabric_make_lun(
@@ -768,7 +883,9 @@ static struct config_group *target_fabric_make_lun(
struct se_portal_group *se_tpg = container_of(group,
struct se_portal_group, tpg_lun_group);
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+ struct config_group *lun_cg = NULL, *port_stat_grp = NULL;
unsigned long unpacked_lun;
+ int errno;
if (strstr(name, "lun_") != name) {
printk(KERN_ERR "Unable to locate \'_\" in"
@@ -782,16 +899,64 @@ static struct config_group *target_fabric_make_lun(
if (!(lun))
return ERR_PTR(-EINVAL);
+ lun_cg = &lun->lun_group;
+ lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!lun_cg->default_groups) {
+ printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
config_group_init_type_name(&lun->lun_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
+ config_group_init_type_name(&lun->port_stat_grps.stat_group,
+ "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit);
+ lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
+ lun_cg->default_groups[1] = NULL;
+
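+ /* Allocate the default_groups array for the port "statistics" group and populate it */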
+ port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+ port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
+ GFP_KERNEL);
+ if (!port_stat_grp->default_groups) {
+ printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n");
+ errno = -ENOMEM;
+ goto out;
+ }
+ target_stat_setup_port_default_groups(lun);
return &lun->lun_group;
+out:
+ if (lun_cg)
+ kfree(lun_cg->default_groups);
+ return ERR_PTR(errno);
}
static void target_fabric_drop_lun(
struct config_group *group,
struct config_item *item)
{
+ struct se_lun *lun = container_of(to_config_group(item),
+ struct se_lun, lun_group);
+ struct config_item *df_item;
+ struct config_group *lun_cg, *port_stat_grp;
+ int i;
+
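+ /* Release the port statistics default groups before dropping the LUN group itself */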
+ port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+ for (i = 0; port_stat_grp->default_groups[i]; i++) {
+ df_item = &port_stat_grp->default_groups[i]->cg_item;
+ port_stat_grp->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ kfree(port_stat_grp->default_groups);
+
+ lun_cg = &lun->lun_group;
+ for (i = 0; lun_cg->default_groups[i]; i++) {
+ df_item = &lun_cg->default_groups[i]->cg_item;
+ lun_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ kfree(lun_cg->default_groups);
+
config_item_put(item);
}
@@ -946,6 +1111,15 @@ TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
/* End of tfc_tpg_cit */
+/* Start of tfc_wwn_fabric_stats_cit */
+/*
+ * This is used as a placeholder for struct se_wwn->fabric_stat_group
+ * to allow fabrics access to ->fabric_stat_group->default_groups[]
+ */
+TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL);
+
+/* End of tfc_wwn_fabric_stats_cit */
+
/* Start of tfc_wwn_cit */
static struct config_group *target_fabric_make_wwn(
@@ -966,8 +1140,17 @@ static struct config_group *target_fabric_make_wwn(
return ERR_PTR(-EINVAL);
wwn->wwn_tf = tf;
+ /*
+ * Setup default groups from pre-allocated wwn->wwn_default_groups
+ */
+ wwn->wwn_group.default_groups = wwn->wwn_default_groups;
+ wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group;
+ wwn->wwn_group.default_groups[1] = NULL;
+
config_group_init_type_name(&wwn->wwn_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_cit);
+ config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
+ &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit);
return &wwn->wwn_group;
}
@@ -976,6 +1159,18 @@ static void target_fabric_drop_wwn(
struct config_group *group,
struct config_item *item)
{
+ struct se_wwn *wwn = container_of(to_config_group(item),
+ struct se_wwn, wwn_group);
+ struct config_item *df_item;
+ struct config_group *cg = &wwn->wwn_group;
+ int i;
+
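+ /* Drop the references on wwn_group default groups (fabric_statistics) before the group itself */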
+ for (i = 0; cg->default_groups[i]; i++) {
+ df_item = &cg->default_groups[i]->cg_item;
+ cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+
config_item_put(item);
}
@@ -1015,9 +1210,11 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf)
{
target_fabric_setup_discovery_cit(tf);
target_fabric_setup_wwn_cit(tf);
+ target_fabric_setup_wwn_fabric_stats_cit(tf);
target_fabric_setup_tpg_cit(tf);
target_fabric_setup_tpg_base_cit(tf);
target_fabric_setup_tpg_port_cit(tf);
+ target_fabric_setup_tpg_port_stat_cit(tf);
target_fabric_setup_tpg_lun_cit(tf);
target_fabric_setup_tpg_np_cit(tf);
target_fabric_setup_tpg_np_base_cit(tf);
@@ -1028,7 +1225,9 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf)
target_fabric_setup_tpg_nacl_attrib_cit(tf);
target_fabric_setup_tpg_nacl_auth_cit(tf);
target_fabric_setup_tpg_nacl_param_cit(tf);
+ target_fabric_setup_tpg_nacl_stat_cit(tf);
target_fabric_setup_tpg_mappedlun_cit(tf);
+ target_fabric_setup_tpg_mappedlun_stat_cit(tf);
return 0;
}
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index a3c695adabec..d57ad672677f 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -34,6 +34,7 @@
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
+#include <target/target_core_fabric_lib.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 190ca8ac2498..02f553aef43d 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -134,7 +134,7 @@ static struct se_device *fd_create_virtdevice(
mm_segment_t old_fs;
struct file *file;
struct inode *inode = NULL;
- int dev_flags = 0, flags;
+ int dev_flags = 0, flags, ret = -EINVAL;
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
@@ -146,6 +146,7 @@ static struct se_device *fd_create_virtdevice(
if (IS_ERR(dev_p)) {
printk(KERN_ERR "getname(%s) failed: %lu\n",
fd_dev->fd_dev_name, IS_ERR(dev_p));
+ ret = PTR_ERR(dev_p);
goto fail;
}
#if 0
@@ -165,8 +166,12 @@ static struct se_device *fd_create_virtdevice(
flags |= O_SYNC;
file = filp_open(dev_p, flags, 0600);
-
- if (IS_ERR(file) || !file || !file->f_dentry) {
+ if (IS_ERR(file)) {
+ printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
+ ret = PTR_ERR(file);
+ goto fail;
+ }
+ if (!file || !file->f_dentry) {
printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
goto fail;
}
@@ -241,7 +246,7 @@ fail:
fd_dev->fd_file = NULL;
}
putname(dev_p);
- return NULL;
+ return ERR_PTR(ret);
}
/* fd_free_device(): (Part of se_subsystem_api_t template)
@@ -509,7 +514,7 @@ enum {
static match_table_t tokens = {
{Opt_fd_dev_name, "fd_dev_name=%s"},
{Opt_fd_dev_size, "fd_dev_size=%s"},
- {Opt_fd_buffered_io, "fd_buffered_id=%d"},
+ {Opt_fd_buffered_io, "fd_buffered_io=%d"},
{Opt_err, NULL}
};
@@ -536,15 +541,26 @@ static ssize_t fd_set_configfs_dev_params(
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_fd_dev_name:
+ arg_p = match_strdup(&args[0]);
+ if (!arg_p) {
+ ret = -ENOMEM;
+ break;
+ }
snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
- "%s", match_strdup(&args[0]));
+ "%s", arg_p);
+ kfree(arg_p);
printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
fd_dev->fd_dev_name);
fd_dev->fbd_flags |= FBDF_HAS_PATH;
break;
case Opt_fd_dev_size:
arg_p = match_strdup(&args[0]);
+ if (!arg_p) {
+ ret = -ENOMEM;
+ break;
+ }
ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
+ kfree(arg_p);
if (ret < 0) {
printk(KERN_ERR "strict_strtoull() failed for"
" fd_dev_size=\n");
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 6ec51cbc018e..0b8f8da89019 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -151,19 +151,8 @@ out_free_hba:
int
core_delete_hba(struct se_hba *hba)
{
- struct se_device *dev, *dev_tmp;
-
- spin_lock(&hba->device_lock);
- list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) {
-
- se_clear_dev_ports(dev);
- spin_unlock(&hba->device_lock);
-
- se_release_device_for_hba(dev);
-
- spin_lock(&hba->device_lock);
- }
- spin_unlock(&hba->device_lock);
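+ /* All devices should have been released via configfs before HBA removal; warn if any remain */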
+ if (!list_empty(&hba->hba_dev_list))
+ dump_stack();
hba->transport->detach_hba(hba);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index eb0afec046e1..86639004af9e 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -129,10 +129,11 @@ static struct se_device *iblock_create_virtdevice(
struct request_queue *q;
struct queue_limits *limits;
u32 dev_flags = 0;
+ int ret = -EINVAL;
if (!(ib_dev)) {
printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
- return 0;
+ return ERR_PTR(ret);
}
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
/*
@@ -141,7 +142,7 @@ static struct se_device *iblock_create_virtdevice(
ib_dev->ibd_bio_set = bioset_create(32, 64);
if (!(ib_dev->ibd_bio_set)) {
printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
- return 0;
+ return ERR_PTR(-ENOMEM);
}
printk(KERN_INFO "IBLOCK: Created bio_set()\n");
/*
@@ -153,8 +154,10 @@ static struct se_device *iblock_create_virtdevice(
bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
- if (IS_ERR(bd))
+ if (IS_ERR(bd)) {
+ ret = PTR_ERR(bd);
goto failed;
+ }
/*
* Setup the local scope queue_limits from struct request_queue->limits
* to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
@@ -184,9 +187,7 @@ static struct se_device *iblock_create_virtdevice(
* the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
* in ATA and we need to set TPE=1
*/
- if (blk_queue_discard(bdev_get_queue(bd))) {
- struct request_queue *q = bdev_get_queue(bd);
-
+ if (blk_queue_discard(q)) {
DEV_ATTRIB(dev)->max_unmap_lba_count =
q->limits.max_discard_sectors;
/*
@@ -212,7 +213,7 @@ failed:
ib_dev->ibd_bd = NULL;
ib_dev->ibd_major = 0;
ib_dev->ibd_minor = 0;
- return NULL;
+ return ERR_PTR(ret);
}
static void iblock_free_device(void *p)
@@ -467,7 +468,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
const char *page, ssize_t count)
{
struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
- char *orig, *ptr, *opts;
+ char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
@@ -490,9 +491,14 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
ret = -EEXIST;
goto out;
}
-
- ret = snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
- "%s", match_strdup(&args[0]));
+ arg_p = match_strdup(&args[0]);
+ if (!arg_p) {
+ ret = -ENOMEM;
+ break;
+ }
+ snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
+ "%s", arg_p);
+ kfree(arg_p);
printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
ib_dev->ibd_udev_path);
ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 5a9d2ba4b609..7ff6a35f26ac 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -441,6 +441,7 @@ static struct se_device *pscsi_create_type_disk(
struct pscsi_dev_virt *pdv,
struct se_subsystem_dev *se_dev,
struct se_hba *hba)
+ __releases(sh->host_lock)
{
struct se_device *dev;
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
@@ -488,6 +489,7 @@ static struct se_device *pscsi_create_type_rom(
struct pscsi_dev_virt *pdv,
struct se_subsystem_dev *se_dev,
struct se_hba *hba)
+ __releases(sh->host_lock)
{
struct se_device *dev;
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
@@ -522,6 +524,7 @@ static struct se_device *pscsi_create_type_other(
struct pscsi_dev_virt *pdv,
struct se_subsystem_dev *se_dev,
struct se_hba *hba)
+ __releases(sh->host_lock)
{
struct se_device *dev;
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
@@ -555,7 +558,7 @@ static struct se_device *pscsi_create_virtdevice(
if (!(pdv)) {
printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
" parameter\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
/*
* If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
@@ -565,7 +568,7 @@ static struct se_device *pscsi_create_virtdevice(
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
printk(KERN_ERR "pSCSI: Unable to locate struct"
" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
- return NULL;
+ return ERR_PTR(-ENODEV);
}
/*
* For the newer PHV_VIRUTAL_HOST_ID struct scsi_device
@@ -574,7 +577,7 @@ static struct se_device *pscsi_create_virtdevice(
if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
printk(KERN_ERR "pSCSI: udev_path attribute has not"
" been set before ENABLE=1\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
/*
* If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID,
@@ -587,12 +590,12 @@ static struct se_device *pscsi_create_virtdevice(
printk(KERN_ERR "pSCSI: Unable to set hba_mode"
" with active devices\n");
spin_unlock(&hba->device_lock);
- return NULL;
+ return ERR_PTR(-EEXIST);
}
spin_unlock(&hba->device_lock);
if (pscsi_pmode_enable_hba(hba, 1) != 1)
- return NULL;
+ return ERR_PTR(-ENODEV);
legacy_mode_enable = 1;
hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
@@ -602,14 +605,14 @@ static struct se_device *pscsi_create_virtdevice(
if (!(sh)) {
printk(KERN_ERR "pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
- return NULL;
+ return ERR_PTR(-ENODEV);
}
}
} else {
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
" struct Scsi_Host exists\n");
- return NULL;
+ return ERR_PTR(-EEXIST);
}
}
@@ -644,7 +647,7 @@ static struct se_device *pscsi_create_virtdevice(
hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
}
pdv->pdv_sd = NULL;
- return NULL;
+ return ERR_PTR(-ENODEV);
}
return dev;
}
@@ -660,7 +663,7 @@ static struct se_device *pscsi_create_virtdevice(
hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
}
- return NULL;
+ return ERR_PTR(-ENODEV);
}
/* pscsi_free_device(): (Part of se_subsystem_api_t template)
@@ -816,6 +819,7 @@ pscsi_alloc_task(struct se_cmd *cmd)
if (!(pt->pscsi_cdb)) {
printk(KERN_ERR "pSCSI: Unable to allocate extended"
" pt->pscsi_cdb\n");
+ kfree(pt);
return NULL;
}
} else
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 8dc6d74c1d40..7837dd365a9d 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -150,7 +150,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
if (rd_dev->rd_page_count <= 0) {
printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
rd_dev->rd_page_count);
- return -1;
+ return -EINVAL;
}
total_sg_needed = rd_dev->rd_page_count;
@@ -160,7 +160,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
if (!(sg_table)) {
printk(KERN_ERR "Unable to allocate memory for Ramdisk"
" scatterlist tables\n");
- return -1;
+ return -ENOMEM;
}
rd_dev->sg_table_array = sg_table;
@@ -175,7 +175,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
if (!(sg)) {
printk(KERN_ERR "Unable to allocate scatterlist array"
" for struct rd_dev\n");
- return -1;
+ return -ENOMEM;
}
sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
@@ -191,7 +191,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
if (!(pg)) {
printk(KERN_ERR "Unable to allocate scatterlist"
" pages for struct rd_dev_sg_table\n");
- return -1;
+ return -ENOMEM;
}
sg_assign_page(&sg[j], pg);
sg[j].length = PAGE_SIZE;
@@ -253,12 +253,13 @@ static struct se_device *rd_create_virtdevice(
struct se_dev_limits dev_limits;
struct rd_dev *rd_dev = p;
struct rd_host *rd_host = hba->hba_ptr;
- int dev_flags = 0;
+ int dev_flags = 0, ret;
char prod[16], rev[4];
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
- if (rd_build_device_space(rd_dev) < 0)
+ ret = rd_build_device_space(rd_dev);
+ if (ret < 0)
goto fail;
snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
@@ -292,7 +293,7 @@ static struct se_device *rd_create_virtdevice(
fail:
rd_release_device_space(rd_dev);
- return NULL;
+ return ERR_PTR(ret);
}
static struct se_device *rd_DIRECT_create_virtdevice(
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 13badfbaf9c0..3ea19e29d8ec 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -14,8 +14,6 @@
#define RD_BLOCKSIZE 512
#define RD_MAX_SECTORS 1024
-extern struct kmem_cache *se_mem_cache;
-
/* Used in target_core_init_configfs() for virtual LUN 0 access */
int __init rd_module_init(void);
void rd_module_exit(void);
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
new file mode 100644
index 000000000000..5e3a067a7475
--- /dev/null
+++ b/drivers/target/target_core_stat.c
@@ -0,0 +1,1810 @@
+/*******************************************************************************
+ * Filename: target_core_stat.c
+ *
+ * Copyright (c) 2011 Rising Tide Systems
+ * Copyright (c) 2011 Linux-iSCSI.org
+ *
+ * Modern ConfigFS group context specific statistics based on original
+ * target_core_mib.c code
+ *
+ * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved.
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/blkdev.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "target_core_hba.h"
+
+#ifndef INITIAL_JIFFIES
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+#endif
+
+#define NONE "None"
+#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
+
+#define SCSI_LU_INDEX 1
+#define LU_COUNT 1
+
+/*
+ * SCSI Device Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_dev, se_dev_stat_grps);
+#define DEV_STAT_SCSI_DEV_ATTR(_name, _mode) \
+static struct target_stat_scsi_dev_attribute \
+ target_stat_scsi_dev_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_stat_scsi_dev_show_attr_##_name, \
+ target_stat_scsi_dev_store_attr_##_name);
+
+#define DEV_STAT_SCSI_DEV_ATTR_RO(_name) \
+static struct target_stat_scsi_dev_attribute \
+ target_stat_scsi_dev_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_stat_scsi_dev_show_attr_##_name);
+
+static ssize_t target_stat_scsi_dev_show_attr_inst(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_hba *hba = se_subdev->se_dev_hba;
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+}
+DEV_STAT_SCSI_DEV_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_dev_show_attr_indx(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+}
+DEV_STAT_SCSI_DEV_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_dev_show_attr_role(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ return snprintf(page, PAGE_SIZE, "Target\n");
+}
+DEV_STAT_SCSI_DEV_ATTR_RO(role);
+
+static ssize_t target_stat_scsi_dev_show_attr_ports(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count);
+}
+DEV_STAT_SCSI_DEV_ATTR_RO(ports);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_dev, se_dev_stat_grps, scsi_dev_group);
+
+static struct configfs_attribute *target_stat_scsi_dev_attrs[] = {
+ &target_stat_scsi_dev_inst.attr,
+ &target_stat_scsi_dev_indx.attr,
+ &target_stat_scsi_dev_role.attr,
+ &target_stat_scsi_dev_ports.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_dev_attrib_ops = {
+ .show_attribute = target_stat_scsi_dev_attr_show,
+ .store_attribute = target_stat_scsi_dev_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_dev_cit = {
+ .ct_item_ops = &target_stat_scsi_dev_attrib_ops,
+ .ct_attrs = target_stat_scsi_dev_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * SCSI Target Device Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_dev, se_dev_stat_grps);
+#define DEV_STAT_SCSI_TGT_DEV_ATTR(_name, _mode) \
+static struct target_stat_scsi_tgt_dev_attribute \
+ target_stat_scsi_tgt_dev_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_stat_scsi_tgt_dev_show_attr_##_name, \
+ target_stat_scsi_tgt_dev_store_attr_##_name);
+
+#define DEV_STAT_SCSI_TGT_DEV_ATTR_RO(_name) \
+static struct target_stat_scsi_tgt_dev_attribute \
+ target_stat_scsi_tgt_dev_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_stat_scsi_tgt_dev_show_attr_##_name);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_inst(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_hba *hba = se_subdev->se_dev_hba;
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_indx(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_status(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+ char status[16];
+
+ if (!dev)
+ return -ENODEV;
+
+ switch (dev->dev_status) {
+ case TRANSPORT_DEVICE_ACTIVATED:
+ strcpy(status, "activated");
+ break;
+ case TRANSPORT_DEVICE_DEACTIVATED:
+ strcpy(status, "deactivated");
+ break;
+ case TRANSPORT_DEVICE_SHUTDOWN:
+ strcpy(status, "shutdown");
+ break;
+ case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
+ case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
+ strcpy(status, "offline");
+ break;
+ default:
+ sprintf(status, "unknown(%d)", dev->dev_status);
+ break;
+ }
+
+ return snprintf(page, PAGE_SIZE, "%s\n", status);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+ int non_accessible_lus;
+
+ if (!dev)
+ return -ENODEV;
+
+ switch (dev->dev_status) {
+ case TRANSPORT_DEVICE_ACTIVATED:
+ non_accessible_lus = 0;
+ break;
+ case TRANSPORT_DEVICE_DEACTIVATED:
+ case TRANSPORT_DEVICE_SHUTDOWN:
+ case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
+ case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
+ default:
+ non_accessible_lus = 1;
+ break;
+ }
+
+ return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets);
+
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_dev, se_dev_stat_grps, scsi_tgt_dev_group);
+
+static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = {
+ &target_stat_scsi_tgt_dev_inst.attr,
+ &target_stat_scsi_tgt_dev_indx.attr,
+ &target_stat_scsi_tgt_dev_num_lus.attr,
+ &target_stat_scsi_tgt_dev_status.attr,
+ &target_stat_scsi_tgt_dev_non_access_lus.attr,
+ &target_stat_scsi_tgt_dev_resets.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_tgt_dev_attrib_ops = {
+ .show_attribute = target_stat_scsi_tgt_dev_attr_show,
+ .store_attribute = target_stat_scsi_tgt_dev_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_tgt_dev_cit = {
+ .ct_item_ops = &target_stat_scsi_tgt_dev_attrib_ops,
+ .ct_attrs = target_stat_scsi_tgt_dev_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * SCSI Logical Unit Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_lu, se_dev_stat_grps);
+#define DEV_STAT_SCSI_LU_ATTR(_name, _mode) \
+static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_stat_scsi_lu_show_attr_##_name, \
+ target_stat_scsi_lu_store_attr_##_name);
+
+#define DEV_STAT_SCSI_LU_ATTR_RO(_name) \
+static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_stat_scsi_lu_show_attr_##_name);
+
+static ssize_t target_stat_scsi_lu_show_attr_inst(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_hba *hba = se_subdev->se_dev_hba;
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_lu_show_attr_dev(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(dev);
+
+static ssize_t target_stat_scsi_lu_show_attr_indx(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_lu_show_attr_lun(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+ /* FIXME: scsiLuDefaultLun */
+ return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(lun);
+
+static ssize_t target_stat_scsi_lu_show_attr_lu_name(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+ /* scsiLuWwnName */
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ (strlen(DEV_T10_WWN(dev)->unit_serial)) ?
+ (char *)&DEV_T10_WWN(dev)->unit_serial[0] : "None");
+}
+DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
+
+static ssize_t target_stat_scsi_lu_show_attr_vend(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+ int j;
+ char str[28];
+
+ if (!dev)
+ return -ENODEV;
+ /* scsiLuVendorId */
+ memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
+ for (j = 0; j < 8; j++)
+ str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
+ DEV_T10_WWN(dev)->vendor[j] : 0x20;
+ str[8] = 0;
+ return snprintf(page, PAGE_SIZE, "%s\n", str);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(vend);
+
+static ssize_t target_stat_scsi_lu_show_attr_prod(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+ int j;
+ char str[28];
+
+ if (!dev)
+ return -ENODEV;
+
+ /* scsiLuProductId */
+ memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
+ for (j = 0; j < 16; j++)
+ str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
+ DEV_T10_WWN(dev)->model[j] : 0x20;
+ str[16] = 0;
+ return snprintf(page, PAGE_SIZE, "%s\n", str);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(prod);
+
+static ssize_t target_stat_scsi_lu_show_attr_rev(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+ int j;
+ char str[28];
+
+ if (!dev)
+ return -ENODEV;
+
+ /* scsiLuRevisionId */
+ memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
+ for (j = 0; j < 4; j++)
+ str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
+ DEV_T10_WWN(dev)->revision[j] : 0x20;
+ str[4] = 0;
+ return snprintf(page, PAGE_SIZE, "%s\n", str);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(rev);
+
+static ssize_t target_stat_scsi_lu_show_attr_dev_type(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ /* scsiLuPeripheralType */
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ TRANSPORT(dev)->get_device_type(dev));
+}
+DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
+
+static ssize_t target_stat_scsi_lu_show_attr_status(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ /* scsiLuStatus */
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
+ "available" : "notavailable");
+}
+DEV_STAT_SCSI_LU_ATTR_RO(status);
+
+static ssize_t target_stat_scsi_lu_show_attr_state_bit(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ /* scsiLuState */
+ return snprintf(page, PAGE_SIZE, "exposed\n");
+}
+DEV_STAT_SCSI_LU_ATTR_RO(state_bit);
+
+static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ /* scsiLuNumCommands */
+ return snprintf(page, PAGE_SIZE, "%llu\n",
+ (unsigned long long)dev->num_cmds);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
+
+static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ /* scsiLuReadMegaBytes */
+ return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20));
+}
+DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
+
+static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ /* scsiLuWrittenMegaBytes */
+ return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20));
+}
+DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
+
+static ssize_t target_stat_scsi_lu_show_attr_resets(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ /* scsiLuInResets */
+ return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(resets);
+
+static ssize_t target_stat_scsi_lu_show_attr_full_stat(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ /* FIXME: scsiLuOutTaskSetFullStatus */
+ return snprintf(page, PAGE_SIZE, "%u\n", 0);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(full_stat);
+
+static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ /* FIXME: scsiLuHSInCommands */
+ return snprintf(page, PAGE_SIZE, "%u\n", 0);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds);
+
+static ssize_t target_stat_scsi_lu_show_attr_creation_time(
+ struct se_dev_stat_grps *sgrps, char *page)
+{
+ struct se_subsystem_dev *se_subdev = container_of(sgrps,
+ struct se_subsystem_dev, dev_stat_grps);
+ struct se_device *dev = se_subdev->se_dev_ptr;
+
+ if (!dev)
+ return -ENODEV;
+
+ /* scsiLuCreationTime */
+ return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
+ INITIAL_JIFFIES) * 100 / HZ));
+}
+DEV_STAT_SCSI_LU_ATTR_RO(creation_time);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_lu, se_dev_stat_grps, scsi_lu_group);
+
+static struct configfs_attribute *target_stat_scsi_lu_attrs[] = {
+ &target_stat_scsi_lu_inst.attr,
+ &target_stat_scsi_lu_dev.attr,
+ &target_stat_scsi_lu_indx.attr,
+ &target_stat_scsi_lu_lun.attr,
+ &target_stat_scsi_lu_lu_name.attr,
+ &target_stat_scsi_lu_vend.attr,
+ &target_stat_scsi_lu_prod.attr,
+ &target_stat_scsi_lu_rev.attr,
+ &target_stat_scsi_lu_dev_type.attr,
+ &target_stat_scsi_lu_status.attr,
+ &target_stat_scsi_lu_state_bit.attr,
+ &target_stat_scsi_lu_num_cmds.attr,
+ &target_stat_scsi_lu_read_mbytes.attr,
+ &target_stat_scsi_lu_write_mbytes.attr,
+ &target_stat_scsi_lu_resets.attr,
+ &target_stat_scsi_lu_full_stat.attr,
+ &target_stat_scsi_lu_hs_num_cmds.attr,
+ &target_stat_scsi_lu_creation_time.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_lu_attrib_ops = {
+ .show_attribute = target_stat_scsi_lu_attr_show,
+ .store_attribute = target_stat_scsi_lu_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_lu_cit = {
+ .ct_item_ops = &target_stat_scsi_lu_attrib_ops,
+ .ct_attrs = target_stat_scsi_lu_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Called from target_core_configfs.c:target_core_make_subdev() to setup
+ * the target statistics groups + configfs CITs located in target_core_stat.c
+ */
+void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev)
+{
+ struct config_group *dev_stat_grp = &DEV_STAT_GRP(se_subdev)->stat_group;
+
+ config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_dev_group,
+ "scsi_dev", &target_stat_scsi_dev_cit);
+ config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group,
+ "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
+ config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_lu_group,
+ "scsi_lu", &target_stat_scsi_lu_cit);
+
+ dev_stat_grp->default_groups[0] = &DEV_STAT_GRP(se_subdev)->scsi_dev_group;
+ dev_stat_grp->default_groups[1] = &DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group;
+ dev_stat_grp->default_groups[2] = &DEV_STAT_GRP(se_subdev)->scsi_lu_group;
+ dev_stat_grp->default_groups[3] = NULL;
+}
+
+/*
+ * SCSI Port Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_port, se_port_stat_grps);
+#define DEV_STAT_SCSI_PORT_ATTR(_name, _mode) \
+static struct target_stat_scsi_port_attribute \
+ target_stat_scsi_port_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_stat_scsi_port_show_attr_##_name, \
+ target_stat_scsi_port_store_attr_##_name);
+
+#define DEV_STAT_SCSI_PORT_ATTR_RO(_name) \
+static struct target_stat_scsi_port_attribute \
+ target_stat_scsi_port_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_stat_scsi_port_show_attr_##_name);
+
+static ssize_t target_stat_scsi_port_show_attr_inst(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_port *sep;
+ struct se_device *dev = lun->lun_se_dev;
+ struct se_hba *hba;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ hba = dev->se_hba;
+ ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_PORT_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_port_show_attr_dev(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_port *sep;
+ struct se_device *dev = lun->lun_se_dev;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_PORT_ATTR_RO(dev);
+
+static ssize_t target_stat_scsi_port_show_attr_indx(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_port *sep;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_PORT_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_port_show_attr_role(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_device *dev = lun->lun_se_dev;
+ struct se_port *sep;
+ ssize_t ret;
+
+ if (!dev)
+ return -ENODEV;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index);
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_PORT_ATTR_RO(role);
+
+static ssize_t target_stat_scsi_port_show_attr_busy_count(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_port *sep;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ /* FIXME: scsiPortBusyStatuses */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_PORT_ATTR_RO(busy_count);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_port, se_port_stat_grps, scsi_port_group);
+
+static struct configfs_attribute *target_stat_scsi_port_attrs[] = {
+ &target_stat_scsi_port_inst.attr,
+ &target_stat_scsi_port_dev.attr,
+ &target_stat_scsi_port_indx.attr,
+ &target_stat_scsi_port_role.attr,
+ &target_stat_scsi_port_busy_count.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_port_attrib_ops = {
+ .show_attribute = target_stat_scsi_port_attr_show,
+ .store_attribute = target_stat_scsi_port_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_port_cit = {
+ .ct_item_ops = &target_stat_scsi_port_attrib_ops,
+ .ct_attrs = target_stat_scsi_port_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * SCSI Target Port Table
+ */
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_port, se_port_stat_grps);
+#define DEV_STAT_SCSI_TGT_PORT_ATTR(_name, _mode) \
+static struct target_stat_scsi_tgt_port_attribute \
+ target_stat_scsi_tgt_port_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_stat_scsi_tgt_port_show_attr_##_name, \
+ target_stat_scsi_tgt_port_store_attr_##_name);
+
+#define DEV_STAT_SCSI_TGT_PORT_ATTR_RO(_name) \
+static struct target_stat_scsi_tgt_port_attribute \
+ target_stat_scsi_tgt_port_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_stat_scsi_tgt_port_show_attr_##_name);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_inst(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_device *dev = lun->lun_se_dev;
+ struct se_port *sep;
+ struct se_hba *hba;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ hba = dev->se_hba;
+ ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_dev(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_device *dev = lun->lun_se_dev;
+ struct se_port *sep;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(dev);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_indx(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_port *sep;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_name(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_port *sep;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ tpg = sep->sep_tpg;
+
+ ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
+ TPG_TFO(tpg)->get_fabric_name(), sep->sep_index);
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(name);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_port_index(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_port *sep;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ tpg = sep->sep_tpg;
+
+ ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
+ TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(port_index);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_port *sep;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ tpg = sep->sep_tpg;
+
+ ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus);
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(in_cmds);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_port *sep;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ tpg = sep->sep_tpg;
+
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ (u32)(sep->sep_stats.rx_data_octets >> 20));
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(write_mbytes);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_port *sep;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ tpg = sep->sep_tpg;
+
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ (u32)(sep->sep_stats.tx_data_octets >> 20));
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(read_mbytes);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_port *sep;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ tpg = sep->sep_tpg;
+
+ /* FIXME: scsiTgtPortHsInCommands */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(hs_in_cmds);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_port, se_port_stat_grps,
+ scsi_tgt_port_group);
+
+static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = {
+ &target_stat_scsi_tgt_port_inst.attr,
+ &target_stat_scsi_tgt_port_dev.attr,
+ &target_stat_scsi_tgt_port_indx.attr,
+ &target_stat_scsi_tgt_port_name.attr,
+ &target_stat_scsi_tgt_port_port_index.attr,
+ &target_stat_scsi_tgt_port_in_cmds.attr,
+ &target_stat_scsi_tgt_port_write_mbytes.attr,
+ &target_stat_scsi_tgt_port_read_mbytes.attr,
+ &target_stat_scsi_tgt_port_hs_in_cmds.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_tgt_port_attrib_ops = {
+ .show_attribute = target_stat_scsi_tgt_port_attr_show,
+ .store_attribute = target_stat_scsi_tgt_port_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_tgt_port_cit = {
+ .ct_item_ops = &target_stat_scsi_tgt_port_attrib_ops,
+ .ct_attrs = target_stat_scsi_tgt_port_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * SCSI Transport Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_transport, se_port_stat_grps);
+#define DEV_STAT_SCSI_TRANSPORT_ATTR(_name, _mode) \
+static struct target_stat_scsi_transport_attribute \
+ target_stat_scsi_transport_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_stat_scsi_transport_show_attr_##_name, \
+ target_stat_scsi_transport_store_attr_##_name);
+
+#define DEV_STAT_SCSI_TRANSPORT_ATTR_RO(_name) \
+static struct target_stat_scsi_transport_attribute \
+ target_stat_scsi_transport_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_stat_scsi_transport_show_attr_##_name);
+
+static ssize_t target_stat_scsi_transport_show_attr_inst(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_device *dev = lun->lun_se_dev;
+ struct se_port *sep;
+ struct se_hba *hba;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+
+ hba = dev->se_hba;
+ ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_TRANSPORT_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_transport_show_attr_device(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_port *sep;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ tpg = sep->sep_tpg;
+ /* scsiTransportType */
+ ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
+ TPG_TFO(tpg)->get_fabric_name());
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_TRANSPORT_ATTR_RO(device);
+
+static ssize_t target_stat_scsi_transport_show_attr_indx(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_port *sep;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ tpg = sep->sep_tpg;
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ TPG_TFO(tpg)->tpg_get_inst_index(tpg));
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_TRANSPORT_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_transport_show_attr_dev_name(
+ struct se_port_stat_grps *pgrps, char *page)
+{
+ struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+ struct se_device *dev = lun->lun_se_dev;
+ struct se_port *sep;
+ struct se_portal_group *tpg;
+ struct t10_wwn *wwn;
+ ssize_t ret;
+
+ spin_lock(&lun->lun_sep_lock);
+ sep = lun->lun_sep;
+ if (!sep) {
+ spin_unlock(&lun->lun_sep_lock);
+ return -ENODEV;
+ }
+ tpg = sep->sep_tpg;
+ wwn = DEV_T10_WWN(dev);
+ /* scsiTransportDevName */
+ ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
+ TPG_TFO(tpg)->tpg_get_wwn(tpg),
+ (strlen(wwn->unit_serial)) ? wwn->unit_serial :
+ wwn->vendor);
+ spin_unlock(&lun->lun_sep_lock);
+ return ret;
+}
+DEV_STAT_SCSI_TRANSPORT_ATTR_RO(dev_name);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_transport, se_port_stat_grps,
+ scsi_transport_group);
+
+static struct configfs_attribute *target_stat_scsi_transport_attrs[] = {
+ &target_stat_scsi_transport_inst.attr,
+ &target_stat_scsi_transport_device.attr,
+ &target_stat_scsi_transport_indx.attr,
+ &target_stat_scsi_transport_dev_name.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_transport_attrib_ops = {
+ .show_attribute = target_stat_scsi_transport_attr_show,
+ .store_attribute = target_stat_scsi_transport_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_transport_cit = {
+ .ct_item_ops = &target_stat_scsi_transport_attrib_ops,
+ .ct_attrs = target_stat_scsi_transport_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Called from target_core_fabric_configfs.c:target_fabric_make_lun() to set up
+ * the target port statistics groups + configfs CITs located in target_core_stat.c
+ */
+void target_stat_setup_port_default_groups(struct se_lun *lun)
+{
+ struct config_group *port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+
+ config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_port_group,
+ "scsi_port", &target_stat_scsi_port_cit);
+ config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_tgt_port_group,
+ "scsi_tgt_port", &target_stat_scsi_tgt_port_cit);
+ config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_transport_group,
+ "scsi_transport", &target_stat_scsi_transport_cit);
+
+ port_stat_grp->default_groups[0] = &PORT_STAT_GRP(lun)->scsi_port_group;
+ port_stat_grp->default_groups[1] = &PORT_STAT_GRP(lun)->scsi_tgt_port_group;
+ port_stat_grp->default_groups[2] = &PORT_STAT_GRP(lun)->scsi_transport_group;
+ port_stat_grp->default_groups[3] = NULL;
+}
+
+/*
+ * SCSI Authorized Initiator Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_auth_intr, se_ml_stat_grps);
+#define DEV_STAT_SCSI_AUTH_INTR_ATTR(_name, _mode) \
+static struct target_stat_scsi_auth_intr_attribute \
+ target_stat_scsi_auth_intr_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_stat_scsi_auth_intr_show_attr_##_name, \
+ target_stat_scsi_auth_intr_store_attr_##_name);
+
+#define DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(_name) \
+static struct target_stat_scsi_auth_intr_attribute \
+ target_stat_scsi_auth_intr_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_stat_scsi_auth_intr_show_attr_##_name);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_inst(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ tpg = nacl->se_tpg;
+ /* scsiInstIndex */
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ TPG_TFO(tpg)->tpg_get_inst_index(tpg));
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_dev(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ struct se_lun *lun;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ tpg = nacl->se_tpg;
+ lun = deve->se_lun;
+ /* scsiDeviceIndex */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_port(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ tpg = nacl->se_tpg;
+ /* scsiAuthIntrTgtPortIndex */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(port);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_indx(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ /* scsiAuthIntrIndex */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ /* scsiAuthIntrDevOrPort */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", 1);
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev_or_port);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ /* scsiAuthIntrName */
+ ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname);
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(intr_name);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ /* FIXME: scsiAuthIntrLunMapIndex */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(map_indx);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_att_count(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ /* scsiAuthIntrAttachedTimes */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count);
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(att_count);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ /* scsiAuthIntrOutCommands */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", deve->total_cmds);
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(num_cmds);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ /* scsiAuthIntrReadMegaBytes */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->read_bytes >> 20));
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(read_mbytes);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ /* scsiAuthIntrWrittenMegaBytes */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->write_bytes >> 20));
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(write_mbytes);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ /* FIXME: scsiAuthIntrHSOutCommands */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(hs_num_cmds);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ /* scsiAuthIntrLastCreation */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time -
+ INITIAL_JIFFIES) * 100 / HZ));
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(creation_time);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_row_status(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ /* FIXME: scsiAuthIntrRowStatus */
+ ret = snprintf(page, PAGE_SIZE, "Ready\n");
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(row_status);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_auth_intr, se_ml_stat_grps,
+ scsi_auth_intr_group);
+
+static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = {
+ &target_stat_scsi_auth_intr_inst.attr,
+ &target_stat_scsi_auth_intr_dev.attr,
+ &target_stat_scsi_auth_intr_port.attr,
+ &target_stat_scsi_auth_intr_indx.attr,
+ &target_stat_scsi_auth_intr_dev_or_port.attr,
+ &target_stat_scsi_auth_intr_intr_name.attr,
+ &target_stat_scsi_auth_intr_map_indx.attr,
+ &target_stat_scsi_auth_intr_att_count.attr,
+ &target_stat_scsi_auth_intr_num_cmds.attr,
+ &target_stat_scsi_auth_intr_read_mbytes.attr,
+ &target_stat_scsi_auth_intr_write_mbytes.attr,
+ &target_stat_scsi_auth_intr_hs_num_cmds.attr,
+ &target_stat_scsi_auth_intr_creation_time.attr,
+ &target_stat_scsi_auth_intr_row_status.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_auth_intr_attrib_ops = {
+ .show_attribute = target_stat_scsi_auth_intr_attr_show,
+ .store_attribute = target_stat_scsi_auth_intr_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_auth_intr_cit = {
+ .ct_item_ops = &target_stat_scsi_auth_intr_attrib_ops,
+ .ct_attrs = target_stat_scsi_auth_intr_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * SCSI Attached Initiator Port Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_att_intr_port, se_ml_stat_grps);
+#define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR(_name, _mode) \
+static struct target_stat_scsi_att_intr_port_attribute \
+ target_stat_scsi_att_intr_port_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_stat_scsi_att_intr_port_show_attr_##_name, \
+ target_stat_scsi_att_intr_port_store_attr_##_name);
+
+#define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(_name) \
+static struct target_stat_scsi_att_intr_port_attribute \
+ target_stat_scsi_att_intr_port_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_stat_scsi_att_intr_port_show_attr_##_name);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ tpg = nacl->se_tpg;
+ /* scsiInstIndex */
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ TPG_TFO(tpg)->tpg_get_inst_index(tpg));
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_dev(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ struct se_lun *lun;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ tpg = nacl->se_tpg;
+ lun = deve->se_lun;
+ /* scsiDeviceIndex */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(dev);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_port(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ tpg = nacl->se_tpg;
+ /* scsiPortIndex */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_indx(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_session *se_sess;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->nacl_sess_lock);
+ se_sess = nacl->nacl_sess;
+ if (!se_sess) {
+ spin_unlock_irq(&nacl->nacl_sess_lock);
+ return -ENODEV;
+ }
+
+ tpg = nacl->se_tpg;
+ /* scsiAttIntrPortIndex */
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ TPG_TFO(tpg)->sess_get_index(se_sess));
+ spin_unlock_irq(&nacl->nacl_sess_lock);
+ return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ ssize_t ret;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[lacl->mapped_lun];
+ if (!deve->se_lun || !deve->se_lun_acl) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -ENODEV;
+ }
+ /* scsiAttIntrPortAuthIntrIdx */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
+ spin_unlock_irq(&nacl->device_list_lock);
+ return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_auth_indx);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
+ struct se_ml_stat_grps *lgrps, char *page)
+{
+ struct se_lun_acl *lacl = container_of(lgrps,
+ struct se_lun_acl, ml_stat_grps);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_session *se_sess;
+ struct se_portal_group *tpg;
+ ssize_t ret;
+ unsigned char buf[64];
+
+ spin_lock_irq(&nacl->nacl_sess_lock);
+ se_sess = nacl->nacl_sess;
+ if (!se_sess) {
+ spin_unlock_irq(&nacl->nacl_sess_lock);
+ return -ENODEV;
+ }
+
+ tpg = nacl->se_tpg;
+ /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
+ memset(buf, 0, 64);
+ if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL)
+ TPG_TFO(tpg)->sess_get_initiator_sid(se_sess,
+ (unsigned char *)&buf[0], 64);
+
+ ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
+ spin_unlock_irq(&nacl->nacl_sess_lock);
+ return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_ident);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_att_intr_port, se_ml_stat_grps,
+ scsi_att_intr_port_group);
+
+static struct configfs_attribute *target_stat_scsi_att_intr_port_attrs[] = {
+ &target_stat_scsi_att_intr_port_inst.attr,
+ &target_stat_scsi_att_intr_port_dev.attr,
+ &target_stat_scsi_att_intr_port_port.attr,
+ &target_stat_scsi_att_intr_port_indx.attr,
+ &target_stat_scsi_att_intr_port_port_auth_indx.attr,
+ &target_stat_scsi_att_intr_port_port_ident.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_att_intr_port_attrib_ops = {
+ .show_attribute = target_stat_scsi_att_intr_port_attr_show,
+ .store_attribute = target_stat_scsi_att_intr_port_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_att_intr_port_cit = {
+ .ct_item_ops = &target_stat_scsi_att_intr_port_attrib_ops,
+ .ct_attrs = target_stat_scsi_att_intr_port_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Called from target_core_fabric_configfs.c:target_fabric_make_mappedlun() to set up
+ * the target MappedLUN statistics groups + configfs CITs located in target_core_stat.c
+ */
+void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl)
+{
+ struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+
+ config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_auth_intr_group,
+ "scsi_auth_intr", &target_stat_scsi_auth_intr_cit);
+ config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_att_intr_port_group,
+ "scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit);
+
+ ml_stat_grp->default_groups[0] = &ML_STAT_GRPS(lacl)->scsi_auth_intr_group;
+ ml_stat_grp->default_groups[1] = &ML_STAT_GRPS(lacl)->scsi_att_intr_port_group;
+ ml_stat_grp->default_groups[2] = NULL;
+}
diff --git a/drivers/target/target_core_stat.h b/drivers/target/target_core_stat.h
new file mode 100644
index 000000000000..86c252f9ea47
--- /dev/null
+++ b/drivers/target/target_core_stat.h
@@ -0,0 +1,8 @@
+#ifndef TARGET_CORE_STAT_H
+#define TARGET_CORE_STAT_H
+
+extern void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
+extern void target_stat_setup_port_default_groups(struct se_lun *);
+extern void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
+
+#endif /*** TARGET_CORE_STAT_H ***/
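The header above only declares the three setup hooks; the comments in target_core_stat.c note that they are invoked from target_core_fabric_configfs.c (target_fabric_make_lun() and target_fabric_make_mappedlun()) once the parent statistics group for a LUN or MappedLUN has been created. A minimal caller sketch follows; the function name example_attach_stat_groups and the idea of invoking both hooks from one place are assumptions made purely for illustration and are not part of this patch.

/*
 * Illustration only: in the real code target_fabric_make_lun() and
 * target_fabric_make_mappedlun() each call one of these hooks after
 * initializing the corresponding parent statistics config_group.
 */
#include "target_core_stat.h"

static void example_attach_stat_groups(struct se_lun *lun,
				       struct se_lun_acl *lacl)
{
	/* Register the scsi_port/scsi_tgt_port/scsi_transport default groups. */
	target_stat_setup_port_default_groups(lun);

	/* Register the scsi_auth_intr/scsi_att_intr_port default groups. */
	target_stat_setup_mappedlun_default_groups(lacl);
}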
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ff9ace01e27a..bf6aa8a9f1d8 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -227,8 +227,6 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_stop_all_task_timers(struct se_cmd *cmd);
-int transport_emulate_control_cdb(struct se_task *task);
-
int init_se_global(void)
{
struct se_global *global;
@@ -1622,7 +1620,7 @@ struct se_device *transport_add_device_to_core_hba(
const char *inquiry_prod,
const char *inquiry_rev)
{
- int ret = 0, force_pt;
+ int force_pt;
struct se_device *dev;
dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
@@ -1739,9 +1737,8 @@ struct se_device *transport_add_device_to_core_hba(
}
scsi_dump_inquiry(dev);
+ return dev;
out:
- if (!ret)
- return dev;
kthread_stop(dev->process_thread);
spin_lock(&hba->device_lock);
@@ -4359,11 +4356,9 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
printk(KERN_ERR "Unable to allocate struct se_mem\n");
goto out;
}
- INIT_LIST_HEAD(&se_mem->se_list);
- se_mem->se_len = (length > dma_size) ? dma_size : length;
/* #warning FIXME Allocate contiguous pages for struct se_mem elements */
- se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0);
+ se_mem->se_page = alloc_pages(GFP_KERNEL, 0);
if (!(se_mem->se_page)) {
printk(KERN_ERR "alloc_pages() failed\n");
goto out;
@@ -4374,6 +4369,8 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
printk(KERN_ERR "kmap_atomic() failed\n");
goto out;
}
+ INIT_LIST_HEAD(&se_mem->se_list);
+ se_mem->se_len = (length > dma_size) ? dma_size : length;
memset(buf, 0, se_mem->se_len);
kunmap_atomic(buf, KM_IRQ0);
@@ -4392,10 +4389,13 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
return 0;
out:
+ if (se_mem)
+ __free_pages(se_mem->se_page, 0);
+ kmem_cache_free(se_mem_cache, se_mem);
return -1;
}
-extern u32 transport_calc_sg_num(
+u32 transport_calc_sg_num(
struct se_task *task,
struct se_mem *in_se_mem,
u32 task_offset)
@@ -5834,31 +5834,26 @@ int transport_generic_do_tmr(struct se_cmd *cmd)
int ret;
switch (tmr->function) {
- case ABORT_TASK:
+ case TMR_ABORT_TASK:
ref_cmd = tmr->ref_cmd;
tmr->response = TMR_FUNCTION_REJECTED;
break;
- case ABORT_TASK_SET:
- case CLEAR_ACA:
- case CLEAR_TASK_SET:
+ case TMR_ABORT_TASK_SET:
+ case TMR_CLEAR_ACA:
+ case TMR_CLEAR_TASK_SET:
tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
break;
- case LUN_RESET:
+ case TMR_LUN_RESET:
ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
TMR_FUNCTION_REJECTED;
break;
-#if 0
- case TARGET_WARM_RESET:
- transport_generic_host_reset(dev->se_hba);
+ case TMR_TARGET_WARM_RESET:
tmr->response = TMR_FUNCTION_REJECTED;
break;
- case TARGET_COLD_RESET:
- transport_generic_host_reset(dev->se_hba);
- transport_generic_cold_reset(dev->se_hba);
+ case TMR_TARGET_COLD_RESET:
tmr->response = TMR_FUNCTION_REJECTED;
break;
-#endif
default:
printk(KERN_ERR "Uknown TMR function: 0x%02x.\n",
tmr->function);
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 25a8bc565f40..87e7e6c876d4 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -131,7 +131,7 @@ static void kgdboc_unregister_kbd(void)
static int kgdboc_option_setup(char *opt)
{
- if (strlen(opt) > MAX_CONFIG_LEN) {
+ if (strlen(opt) >= MAX_CONFIG_LEN) {
printk(KERN_ERR "kgdboc: config string too long\n");
return -ENOSPC;
}
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 81f13958e751..43db715f1502 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -306,7 +306,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = {
static void sysrq_handle_showmem(int key)
{
- show_mem();
+ show_mem(0);
}
static struct sysrq_key_op sysrq_showmem_op = {
.handler = sysrq_handle_showmem,
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 6dd3c68c13ad..d6b342b5b423 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -600,7 +600,7 @@ static void fn_scroll_back(struct vc_data *vc)
static void fn_show_mem(struct vc_data *vc)
{
- show_mem();
+ show_mem(0);
}
static void fn_show_state(struct vc_data *vc)