author     Jeff Garzik <jgarzik@pobox.com>    2006-02-20 02:16:23 -0500
committer  Jeff Garzik <jgarzik@pobox.com>    2006-02-20 02:16:23 -0500
commit     5b2ffed906a3ebd4e52a5bbef06b99a517c53e4b (patch)
tree       2f900f89d93db6b0822d8bdf4f49851c581c12a6
parent     f1b318793dcd2d9ff6b5ac06e7762098fa079cee (diff)
parent     bd71c2b17468a2531fb4c81ec1d73520845e97e1 (diff)
Merge branch 'master'
333 files changed, 5473 insertions, 3200 deletions
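For readers following along in a local tree, the merge can be inspected with ordinary git commands. A minimal sketch, assuming a clone that contains this history (the hashes are the commit and first-parent IDs listed above):

    # commits brought in from 'master' by this merge
    git log --oneline f1b318793dcd2d9ff6b5ac06e7762098fa079cee..5b2ffed906a3ebd4e52a5bbef06b99a517c53e4b

    # diffstat of the merge relative to its first parent, which should
    # correspond to the file/insertion/deletion summary above
    git diff --stat f1b318793dcd2d9ff6b5ac06e7762098fa079cee 5b2ffed906a3ebd4e52a5bbef06b99a517c53e4b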
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt index 08c5d04f3086..e71bc6cbbc5e 100644 --- a/Documentation/cpu-hotplug.txt +++ b/Documentation/cpu-hotplug.txt @@ -11,6 +11,8 @@ Joel Schopp <jschopp@austin.ibm.com> ia64/x86_64: Ashok Raj <ashok.raj@intel.com> + s390: + Heiko Carstens <heiko.carstens@de.ibm.com> Authors: Ashok Raj <ashok.raj@intel.com> Lots of feedback: Nathan Lynch <nathanl@austin.ibm.com>, @@ -44,9 +46,23 @@ maxcpus=n Restrict boot time cpus to n. Say if you have 4 cpus, using maxcpus=2 will only boot 2. You can choose to bring the other cpus later online, read FAQ's for more info. -additional_cpus=n [x86_64 only] use this to limit hotpluggable cpus. - This option sets - cpu_possible_map = cpu_present_map + additional_cpus +additional_cpus=n [x86_64, s390 only] use this to limit hotpluggable cpus. + This option sets + cpu_possible_map = cpu_present_map + additional_cpus + +ia64 and x86_64 use the number of disabled local apics in ACPI tables MADT +to determine the number of potentially hot-pluggable cpus. The implementation +should only rely on this to count the #of cpus, but *MUST* not rely on the +apicid values in those tables for disabled apics. In the event BIOS doesnt +mark such hot-pluggable cpus as disabled entries, one could use this +parameter "additional_cpus=x" to represent those cpus in the cpu_possible_map. + + +possible_cpus=n [s390 only] use this to set hotpluggable cpus. + This option sets possible_cpus bits in + cpu_possible_map. Thus keeping the numbers of bits set + constant even if the machine gets rebooted. + This option overrides additional_cpus. CPU maps and such ----------------- diff --git a/Documentation/fujitsu/frv/kernel-ABI.txt b/Documentation/fujitsu/frv/kernel-ABI.txt new file mode 100644 index 000000000000..0ed9b0a779bc --- /dev/null +++ b/Documentation/fujitsu/frv/kernel-ABI.txt @@ -0,0 +1,234 @@ + ================================= + INTERNAL KERNEL ABI FOR FR-V ARCH + ================================= + +The internal FRV kernel ABI is not quite the same as the userspace ABI. A number of the registers +are used for special purposed, and the ABI is not consistent between modules vs core, and MMU vs +no-MMU. + +This partly stems from the fact that FRV CPUs do not have a separate supervisor stack pointer, and +most of them do not have any scratch registers, thus requiring at least one general purpose +register to be clobbered in such an event. Also, within the kernel core, it is possible to simply +jump or call directly between functions using a relative offset. This cannot be extended to modules +for the displacement is likely to be too far. Thus in modules the address of a function to call +must be calculated in a register and then used, requiring two extra instructions. + +This document has the following sections: + + (*) System call register ABI + (*) CPU operating modes + (*) Internal kernel-mode register ABI + (*) Internal debug-mode register ABI + (*) Virtual interrupt handling + + +======================== +SYSTEM CALL REGISTER ABI +======================== + +When a system call is made, the following registers are effective: + + REGISTERS CALL RETURN + =============== ======================= ======================= + GR7 System call number Preserved + GR8 Syscall arg #1 Return value + GR9-GR13 Syscall arg #2-6 Preserved + + +=================== +CPU OPERATING MODES +=================== + +The FR-V CPU has three basic operating modes. In order of increasing capability: + + (1) User mode. 
+ + Basic userspace running mode. + + (2) Kernel mode. + + Normal kernel mode. There are many additional control registers available that may be + accessed in this mode, in addition to all the stuff available to user mode. This has two + submodes: + + (a) Exceptions enabled (PSR.T == 1). + + Exceptions will invoke the appropriate normal kernel mode handler. On entry to the + handler, the PSR.T bit will be cleared. + + (b) Exceptions disabled (PSR.T == 0). + + No exceptions or interrupts may happen. Any mandatory exceptions will cause the CPU to + halt unless the CPU is told to jump into debug mode instead. + + (3) Debug mode. + + No exceptions may happen in this mode. Memory protection and management exceptions will be + flagged for later consideration, but the exception handler won't be invoked. Debugging traps + such as hardware breakpoints and watchpoints will be ignored. This mode is entered only by + debugging events obtained from the other two modes. + + All kernel mode registers may be accessed, plus a few extra debugging specific registers. + + +================================= +INTERNAL KERNEL-MODE REGISTER ABI +================================= + +There are a number of permanent register assignments that are set up by entry.S in the exception +prologue. Note that there is a complete set of exception prologues for each of user->kernel +transition and kernel->kernel transition. There are also user->debug and kernel->debug mode +transition prologues. + + + REGISTER FLAVOUR USE + =============== ======= ==================================================== + GR1 Supervisor stack pointer + GR15 Current thread info pointer + GR16 GP-Rel base register for small data + GR28 Current exception frame pointer (__frame) + GR29 Current task pointer (current) + GR30 Destroyed by kernel mode entry + GR31 NOMMU Destroyed by debug mode entry + GR31 MMU Destroyed by TLB miss kernel mode entry + CCR.ICC2 Virtual interrupt disablement tracking + CCCR.CC3 Cleared by exception prologue (atomic op emulation) + SCR0 MMU See mmu-layout.txt. + SCR1 MMU See mmu-layout.txt. + SCR2 MMU Save for EAR0 (destroyed by icache insns in debug mode) + SCR3 MMU Save for GR31 during debug exceptions + DAMR/IAMR NOMMU Fixed memory protection layout. + DAMR/IAMR MMU See mmu-layout.txt. + + +Certain registers are also used or modified across function calls: + + REGISTER CALL RETURN + =============== =============================== =============================== + GR0 Fixed Zero - + GR2 Function call frame pointer + GR3 Special Preserved + GR3-GR7 - Clobbered + GR8 Function call arg #1 Return value (or clobbered) + GR9 Function call arg #2 Return value MSW (or clobbered) + GR10-GR13 Function call arg #3-#6 Clobbered + GR14 - Clobbered + GR15-GR16 Special Preserved + GR17-GR27 - Preserved + GR28-GR31 Special Only accessed explicitly + LR Return address after CALL Clobbered + CCR/CCCR - Mostly Clobbered + + +================================ +INTERNAL DEBUG-MODE REGISTER ABI +================================ + +This is the same as the kernel-mode register ABI for functions calls. The difference is that in +debug-mode there's a different stack and a different exception frame. Almost all the global +registers from kernel-mode (including the stack pointer) may be changed. 
+ + REGISTER FLAVOUR USE + =============== ======= ==================================================== + GR1 Debug stack pointer + GR16 GP-Rel base register for small data + GR31 Current debug exception frame pointer (__debug_frame) + SCR3 MMU Saved value of GR31 + + +Note that debug mode is able to interfere with the kernel's emulated atomic ops, so it must be +exceedingly careful not to do any that would interact with the main kernel in this regard. Hence +the debug mode code (gdbstub) is almost completely self-contained. The only external code used is +the sprintf family of functions. + +Futhermore, break.S is so complicated because single-step mode does not switch off on entry to an +exception. That means unless manually disabled, single-stepping will blithely go on stepping into +things like interrupts. See gdbstub.txt for more information. + + +========================== +VIRTUAL INTERRUPT HANDLING +========================== + +Because accesses to the PSR is so slow, and to disable interrupts we have to access it twice (once +to read and once to write), we don't actually disable interrupts at all if we don't have to. What +we do instead is use the ICC2 condition code flags to note virtual disablement, such that if we +then do take an interrupt, we note the flag, really disable interrupts, set another flag and resume +execution at the point the interrupt happened. Setting condition flags as a side effect of an +arithmetic or logical instruction is really fast. This use of the ICC2 only occurs within the +kernel - it does not affect userspace. + +The flags we use are: + + (*) CCR.ICC2.Z [Zero flag] + + Set to virtually disable interrupts, clear when interrupts are virtually enabled. Can be + modified by logical instructions without affecting the Carry flag. + + (*) CCR.ICC2.C [Carry flag] + + Clear to indicate hardware interrupts are really disabled, set otherwise. + + +What happens is this: + + (1) Normal kernel-mode operation. + + ICC2.Z is 0, ICC2.C is 1. + + (2) An interrupt occurs. The exception prologue examines ICC2.Z and determines that nothing needs + doing. This is done simply with an unlikely BEQ instruction. + + (3) The interrupts are disabled (local_irq_disable) + + ICC2.Z is set to 1. + + (4) If interrupts were then re-enabled (local_irq_enable): + + ICC2.Z would be set to 0. + + A TIHI #2 instruction (trap #2 if condition HI - Z==0 && C==0) would be used to trap if + interrupts were now virtually enabled, but physically disabled - which they're not, so the + trap isn't taken. The kernel would then be back to state (1). + + (5) An interrupt occurs. The exception prologue examines ICC2.Z and determines that the interrupt + shouldn't actually have happened. It jumps aside, and there disabled interrupts by setting + PSR.PIL to 14 and then it clears ICC2.C. + + (6) If interrupts were then saved and disabled again (local_irq_save): + + ICC2.Z would be shifted into the save variable and masked off (giving a 1). + + ICC2.Z would then be set to 1 (thus unchanged), and ICC2.C would be unaffected (ie: 0). + + (7) If interrupts were then restored from state (6) (local_irq_restore): + + ICC2.Z would be set to indicate the result of XOR'ing the saved value (ie: 1) with 1, which + gives a result of 0 - thus leaving ICC2.Z set. + + ICC2.C would remain unaffected (ie: 0). + + A TIHI #2 instruction would be used to again assay the current state, but this would do + nothing as Z==1. + + (8) If interrupts were then enabled (local_irq_enable): + + ICC2.Z would be cleared. 
ICC2.C would be left unaffected. Both flags would now be 0. + + A TIHI #2 instruction again issued to assay the current state would then trap as both Z==0 + [interrupts virtually enabled] and C==0 [interrupts really disabled] would then be true. + + (9) The trap #2 handler would simply enable hardware interrupts (set PSR.PIL to 0), set ICC2.C to + 1 and return. + +(10) Immediately upon returning, the pending interrupt would be taken. + +(11) The interrupt handler would take the path of actually processing the interrupt (ICC2.Z is + clear, BEQ fails as per step (2)). + +(12) The interrupt handler would then set ICC2.C to 1 since hardware interrupts are definitely + enabled - or else the kernel wouldn't be here. + +(13) On return from the interrupt handler, things would be back to state (1). + +This trap (#2) is only available in kernel mode. In user mode it will result in SIGILL. diff --git a/Documentation/hwmon/w83627hf b/Documentation/hwmon/w83627hf index 5d23776e9907..bbeaba680443 100644 --- a/Documentation/hwmon/w83627hf +++ b/Documentation/hwmon/w83627hf @@ -36,6 +36,10 @@ Module Parameters (default is 1) Use 'init=0' to bypass initializing the chip. Try this if your computer crashes when you load the module. +* reset: int + (default is 0) + The driver used to reset the chip on load, but does no more. Use + 'reset=1' to restore the old behavior. Report if you need to do this. Description ----------- diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 84370363da80..b874771385cd 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1133,6 +1133,8 @@ running once the system is up. Mechanism 1. conf2 [IA-32] Force use of PCI Configuration Mechanism 2. + nommconf [IA-32,X86_64] Disable use of MMCONFIG for PCI + Configuration nosort [IA-32] Don't sort PCI devices according to order given by the PCI BIOS. This sorting is done to get a device order compatible with @@ -1636,6 +1638,9 @@ running once the system is up. Format: <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]] + norandmaps Don't use address space randomization + Equivalent to echo 0 > /proc/sys/kernel/randomize_va_space + ______________________________________________________________________ Changelog: diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index 0ea5a0c6e827..2c3b1eae4280 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt @@ -136,17 +136,20 @@ Kprobes, jprobes, and return probes are implemented on the following architectures: - i386 -- x86_64 (AMD-64, E64MT) +- x86_64 (AMD-64, EM64T) - ppc64 -- ia64 (Support for probes on certain instruction types is still in progress.) +- ia64 (Does not support probes on instruction slot1.) - sparc64 (Return probes not yet implemented.) 3. Configuring Kprobes When configuring the kernel using make menuconfig/xconfig/oldconfig, -ensure that CONFIG_KPROBES is set to "y". Under "Kernel hacking", -look for "Kprobes". You may have to enable "Kernel debugging" -(CONFIG_DEBUG_KERNEL) before you can enable Kprobes. +ensure that CONFIG_KPROBES is set to "y". Under "Instrumentation +Support", look for "Kprobes". + +So that you can load and unload Kprobes-based instrumentation modules, +make sure "Loadable module support" (CONFIG_MODULES) and "Module +unloading" (CONFIG_MODULE_UNLOAD) are set to "y". 
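As a quick reference, the options named in the paragraph above amount to the following .config fragment (a sketch for illustration only; the symbol names are taken from the text, and their menu locations vary between kernel versions):

    CONFIG_KPROBES=y
    CONFIG_MODULES=y
    CONFIG_MODULE_UNLOAD=y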
You may also want to ensure that CONFIG_KALLSYMS and perhaps even CONFIG_KALLSYMS_ALL are set to "y", since kallsyms_lookup_name() @@ -262,18 +265,18 @@ at any time after the probe has been registered. 5. Kprobes Features and Limitations -As of Linux v2.6.12, Kprobes allows multiple probes at the same -address. Currently, however, there cannot be multiple jprobes on -the same function at the same time. +Kprobes allows multiple probes at the same address. Currently, +however, there cannot be multiple jprobes on the same function at +the same time. In general, you can install a probe anywhere in the kernel. In particular, you can probe interrupt handlers. Known exceptions are discussed in this section. -For obvious reasons, it's a bad idea to install a probe in -the code that implements Kprobes (mostly kernel/kprobes.c and -arch/*/kernel/kprobes.c). A patch in the v2.6.13 timeframe instructs -Kprobes to reject such requests. +The register_*probe functions will return -EINVAL if you attempt +to install a probe in the code that implements Kprobes (mostly +kernel/kprobes.c and arch/*/kernel/kprobes.c, but also functions such +as do_page_fault and notifier_call_chain). If you install a probe in an inline-able function, Kprobes makes no attempt to chase down all inline instances of the function and @@ -290,18 +293,14 @@ from the accidental ones. Don't drink and probe. Kprobes makes no attempt to prevent probe handlers from stepping on each other -- e.g., probing printk() and then calling printk() from a -probe handler. As of Linux v2.6.12, if a probe handler hits a probe, -that second probe's handlers won't be run in that instance. - -In Linux v2.6.12 and previous versions, Kprobes' data structures are -protected by a single lock that is held during probe registration and -unregistration and while handlers are run. Thus, no two handlers -can run simultaneously. To improve scalability on SMP systems, -this restriction will probably be removed soon, in which case -multiple handlers (or multiple instances of the same handler) may -run concurrently on different CPUs. Code your handlers accordingly. - -Kprobes does not use semaphores or allocate memory except during +probe handler. If a probe handler hits a probe, that second probe's +handlers won't be run in that instance, and the kprobe.nmissed member +of the second probe will be incremented. + +As of Linux v2.6.15-rc1, multiple handlers (or multiple instances of +the same handler) may run concurrently on different CPUs. + +Kprobes does not use mutexes or allocate memory except during registration and unregistration. Probe handlers are run with preemption disabled. Depending on the @@ -316,11 +315,18 @@ address instead of the real return address for kretprobed functions. (As far as we can tell, __builtin_return_address() is used only for instrumentation and error reporting.) -If the number of times a function is called does not match the -number of times it returns, registering a return probe on that -function may produce undesirable results. We have the do_exit() -and do_execve() cases covered. do_fork() is not an issue. We're -unaware of other specific cases where this could be a problem. +If the number of times a function is called does not match the number +of times it returns, registering a return probe on that function may +produce undesirable results. We have the do_exit() case covered. +do_execve() and do_fork() are not an issue. We're unaware of other +specific cases where this could be a problem. 
+ +If, upon entry to or exit from a function, the CPU is running on +a stack other than that of the current task, registering a return +probe on that function may produce undesirable results. For this +reason, Kprobes doesn't support return probes (or kprobes or jprobes) +on the x86_64 version of __switch_to(); the registration functions +return -EINVAL. 6. Probe Overhead @@ -347,14 +353,12 @@ k = 0.77 usec; j = 1.31; r = 1.26; kr = 1.45; jr = 1.99 7. TODO -a. SystemTap (http://sourceware.org/systemtap): Work in progress -to provide a simplified programming interface for probe-based -instrumentation. -b. Improved SMP scalability: Currently, work is in progress to handle -multiple kprobes in parallel. -c. Kernel return probes for sparc64. -d. Support for other architectures. -e. User-space probes. +a. SystemTap (http://sourceware.org/systemtap): Provides a simplified +programming interface for probe-based instrumentation. Try it out. +b. Kernel return probes for sparc64. +c. Support for other architectures. +d. User-space probes. +e. Watchpoint probes (which fire on data references). 8. Kprobes Example @@ -411,8 +415,7 @@ int init_module(void) printk("Couldn't find %s to plant kprobe\n", "do_fork"); return -1; } - ret = register_kprobe(&kp); - if (ret < 0) { + if ((ret = register_kprobe(&kp) < 0)) { printk("register_kprobe failed, returned %d\n", ret); return -1; } diff --git a/Documentation/mips/AU1xxx_IDE.README b/Documentation/mips/AU1xxx_IDE.README index a7e4c4ea3560..afb31c141d9d 100644 --- a/Documentation/mips/AU1xxx_IDE.README +++ b/Documentation/mips/AU1xxx_IDE.README @@ -95,11 +95,13 @@ CONFIG_BLK_DEV_IDEDMA_PCI=y CONFIG_IDEDMA_PCI_AUTO=y CONFIG_BLK_DEV_IDE_AU1XXX=y CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA=y -CONFIG_BLK_DEV_IDE_AU1XXX_BURSTABLE_ON=y CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ=128 CONFIG_BLK_DEV_IDEDMA=y CONFIG_IDEDMA_AUTO=y +Also define 'IDE_AU1XXX_BURSTMODE' in 'drivers/ide/mips/au1xxx-ide.c' to enable +the burst support on DBDMA controller. + If the used system need the USB support enable the following kernel configs for high IDE to USB throughput. @@ -115,6 +117,8 @@ CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ=128 CONFIG_BLK_DEV_IDEDMA=y CONFIG_IDEDMA_AUTO=y +Also undefine 'IDE_AU1XXX_BURSTMODE' in 'drivers/ide/mips/au1xxx-ide.c' to +disable the burst support on DBDMA controller. ADD NEW HARD DISC TO WHITE OR BLACK LIST ---------------------------------------- diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas index f8c16cbf56ba..2dafa63bd370 100644 --- a/Documentation/scsi/ChangeLog.megaraid_sas +++ b/Documentation/scsi/ChangeLog.megaraid_sas @@ -1,3 +1,26 @@ +1 Release Date : Wed Feb 03 14:31:44 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com> +2 Current Version : 00.00.02.04 +3 Older Version : 00.00.02.04 + +i. Support for 1078 type (ppc IOP) controller, device id : 0x60 added. + During initialization, depending on the device id, the template members + are initialized with function pointers specific to the ppc or + xscale controllers. + + -Sumant Patro <Sumant.Patro@lsil.com> + +1 Release Date : Fri Feb 03 14:16:25 PST 2006 - Sumant Patro + <Sumant.Patro@lsil.com> +2 Current Version : 00.00.02.04 +3 Older Version : 00.00.02.02 +i. Register 16 byte CDB capability with scsi midlayer + + "Ths patch properly registers the 16 byte command length capability of the + megaraid_sas controlled hardware with the scsi midlayer. All megaraid_sas + hardware supports 16 byte CDB's." 
+ + -Joshua Giles <joshua_giles@dell.com> + 1 Release Date : Mon Jan 23 14:09:01 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com> 2 Current Version : 00.00.02.02 3 Older Version : 00.00.02.01 @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 16 -EXTRAVERSION =-rc3 +EXTRAVERSION =-rc4 NAME=Sliding Snow Leopard # *DOCUMENTATION* @@ -106,13 +106,12 @@ KBUILD_OUTPUT := $(shell cd $(KBUILD_OUTPUT) && /bin/pwd) $(if $(KBUILD_OUTPUT),, \ $(error output directory "$(saved-output)" does not exist)) -.PHONY: $(MAKECMDGOALS) cdbuilddir -$(MAKECMDGOALS) _all: cdbuilddir +.PHONY: $(MAKECMDGOALS) -cdbuilddir: +$(filter-out _all,$(MAKECMDGOALS)) _all: $(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \ KBUILD_SRC=$(CURDIR) \ - KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile $(MAKECMDGOALS) + KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile $@ # Leave processing to above invocation of make skip-makefile := 1 diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index 8c3035d5ffc9..3173924a9b60 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -111,7 +111,7 @@ CALL(sys_statfs) /* 100 */ CALL(sys_fstatfs) CALL(sys_ni_syscall) - CALL(OBSOLETE(sys_socketcall)) + CALL(OBSOLETE(ABI(sys_socketcall, sys_oabi_socketcall))) CALL(sys_syslog) CALL(sys_setitimer) /* 105 */ CALL(sys_getitimer) diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index c45d10d07bde..68273b4dc882 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -23,6 +23,7 @@ #include <linux/root_dev.h> #include <linux/cpu.h> #include <linux/interrupt.h> +#include <linux/smp.h> #include <asm/cpu.h> #include <asm/elf.h> @@ -771,6 +772,10 @@ void __init setup_arch(char **cmdline_p) paging_init(&meminfo, mdesc); request_standard_resources(&meminfo, mdesc); +#ifdef CONFIG_SMP + smp_init_cpus(); +#endif + cpu_init(); /* diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 7338948bd7d3..02aa300c4633 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -338,7 +338,6 @@ void __init smp_prepare_boot_cpu(void) per_cpu(cpu_data, cpu).idle = current; - cpu_set(cpu, cpu_possible_map); cpu_set(cpu, cpu_present_map); cpu_set(cpu, cpu_online_map); } diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 9d4b76409c64..8e2f9bc3368b 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -64,6 +64,7 @@ * sys_connect: * sys_sendmsg: * sys_sendto: + * sys_socketcall: * * struct sockaddr_un loses its padding with EABI. 
Since the size of the * structure is used as a validation test in unix_mkname(), we need to @@ -78,6 +79,7 @@ #include <linux/eventpoll.h> #include <linux/sem.h> #include <linux/socket.h> +#include <linux/net.h> #include <asm/ipc.h> #include <asm/uaccess.h> @@ -408,3 +410,31 @@ asmlinkage long sys_oabi_sendmsg(int fd, struct msghdr __user *msg, unsigned fla return sys_sendmsg(fd, msg, flags); } +asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args) +{ + unsigned long r = -EFAULT, a[6]; + + switch (call) { + case SYS_BIND: + if (copy_from_user(a, args, 3 * sizeof(long)) == 0) + r = sys_oabi_bind(a[0], (struct sockaddr __user *)a[1], a[2]); + break; + case SYS_CONNECT: + if (copy_from_user(a, args, 3 * sizeof(long)) == 0) + r = sys_oabi_connect(a[0], (struct sockaddr __user *)a[1], a[2]); + break; + case SYS_SENDTO: + if (copy_from_user(a, args, 6 * sizeof(long)) == 0) + r = sys_oabi_sendto(a[0], (void __user *)a[1], a[2], a[3], + (struct sockaddr __user *)a[4], a[5]); + break; + case SYS_SENDMSG: + if (copy_from_user(a, args, 3 * sizeof(long)) == 0) + r = sys_oabi_sendmsg(a[0], (struct msghdr __user *)a[1], a[2]); + break; + default: + r = sys_socketcall(call, args); + } + + return r; +} diff --git a/arch/arm/mach-integrator/platsmp.c b/arch/arm/mach-integrator/platsmp.c index ea10bd8c972c..1bc8534ef0c6 100644 --- a/arch/arm/mach-integrator/platsmp.c +++ b/arch/arm/mach-integrator/platsmp.c @@ -140,6 +140,18 @@ static void __init poke_milo(void) mb(); } +/* + * Initialise the CPU possible map early - this describes the CPUs + * which may be present or become present in the system. + */ +void __init smp_init_cpus(void) +{ + unsigned int i, ncores = get_core_count(); + + for (i = 0; i < ncores; i++) + cpu_set(i, cpu_possible_map); +} + void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned int ncores = get_core_count(); @@ -176,14 +188,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus) max_cpus = ncores; /* - * Initialise the possible/present maps. - * cpu_possible_map describes the set of CPUs which may be present - * cpu_present_map describes the set of CPUs populated + * Initialise the present map, which describes the set of CPUs + * actually populated at the present time. */ - for (i = 0; i < max_cpus; i++) { - cpu_set(i, cpu_possible_map); + for (i = 0; i < max_cpus; i++) cpu_set(i, cpu_present_map); - } /* * Do we need any more CPUs? 
If so, then let them know where diff --git a/arch/arm/mach-iop3xx/iop321-setup.c b/arch/arm/mach-iop3xx/iop321-setup.c index e4f4c52d93d4..0ebbcb20c6ae 100644 --- a/arch/arm/mach-iop3xx/iop321-setup.c +++ b/arch/arm/mach-iop3xx/iop321-setup.c @@ -13,7 +13,6 @@ #include <linux/mm.h> #include <linux/init.h> #include <linux/config.h> -#include <linux/init.h> #include <linux/major.h> #include <linux/fs.h> #include <linux/platform_device.h> diff --git a/arch/arm/mach-iop3xx/iop331-setup.c b/arch/arm/mach-iop3xx/iop331-setup.c index 63585485123e..2d6abe5be14d 100644 --- a/arch/arm/mach-iop3xx/iop331-setup.c +++ b/arch/arm/mach-iop3xx/iop331-setup.c @@ -12,7 +12,6 @@ #include <linux/mm.h> #include <linux/init.h> #include <linux/config.h> -#include <linux/init.h> #include <linux/major.h> #include <linux/fs.h> #include <linux/platform_device.h> diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c index da9340a53434..f260a9d34f70 100644 --- a/arch/arm/mach-ixp4xx/nslu2-setup.c +++ b/arch/arm/mach-ixp4xx/nslu2-setup.c @@ -27,8 +27,6 @@ static struct flash_platform_data nslu2_flash_data = { }; static struct resource nslu2_flash_resource = { - .start = NSLU2_FLASH_BASE, - .end = NSLU2_FLASH_BASE + NSLU2_FLASH_SIZE, .flags = IORESOURCE_MEM, }; @@ -116,6 +114,10 @@ static void __init nslu2_init(void) { ixp4xx_sys_init(); + nslu2_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); + nslu2_flash_resource.end = + IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1; + pm_power_off = nslu2_power_off; platform_add_devices(nslu2_devices, ARRAY_SIZE(nslu2_devices)); diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c index a8fbd76d8be5..b8484e15dacb 100644 --- a/arch/arm/mach-realview/platsmp.c +++ b/arch/arm/mach-realview/platsmp.c @@ -143,6 +143,18 @@ static void __init poke_milo(void) mb(); } +/* + * Initialise the CPU possible map early - this describes the CPUs + * which may be present or become present in the system. + */ +void __init smp_init_cpus(void) +{ + unsigned int i, ncores = get_core_count(); + + for (i = 0; i < ncores; i++) + cpu_set(i, cpu_possible_map); +} + void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned int ncores = get_core_count(); @@ -179,14 +191,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus) local_timer_setup(cpu); /* - * Initialise the possible/present maps. - * cpu_possible_map describes the set of CPUs which may be present - * cpu_present_map describes the set of CPUs populated + * Initialise the present map, which describes the set of CPUs + * actually populated at the present time. */ - for (i = 0; i < max_cpus; i++) { - cpu_set(i, cpu_possible_map); + for (i = 0; i < max_cpus; i++) cpu_set(i, cpu_present_map); - } /* * Do we need any more CPUs? 
If so, then let them know where diff --git a/arch/arm/plat-omap/pm.c b/arch/arm/plat-omap/pm.c index 1a24e2c10714..093efd786f21 100644 --- a/arch/arm/plat-omap/pm.c +++ b/arch/arm/plat-omap/pm.c @@ -38,7 +38,6 @@ #include <linux/pm.h> #include <linux/sched.h> #include <linux/proc_fs.h> -#include <linux/pm.h> #include <linux/interrupt.h> #include <asm/io.h> diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index 60a617aff8ba..e08383712370 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -25,6 +25,10 @@ config GENERIC_HARDIRQS bool default n +config TIME_LOW_RES + bool + default y + mainmenu "Fujitsu FR-V Kernel Configuration" source "init/Kconfig" diff --git a/arch/frv/Makefile b/arch/frv/Makefile index 90c0fb8d9dc3..d163747d17c0 100644 --- a/arch/frv/Makefile +++ b/arch/frv/Makefile @@ -81,7 +81,7 @@ endif # - reserve CC3 for use with atomic ops # - all the extra registers are dealt with only at context switch time CFLAGS += -mno-fdpic -mgpr-32 -msoft-float -mno-media -CFLAGS += -ffixed-fcc3 -ffixed-cc3 -ffixed-gr15 +CFLAGS += -ffixed-fcc3 -ffixed-cc3 -ffixed-gr15 -ffixed-icc2 AFLAGS += -mno-fdpic ASFLAGS += -mno-fdpic diff --git a/arch/frv/kernel/break.S b/arch/frv/kernel/break.S index 33233dc23e29..687c48d62dde 100644 --- a/arch/frv/kernel/break.S +++ b/arch/frv/kernel/break.S @@ -200,12 +200,20 @@ __break_step: movsg bpcsr,gr2 sethi.p %hi(__entry_kernel_external_interrupt),gr3 setlo %lo(__entry_kernel_external_interrupt),gr3 - subcc gr2,gr3,gr0,icc0 + subcc.p gr2,gr3,gr0,icc0 + sethi %hi(__entry_uspace_external_interrupt),gr3 + setlo.p %lo(__entry_uspace_external_interrupt),gr3 beq icc0,#2,__break_step_kernel_external_interrupt - sethi.p %hi(__entry_uspace_external_interrupt),gr3 - setlo %lo(__entry_uspace_external_interrupt),gr3 - subcc gr2,gr3,gr0,icc0 + subcc.p gr2,gr3,gr0,icc0 + sethi %hi(__entry_kernel_external_interrupt_virtually_disabled),gr3 + setlo.p %lo(__entry_kernel_external_interrupt_virtually_disabled),gr3 beq icc0,#2,__break_step_uspace_external_interrupt + subcc.p gr2,gr3,gr0,icc0 + sethi %hi(__entry_kernel_external_interrupt_virtual_reenable),gr3 + setlo.p %lo(__entry_kernel_external_interrupt_virtual_reenable),gr3 + beq icc0,#2,__break_step_kernel_external_interrupt_virtually_disabled + subcc gr2,gr3,gr0,icc0 + beq icc0,#2,__break_step_kernel_external_interrupt_virtual_reenable LEDS 0x2007,gr2 @@ -254,6 +262,9 @@ __break_step_kernel_softprog_interrupt: # step through an external interrupt from kernel mode .globl __break_step_kernel_external_interrupt __break_step_kernel_external_interrupt: + # deal with virtual interrupt disablement + beq icc2,#0,__break_step_kernel_external_interrupt_virtually_disabled + sethi.p %hi(__entry_kernel_external_interrupt_reentry),gr3 setlo %lo(__entry_kernel_external_interrupt_reentry),gr3 @@ -294,6 +305,64 @@ __break_return_as_kernel_prologue: #endif rett #1 +# we single-stepped into an interrupt handler whilst interrupts were merely virtually disabled +# need to really disable interrupts, set flag, fix up and return +__break_step_kernel_external_interrupt_virtually_disabled: + movsg psr,gr2 + andi gr2,#~PSR_PIL,gr2 + ori gr2,#PSR_PIL_14,gr2 /* debugging interrupts only */ + movgs gr2,psr + + ldi @(gr31,#REG_CCR),gr3 + movgs gr3,ccr + subcc.p gr0,gr0,gr0,icc2 /* leave Z set, clear C */ + + # exceptions must've been enabled and we must've been in supervisor mode + setlos BPSR_BET|BPSR_BS,gr3 + movgs gr3,bpsr + + # return to where the interrupt happened + movsg pcsr,gr2 + movgs gr2,bpcsr + + lddi.p @(gr31,#REG_GR(2)),gr2 + + xor 
gr31,gr31,gr31 + movgs gr0,brr +#ifdef CONFIG_MMU + movsg scr3,gr31 +#endif + rett #1 + +# we stepped through into the virtual interrupt reenablement trap +# +# we also want to single step anyway, but after fixing up so that we get an event on the +# instruction after the broken-into exception returns + .globl __break_step_kernel_external_interrupt_virtual_reenable +__break_step_kernel_external_interrupt_virtual_reenable: + movsg psr,gr2 + andi gr2,#~PSR_PIL,gr2 + movgs gr2,psr + + ldi @(gr31,#REG_CCR),gr3 + movgs gr3,ccr + subicc gr0,#1,gr0,icc2 /* clear Z, set C */ + + # save the adjusted ICC2 + movsg ccr,gr3 + sti gr3,@(gr31,#REG_CCR) + + # exceptions must've been enabled and we must've been in supervisor mode + setlos BPSR_BET|BPSR_BS,gr3 + movgs gr3,bpsr + + # return to where the trap happened + movsg pcsr,gr2 + movgs gr2,bpcsr + + # and then process the single step + bra __break_continue + # step through an internal exception from uspace mode .globl __break_step_uspace_softprog_interrupt __break_step_uspace_softprog_interrupt: diff --git a/arch/frv/kernel/entry-table.S b/arch/frv/kernel/entry-table.S index 9b9243e2103c..81568acea9cd 100644 --- a/arch/frv/kernel/entry-table.S +++ b/arch/frv/kernel/entry-table.S @@ -116,6 +116,8 @@ __break_kerneltrap_fixup_table: .long __break_step_uspace_external_interrupt .section .trap.kernel .org \tbr_tt + # deal with virtual interrupt disablement + beq icc2,#0,__entry_kernel_external_interrupt_virtually_disabled bra __entry_kernel_external_interrupt .section .trap.fixup.kernel .org \tbr_tt >> 2 @@ -259,25 +261,52 @@ __trap_fixup_kernel_data_tlb_miss: .org TBR_TT_TRAP0 .rept 127 bra __entry_uspace_softprog_interrupt - bra __break_step_uspace_softprog_interrupt - .long 0,0 + .long 0,0,0 .endr .org TBR_TT_BREAK bra __entry_break .long 0,0,0 + .section .trap.fixup.user + .org TBR_TT_TRAP0 >> 2 + .rept 127 + .long __break_step_uspace_softprog_interrupt + .endr + .org TBR_TT_BREAK >> 2 + .long 0 + # miscellaneous kernel mode entry points .section .trap.kernel .org TBR_TT_TRAP0 - .rept 127 bra __entry_kernel_softprog_interrupt - bra __break_step_kernel_softprog_interrupt - .long 0,0 + .org TBR_TT_TRAP1 + bra __entry_kernel_softprog_interrupt + + # trap #2 in kernel - reenable interrupts + .org TBR_TT_TRAP2 + bra __entry_kernel_external_interrupt_virtual_reenable + + # miscellaneous kernel traps + .org TBR_TT_TRAP3 + .rept 124 + bra __entry_kernel_softprog_interrupt + .long 0,0,0 .endr .org TBR_TT_BREAK bra __entry_break .long 0,0,0 + .section .trap.fixup.kernel + .org TBR_TT_TRAP0 >> 2 + .long __break_step_kernel_softprog_interrupt + .long __break_step_kernel_softprog_interrupt + .long __break_step_kernel_external_interrupt_virtual_reenable + .rept 124 + .long __break_step_kernel_softprog_interrupt + .endr + .org TBR_TT_BREAK >> 2 + .long 0 + # miscellaneous debug mode entry points .section .trap.break .org TBR_TT_BREAK diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S index 5f6548388b74..1d21c8d34d8a 100644 --- a/arch/frv/kernel/entry.S +++ b/arch/frv/kernel/entry.S @@ -141,7 +141,10 @@ __entry_uspace_external_interrupt_reentry: movsg gner0,gr4 movsg gner1,gr5 - stdi gr4,@(gr28,#REG_GNER0) + stdi.p gr4,@(gr28,#REG_GNER0) + + # interrupts start off fully disabled in the interrupt handler + subcc gr0,gr0,gr0,icc2 /* set Z and clear C */ # set up kernel global registers sethi.p %hi(__kernel_current_task),gr5 @@ -193,9 +196,8 @@ __entry_uspace_external_interrupt_reentry: .type __entry_kernel_external_interrupt,@function 
__entry_kernel_external_interrupt: LEDS 0x6210 - - sub sp,gr15,gr31 - LEDS32 +// sub sp,gr15,gr31 +// LEDS32 # set up the stack pointer or.p sp,gr0,gr30 @@ -231,7 +233,10 @@ __entry_kernel_external_interrupt_reentry: stdi gr24,@(gr28,#REG_GR(24)) stdi gr26,@(gr28,#REG_GR(26)) sti gr29,@(gr28,#REG_GR(29)) - stdi gr30,@(gr28,#REG_GR(30)) + stdi.p gr30,@(gr28,#REG_GR(30)) + + # note virtual interrupts will be fully enabled upon return + subicc gr0,#1,gr0,icc2 /* clear Z, set C */ movsg tbr ,gr20 movsg psr ,gr22 @@ -267,7 +272,10 @@ __entry_kernel_external_interrupt_reentry: movsg gner0,gr4 movsg gner1,gr5 - stdi gr4,@(gr28,#REG_GNER0) + stdi.p gr4,@(gr28,#REG_GNER0) + + # interrupts start off fully disabled in the interrupt handler + subcc gr0,gr0,gr0,icc2 /* set Z and clear C */ # set the return address sethi.p %hi(__entry_return_from_kernel_interrupt),gr4 @@ -291,6 +299,45 @@ __entry_kernel_external_interrupt_reentry: .size __entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt +############################################################################### +# +# deal with interrupts that were actually virtually disabled +# - we need to really disable them, flag the fact and return immediately +# - if you change this, you must alter break.S also +# +############################################################################### + .balign L1_CACHE_BYTES + .globl __entry_kernel_external_interrupt_virtually_disabled + .type __entry_kernel_external_interrupt_virtually_disabled,@function +__entry_kernel_external_interrupt_virtually_disabled: + movsg psr,gr30 + andi gr30,#~PSR_PIL,gr30 + ori gr30,#PSR_PIL_14,gr30 ; debugging interrupts only + movgs gr30,psr + subcc gr0,gr0,gr0,icc2 ; leave Z set, clear C + rett #0 + + .size __entry_kernel_external_interrupt_virtually_disabled,.-__entry_kernel_external_interrupt_virtually_disabled + +############################################################################### +# +# deal with re-enablement of interrupts that were pending when virtually re-enabled +# - set ICC2.C, re-enable the real interrupts and return +# - we can clear ICC2.Z because we shouldn't be here if it's not 0 [due to TIHI] +# - if you change this, you must alter break.S also +# +############################################################################### + .balign L1_CACHE_BYTES + .globl __entry_kernel_external_interrupt_virtual_reenable + .type __entry_kernel_external_interrupt_virtual_reenable,@function +__entry_kernel_external_interrupt_virtual_reenable: + movsg psr,gr30 + andi gr30,#~PSR_PIL,gr30 ; re-enable interrupts + movgs gr30,psr + subicc gr0,#1,gr0,icc2 ; clear Z, set C + rett #0 + + .size __entry_kernel_external_interrupt_virtual_reenable,.-__entry_kernel_external_interrupt_virtual_reenable ############################################################################### # @@ -335,6 +382,7 @@ __entry_uspace_softprog_interrupt_reentry: sethi.p %hi(__entry_return_from_user_exception),gr23 setlo %lo(__entry_return_from_user_exception),gr23 + bra __entry_common .size __entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt @@ -495,7 +543,10 @@ __entry_common: movsg gner0,gr4 movsg gner1,gr5 - stdi gr4,@(gr28,#REG_GNER0) + stdi.p gr4,@(gr28,#REG_GNER0) + + # set up virtual interrupt disablement + subicc gr0,#1,gr0,icc2 /* clear Z flag, set C flag */ # set up kernel global registers sethi.p %hi(__kernel_current_task),gr5 @@ -1418,11 +1469,27 @@ sys_call_table: .long sys_add_key .long sys_request_key .long sys_keyctl - .long sys_ni_syscall // 
sys_vperfctr_open - .long sys_ni_syscall // sys_vperfctr_control /* 290 */ - .long sys_ni_syscall // sys_vperfctr_unlink - .long sys_ni_syscall // sys_vperfctr_iresume - .long sys_ni_syscall // sys_vperfctr_read + .long sys_ioprio_set + .long sys_ioprio_get /* 290 */ + .long sys_inotify_init + .long sys_inotify_add_watch + .long sys_inotify_rm_watch + .long sys_migrate_pages + .long sys_openat /* 295 */ + .long sys_mkdirat + .long sys_mknodat + .long sys_fchownat + .long sys_futimesat + .long sys_newfstatat /* 300 */ + .long sys_unlinkat + .long sys_renameat + .long sys_linkat + .long sys_symlinkat + .long sys_readlinkat /* 305 */ + .long sys_fchmodat + .long sys_faccessat + .long sys_pselect6 + .long sys_ppoll syscall_table_size = (. - sys_call_table) diff --git a/arch/frv/kernel/head.S b/arch/frv/kernel/head.S index c73b4fe9f6ca..29a5265489b7 100644 --- a/arch/frv/kernel/head.S +++ b/arch/frv/kernel/head.S @@ -513,6 +513,9 @@ __head_mmu_enabled: movgs gr0,ccr movgs gr0,cccr + # initialise the virtual interrupt handling + subcc gr0,gr0,gr0,icc2 /* set Z, clear C */ + #ifdef CONFIG_MMU movgs gr3,scr2 movgs gr3,scr3 diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c index 59580c59c62c..27ab4c30aac6 100644 --- a/arch/frv/kernel/irq.c +++ b/arch/frv/kernel/irq.c @@ -287,18 +287,11 @@ asmlinkage void do_IRQ(void) struct irq_source *source; int level, cpu; + irq_enter(); + level = (__frame->tbr >> 4) & 0xf; cpu = smp_processor_id(); -#if 0 - { - static u32 irqcount; - *(volatile u32 *) 0xe1200004 = ~((irqcount++ << 8) | level); - *(volatile u16 *) 0xffc00100 = (u16) ~0x9999; - mb(); - } -#endif - if ((unsigned long) __frame - (unsigned long) (current + 1) < 512) BUG(); @@ -308,40 +301,12 @@ asmlinkage void do_IRQ(void) kstat_this_cpu.irqs[level]++; - irq_enter(); - for (source = frv_irq_levels[level].sources; source; source = source->next) source->doirq(source); - irq_exit(); - __clr_MASK(level); - /* only process softirqs if we didn't interrupt another interrupt handler */ - if ((__frame->psr & PSR_PIL) == PSR_PIL_0) - if (local_softirq_pending()) - do_softirq(); - -#ifdef CONFIG_PREEMPT - local_irq_disable(); - while (--current->preempt_count == 0) { - if (!(__frame->psr & PSR_S) || - current->need_resched == 0 || - in_interrupt()) - break; - current->preempt_count++; - local_irq_enable(); - preempt_schedule(); - local_irq_disable(); - } -#endif - -#if 0 - { - *(volatile u16 *) 0xffc00100 = (u16) ~0x6666; - mb(); - } -#endif + irq_exit(); } /* end do_IRQ() */ diff --git a/arch/frv/mm/kmap.c b/arch/frv/mm/kmap.c index 539f45e6d15e..c54f18e65ea6 100644 --- a/arch/frv/mm/kmap.c +++ b/arch/frv/mm/kmap.c @@ -44,15 +44,6 @@ void iounmap(void *addr) } /* - * __iounmap unmaps nearly everything, so be careful - * it doesn't free currently pointer/page tables anymore but it - * wans't used anyway and might be added later. - */ -void __iounmap(void *addr, unsigned long size) -{ -} - -/* * Set new cache mode for some kernel address space. * The caller must push data for that range itself, if such data may already * be in the cache. 
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index 80940d712acf..98308b018a35 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -33,6 +33,10 @@ config GENERIC_CALIBRATE_DELAY bool default y +config TIME_LOW_RES + bool + default y + config ISA bool default y diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu index a380167a13cf..582797db9603 100644 --- a/arch/h8300/Kconfig.cpu +++ b/arch/h8300/Kconfig.cpu @@ -169,7 +169,7 @@ endif config CPU_H8300H bool - depends on (H8002 || H83007 || H83048 || H83068) + depends on (H83002 || H83007 || H83048 || H83068) default y config CPU_H8S diff --git a/arch/i386/boot/.gitignore b/arch/i386/boot/.gitignore new file mode 100644 index 000000000000..495f20c085de --- /dev/null +++ b/arch/i386/boot/.gitignore @@ -0,0 +1,3 @@ +bootsect +bzImage +setup diff --git a/arch/i386/boot/tools/.gitignore b/arch/i386/boot/tools/.gitignore new file mode 100644 index 000000000000..378eac25d311 --- /dev/null +++ b/arch/i386/boot/tools/.gitignore @@ -0,0 +1 @@ +build diff --git a/arch/i386/kernel/.gitignore b/arch/i386/kernel/.gitignore new file mode 100644 index 000000000000..40836ad9079c --- /dev/null +++ b/arch/i386/kernel/.gitignore @@ -0,0 +1 @@ +vsyscall.lds diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c index bdbeb77f4e22..7214c9b577ab 100644 --- a/arch/i386/kernel/cpu/transmeta.c +++ b/arch/i386/kernel/cpu/transmeta.c @@ -1,4 +1,5 @@ #include <linux/kernel.h> +#include <linux/mm.h> #include <linux/init.h> #include <asm/processor.h> #include <asm/msr.h> diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c index 7c86e3c5f1c1..a7f5a2aceba2 100644 --- a/arch/i386/kernel/timers/timer_tsc.c +++ b/arch/i386/kernel/timers/timer_tsc.c @@ -282,6 +282,10 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, if (val != CPUFREQ_RESUMECHANGE) write_seqlock_irq(&xtime_lock); if (!ref_freq) { + if (!freq->old){ + ref_freq = freq->new; + goto end; + } ref_freq = freq->old; loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy; #ifndef CONFIG_SMP @@ -307,6 +311,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, #endif } +end: if (val != CPUFREQ_RESUMECHANGE) write_sequnlock_irq(&xtime_lock); diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S index 4daefb2ec1b2..76b728159403 100644 --- a/arch/i386/kernel/vsyscall-sysenter.S +++ b/arch/i386/kernel/vsyscall-sysenter.S @@ -7,6 +7,21 @@ * for details. */ +/* + * The caller puts arg2 in %ecx, which gets pushed. The kernel will use + * %ecx itself for arg2. The pushing is because the sysexit instruction + * (found in entry.S) requires that we clobber %ecx with the desired %esp. + * User code might expect that %ecx is unclobbered though, as it would be + * for returning via the iret instruction, so we must push and pop. + * + * The caller puts arg3 in %edx, which the sysexit instruction requires + * for %eip. Thus, exactly as for arg2, we must push and pop. + * + * Arg6 is different. The caller puts arg6 in %ebp. Since the sysenter + * instruction clobbers %esp, the user's %esp won't even survive entry + * into the kernel. We store %esp in %ebp. Code in entry.S must fetch + * arg6 from the stack. 
+ */ .text .globl __kernel_vsyscall .type __kernel_vsyscall,@function diff --git a/arch/i386/oprofile/backtrace.c b/arch/i386/oprofile/backtrace.c index acc18138fb22..c049ce414f01 100644 --- a/arch/i386/oprofile/backtrace.c +++ b/arch/i386/oprofile/backtrace.c @@ -20,7 +20,20 @@ struct frame_head { } __attribute__((packed)); static struct frame_head * -dump_backtrace(struct frame_head * head) +dump_kernel_backtrace(struct frame_head * head) +{ + oprofile_add_trace(head->ret); + + /* frame pointers should strictly progress back up the stack + * (towards higher addresses) */ + if (head >= head->ebp) + return NULL; + + return head->ebp; +} + +static struct frame_head * +dump_user_backtrace(struct frame_head * head) { struct frame_head bufhead[2]; @@ -105,10 +118,10 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth) if (!user_mode_vm(regs)) { while (depth-- && valid_kernel_stack(head, regs)) - head = dump_backtrace(head); + head = dump_kernel_backtrace(head); return; } while (depth-- && head) - head = dump_backtrace(head); + head = dump_user_backtrace(head); } diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index d2702c419cf8..ecd44bdc8394 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -761,6 +761,59 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid) return (0); } +int additional_cpus __initdata = -1; + +static __init int setup_additional_cpus(char *s) +{ + if (s) + additional_cpus = simple_strtol(s, NULL, 0); + + return 0; +} + +early_param("additional_cpus", setup_additional_cpus); + +/* + * cpu_possible_map should be static, it cannot change as cpu's + * are onlined, or offlined. The reason is per-cpu data-structures + * are allocated by some modules at init time, and dont expect to + * do this dynamically on cpu arrival/departure. + * cpu_present_map on the other hand can change dynamically. + * In case when cpu_hotplug is not compiled, then we resort to current + * behaviour, which is cpu_possible == cpu_present. + * - Ashok Raj + * + * Three ways to find out the number of additional hotplug CPUs: + * - If the BIOS specified disabled CPUs in ACPI/mptables use that. + * - The user can overwrite it with additional_cpus=NUM + * - Otherwise don't reserve additional CPUs. 
+ */ +__init void prefill_possible_map(void) +{ + int i; + int possible, disabled_cpus; + + disabled_cpus = total_cpus - available_cpus; + + if (additional_cpus == -1) { + if (disabled_cpus > 0) + additional_cpus = disabled_cpus; + else + additional_cpus = 0; + } + + possible = available_cpus + additional_cpus; + + if (possible > NR_CPUS) + possible = NR_CPUS; + + printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", + possible, max((possible - available_cpus), 0)); + + for (i = 0; i < possible; i++) + cpu_set(i, cpu_possible_map); +} + int acpi_map_lsapic(acpi_handle handle, int *pcpu) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 27b222c277e4..930fdfca6ddb 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -569,7 +569,9 @@ GLOBAL_ENTRY(ia64_trace_syscall) .mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8 .mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10 br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value -.ret3: br.cond.sptk .work_pending_syscall_end +.ret3: +(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk + br.cond.sptk .work_pending_syscall_end strace_error: ld8 r3=[r2] // load pt_regs.r8 diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c index e72de580ebbf..bbcfd08378a6 100644 --- a/arch/ia64/kernel/ia64_ksyms.c +++ b/arch/ia64/kernel/ia64_ksyms.c @@ -10,23 +10,8 @@ #include <linux/string.h> EXPORT_SYMBOL(memset); -EXPORT_SYMBOL(memchr); -EXPORT_SYMBOL(memcmp); EXPORT_SYMBOL(memcpy); -EXPORT_SYMBOL(memmove); -EXPORT_SYMBOL(memscan); -EXPORT_SYMBOL(strcat); -EXPORT_SYMBOL(strchr); -EXPORT_SYMBOL(strcmp); -EXPORT_SYMBOL(strcpy); EXPORT_SYMBOL(strlen); -EXPORT_SYMBOL(strncat); -EXPORT_SYMBOL(strncmp); -EXPORT_SYMBOL(strncpy); -EXPORT_SYMBOL(strnlen); -EXPORT_SYMBOL(strrchr); -EXPORT_SYMBOL(strstr); -EXPORT_SYMBOL(strpbrk); #include <asm/checksum.h> EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */ diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 35f7835294a3..3258e09278d0 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -430,6 +430,7 @@ setup_arch (char **cmdline_p) if (early_console_setup(*cmdline_p) == 0) mark_bsp_online(); + parse_early_param(); #ifdef CONFIG_ACPI /* Initialize the ACPI boot-time table parser */ acpi_table_init(); @@ -688,6 +689,9 @@ void setup_per_cpu_areas (void) { /* start_kernel() requires this... 
*/ +#ifdef CONFIG_ACPI_HOTPLUG_CPU + prefill_possible_map(); +#endif } /* diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 8f44e7d2df66..b681ef34a86e 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -129,7 +129,7 @@ DEFINE_PER_CPU(int, cpu_state); /* Bitmasks of currently online, and possible CPUs */ cpumask_t cpu_online_map; EXPORT_SYMBOL(cpu_online_map); -cpumask_t cpu_possible_map; +cpumask_t cpu_possible_map = CPU_MASK_NONE; EXPORT_SYMBOL(cpu_possible_map); cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; @@ -506,9 +506,6 @@ smp_build_cpu_map (void) for (cpu = 0; cpu < NR_CPUS; cpu++) { ia64_cpu_to_sapicid[cpu] = -1; -#ifdef CONFIG_HOTPLUG_CPU - cpu_set(cpu, cpu_possible_map); -#endif } ia64_cpu_to_sapicid[0] = boot_cpu_id; diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index a094ec49ccfa..307d01e15b2e 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -250,32 +250,27 @@ time_init (void) set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); } -#define SMALLUSECS 100 - -void -udelay (unsigned long usecs) +/* + * Generic udelay assumes that if preemption is allowed and the thread + * migrates to another CPU, that the ITC values are synchronized across + * all CPUs. + */ +static void +ia64_itc_udelay (unsigned long usecs) { - unsigned long start; - unsigned long cycles; - unsigned long smallusecs; + unsigned long start = ia64_get_itc(); + unsigned long end = start + usecs*local_cpu_data->cyc_per_usec; - /* - * Execute the non-preemptible delay loop (because the ITC might - * not be synchronized between CPUS) in relatively short time - * chunks, allowing preemption between the chunks. - */ - while (usecs > 0) { - smallusecs = (usecs > SMALLUSECS) ? SMALLUSECS : usecs; - preempt_disable(); - cycles = smallusecs*local_cpu_data->cyc_per_usec; - start = ia64_get_itc(); + while (time_before(ia64_get_itc(), end)) + cpu_relax(); +} - while (ia64_get_itc() - start < cycles) - cpu_relax(); +void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay; - preempt_enable(); - usecs -= smallusecs; - } +void +udelay (unsigned long usecs) +{ + (*ia64_udelay)(usecs); } EXPORT_SYMBOL(udelay); diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index 55391901b013..dabd6c32641e 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -16,6 +16,7 @@ #include <linux/module.h> /* for EXPORT_SYMBOL */ #include <linux/hardirq.h> #include <linux/kprobes.h> +#include <linux/delay.h> /* for ssleep() */ #include <asm/fpswa.h> #include <asm/ia32.h> @@ -116,6 +117,13 @@ die (const char *str, struct pt_regs *regs, long err) bust_spinlocks(0); die.lock_owner = -1; spin_unlock_irq(&die.lock); + + if (panic_on_oops) { + printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n"); + ssleep(5); + panic("Fatal exception"); + } + do_exit(SIGSEGV); } diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index 3437c2390429..3edef0d32f86 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c @@ -23,6 +23,10 @@ #include "xtalk/hubdev.h" #include "xtalk/xwidgetdev.h" + +extern void sn_init_cpei_timer(void); +extern void register_sn_procfs(void); + static struct list_head sn_sysdata_list; /* sysdata list struct */ @@ -40,12 +44,12 @@ struct brick { struct slab_info slab_info[MAX_SLABS + 1]; }; -int sn_ioif_inited = 0; /* SN I/O infrastructure initialized? */ +int sn_ioif_inited; /* SN I/O infrastructure initialized? 
*/ struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */ -static int max_segment_number = 0; /* Default highest segment number */ -static int max_pcibus_number = 255; /* Default highest pci bus number */ +static int max_segment_number; /* Default highest segment number */ +static int max_pcibus_number = 255; /* Default highest pci bus number */ /* * Hooks and struct for unsupported pci providers @@ -84,7 +88,6 @@ static inline u64 sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num, u64 address) { - struct ia64_sal_retval ret_stuff; ret_stuff.status = 0; ret_stuff.v0 = 0; @@ -94,7 +97,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num, (u64) nasid, (u64) widget_num, (u64) device_num, (u64) address, 0, 0, 0); return ret_stuff.status; - } /* @@ -102,7 +104,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num, */ static inline u64 sal_get_hubdev_info(u64 handle, u64 address) { - struct ia64_sal_retval ret_stuff; ret_stuff.status = 0; ret_stuff.v0 = 0; @@ -118,7 +119,6 @@ static inline u64 sal_get_hubdev_info(u64 handle, u64 address) */ static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address) { - struct ia64_sal_retval ret_stuff; ret_stuff.status = 0; ret_stuff.v0 = 0; @@ -215,7 +215,7 @@ static void __init sn_fixup_ionodes(void) struct hubdev_info *hubdev; u64 status; u64 nasid; - int i, widget, device; + int i, widget, device, size; /* * Get SGI Specific HUB chipset information. @@ -251,48 +251,37 @@ static void __init sn_fixup_ionodes(void) if (!hubdev->hdi_flush_nasid_list.widget_p) continue; + size = (HUB_WIDGET_ID_MAX + 1) * + sizeof(struct sn_flush_device_kernel *); hubdev->hdi_flush_nasid_list.widget_p = - kmalloc((HUB_WIDGET_ID_MAX + 1) * - sizeof(struct sn_flush_device_kernel *), - GFP_KERNEL); - memset(hubdev->hdi_flush_nasid_list.widget_p, 0x0, - (HUB_WIDGET_ID_MAX + 1) * - sizeof(struct sn_flush_device_kernel *)); + kzalloc(size, GFP_KERNEL); + if (!hubdev->hdi_flush_nasid_list.widget_p) + BUG(); for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { - sn_flush_device_kernel = kmalloc(DEV_PER_WIDGET * - sizeof(struct - sn_flush_device_kernel), - GFP_KERNEL); + size = DEV_PER_WIDGET * + sizeof(struct sn_flush_device_kernel); + sn_flush_device_kernel = kzalloc(size, GFP_KERNEL); if (!sn_flush_device_kernel) BUG(); - memset(sn_flush_device_kernel, 0x0, - DEV_PER_WIDGET * - sizeof(struct sn_flush_device_kernel)); dev_entry = sn_flush_device_kernel; for (device = 0; device < DEV_PER_WIDGET; device++,dev_entry++) { - dev_entry->common = kmalloc(sizeof(struct - sn_flush_device_common), - GFP_KERNEL); + size = sizeof(struct sn_flush_device_common); + dev_entry->common = kzalloc(size, GFP_KERNEL); if (!dev_entry->common) BUG(); - memset(dev_entry->common, 0x0, sizeof(struct - sn_flush_device_common)); if (sn_prom_feature_available( PRF_DEVICE_FLUSH_LIST)) status = sal_get_device_dmaflush_list( - nasid, - widget, - device, - (u64)(dev_entry->common)); + nasid, widget, device, + (u64)(dev_entry->common)); else status = sn_device_fixup_war(nasid, - widget, - device, - dev_entry->common); + widget, device, + dev_entry->common); if (status != SALRET_OK) panic("SAL call failed: %s\n", ia64_sal_strerror(status)); @@ -383,13 +372,12 @@ void sn_pci_fixup_slot(struct pci_dev *dev) pci_dev_get(dev); /* for the sysdata pointer */ pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); - if (pcidev_info <= 0) + if (!pcidev_info) BUG(); /* Cannot afford to run out of 
memory */ - sn_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_KERNEL); - if (sn_irq_info <= 0) + sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); + if (!sn_irq_info) BUG(); /* Cannot afford to run out of memory */ - memset(sn_irq_info, 0, sizeof(struct sn_irq_info)); /* Call to retrieve pci device information needed by kernel. */ status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number, @@ -482,13 +470,13 @@ void sn_pci_fixup_slot(struct pci_dev *dev) */ void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) { - int status = 0; + int status; int nasid, cnode; struct pci_controller *controller; struct sn_pci_controller *sn_controller; struct pcibus_bussoft *prom_bussoft_ptr; struct hubdev_info *hubdev_info; - void *provider_soft = NULL; + void *provider_soft; struct sn_pcibus_provider *provider; status = sal_get_pcibus_info((u64) segment, (u64) busnum, @@ -535,6 +523,8 @@ void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) bus->sysdata = controller; if (provider->bus_fixup) provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller); + else + provider_soft = NULL; if (provider_soft == NULL) { /* fixup failed or not applicable */ @@ -638,13 +628,8 @@ void sn_bus_free_sysdata(void) static int __init sn_pci_init(void) { - int i = 0; - int j = 0; + int i, j; struct pci_dev *pci_dev = NULL; - extern void sn_init_cpei_timer(void); -#ifdef CONFIG_PROC_FS - extern void register_sn_procfs(void); -#endif if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM()) return 0; @@ -700,32 +685,29 @@ static int __init sn_pci_init(void) */ void hubdev_init_node(nodepda_t * npda, cnodeid_t node) { - struct hubdev_info *hubdev_info; + int size; + pg_data_t *pg; + + size = sizeof(struct hubdev_info); if (node >= num_online_nodes()) /* Headless/memless IO nodes */ - hubdev_info = - (struct hubdev_info *)alloc_bootmem_node(NODE_DATA(0), - sizeof(struct - hubdev_info)); + pg = NODE_DATA(0); else - hubdev_info = - (struct hubdev_info *)alloc_bootmem_node(NODE_DATA(node), - sizeof(struct - hubdev_info)); - npda->pdinfo = (void *)hubdev_info; + pg = NODE_DATA(node); + hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size); + + npda->pdinfo = (void *)hubdev_info; } geoid_t cnodeid_get_geoid(cnodeid_t cnode) { - struct hubdev_info *hubdev; hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); return hubdev->hdi_geoid; - } subsys_initcall(sn_pci_init); @@ -734,3 +716,4 @@ EXPORT_SYMBOL(sn_pci_unfixup_slot); EXPORT_SYMBOL(sn_pci_controller_fixup); EXPORT_SYMBOL(sn_bus_store_sysdata); EXPORT_SYMBOL(sn_bus_free_sysdata); +EXPORT_SYMBOL(sn_pcidev_info_get); diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 48645ac120fc..5b84836c2171 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -75,7 +75,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second); DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); EXPORT_PER_CPU_SYMBOL(__sn_hub_info); -DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]); +DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); @@ -317,6 +317,7 @@ struct pcdp_vga_device { #define PCDP_PCI_TRANS_IOPORT 0x02 #define PCDP_PCI_TRANS_MMIO 0x01 +#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) static void sn_scan_pcdp(void) { @@ -358,6 +359,7 @@ sn_scan_pcdp(void) break; /* once we find the primary, we're done */ } } +#endif static unsigned long sn2_rtc_initial; 
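A recurring cleanup in this merge (for example in the arch/ia64/sn/kernel/io_init.c hunks above) replaces kmalloc()+memset() pairs with kzalloc() and drops the bogus "<= 0" pointer comparisons in favour of plain NULL tests. The sketch below only illustrates that idiom; it is not part of the patch, and struct example_info and example_alloc() are made-up names used for the example:

    #include <linux/slab.h>

    /* Illustrative only: this structure does not exist in the kernel. */
    struct example_info {
            int id;
            void *data;
    };

    static struct example_info *example_alloc(void)
    {
            struct example_info *info;

            /*
             * Old pattern removed by the hunks above:
             *
             *     info = kmalloc(sizeof(*info), GFP_KERNEL);
             *     if (info <= 0)          (treats a pointer as a signed value)
             *             BUG();
             *     memset(info, 0, sizeof(*info));
             *
             * New pattern: kzalloc() returns zeroed memory and the result
             * is checked against NULL directly.
             */
            info = kzalloc(sizeof(*info), GFP_KERNEL);
            if (!info)
                    return NULL;

            return info;
    }

kzalloc() is already used elsewhere in this tree, so the conversion is purely a simplification with no behaviour change beyond the corrected error checks.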
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c index 81c63b2f8ae9..6ae276d5d50c 100644 --- a/arch/ia64/sn/kernel/sn2/prominfo_proc.c +++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (C) 1999,2001-2004, 2006 Silicon Graphics, Inc. All Rights Reserved. * * Module to export the system's Firmware Interface Tables, including * PROM revision numbers and banners, in /proc @@ -190,7 +190,7 @@ static int read_version_entry(char *page, char **start, off_t off, int count, int *eof, void *data) { - int len = 0; + int len; /* data holds the NASID of the node */ len = dump_version(page, (unsigned long)data); @@ -202,7 +202,7 @@ static int read_fit_entry(char *page, char **start, off_t off, int count, int *eof, void *data) { - int len = 0; + int len; /* data holds the NASID of the node */ len = dump_fit(page, (unsigned long)data); @@ -229,13 +229,16 @@ int __init prominfo_init(void) struct proc_dir_entry *p; cnodeid_t cnodeid; unsigned long nasid; + int size; char name[NODE_NAME_LEN]; if (!ia64_platform_is("sn2")) return 0; - proc_entries = kmalloc(num_online_nodes() * sizeof(struct proc_dir_entry *), - GFP_KERNEL); + size = num_online_nodes() * sizeof(struct proc_dir_entry *); + proc_entries = kzalloc(size, GFP_KERNEL); + if (!proc_entries) + return -ENOMEM; sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL); @@ -244,14 +247,12 @@ int __init prominfo_init(void) sprintf(name, "node%d", cnodeid); *entp = proc_mkdir(name, sgi_prominfo_entry); nasid = cnodeid_to_nasid(cnodeid); - p = create_proc_read_entry( - "fit", 0, *entp, read_fit_entry, - (void *)nasid); + p = create_proc_read_entry("fit", 0, *entp, read_fit_entry, + (void *)nasid); if (p) p->owner = THIS_MODULE; - p = create_proc_read_entry( - "version", 0, *entp, read_version_entry, - (void *)nasid); + p = create_proc_read_entry("version", 0, *entp, + read_version_entry, (void *)nasid); if (p) p->owner = THIS_MODULE; entp++; @@ -263,7 +264,7 @@ int __init prominfo_init(void) void __exit prominfo_exit(void) { struct proc_dir_entry **entp; - unsigned cnodeid; + unsigned int cnodeid; char name[NODE_NAME_LEN]; entp = proc_entries; diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index f153a4c35c70..24eefb2fc55f 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c @@ -46,8 +46,14 @@ DECLARE_PER_CPU(struct ptc_stats, ptcstats); static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock); -void sn2_ptc_deadlock_recovery(short *, short, short, int, volatile unsigned long *, unsigned long, - volatile unsigned long *, unsigned long); +extern unsigned long +sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long, + volatile unsigned long *, unsigned long, + volatile unsigned long *, unsigned long); +void +sn2_ptc_deadlock_recovery(short *, short, short, int, + volatile unsigned long *, unsigned long, + volatile unsigned long *, unsigned long); /* * Note: some is the following is captured here to make degugging easier @@ -59,16 +65,6 @@ void sn2_ptc_deadlock_recovery(short *, short, short, int, volatile unsigned lon #define reset_max_active_on_deadlock() 1 #define PTC_LOCK(sh1) ((sh1) ? 
&sn2_global_ptc_lock : &sn_nodepda->ptc_lock) -static inline void ptc_lock(int sh1, unsigned long *flagp) -{ - spin_lock_irqsave(PTC_LOCK(sh1), *flagp); -} - -static inline void ptc_unlock(int sh1, unsigned long flags) -{ - spin_unlock_irqrestore(PTC_LOCK(sh1), flags); -} - struct ptc_stats { unsigned long ptc_l; unsigned long change_rid; @@ -82,6 +78,8 @@ struct ptc_stats { unsigned long shub_ptc_flushes_not_my_mm; }; +#define sn2_ptctest 0 + static inline unsigned long wait_piowc(void) { volatile unsigned long *piows; @@ -200,7 +198,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, max_active = max_active_pio(shub1); itc = ia64_get_itc(); - ptc_lock(shub1, &flags); + spin_lock_irqsave(PTC_LOCK(shub1), flags); itc2 = ia64_get_itc(); __get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc; @@ -258,7 +256,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, ia64_srlz_d(); } - ptc_unlock(shub1, flags); + spin_unlock_irqrestore(PTC_LOCK(shub1), flags); preempt_enable(); } @@ -270,11 +268,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, * TLB flush transaction. The recovery sequence is somewhat tricky & is * coded in assembly language. */ -void sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid, volatile unsigned long *ptc0, unsigned long data0, - volatile unsigned long *ptc1, unsigned long data1) + +void +sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid, + volatile unsigned long *ptc0, unsigned long data0, + volatile unsigned long *ptc1, unsigned long data1) { - extern unsigned long sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long, - volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long); short nasid, i; unsigned long *piows, zeroval, n; diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c index a06719d752a0..c686d9c12f7b 100644 --- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c +++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c @@ -6,11 +6,11 @@ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. 
*/ #include <linux/config.h> -#include <asm/uaccess.h> #ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <asm/uaccess.h> #include <asm/sn/sn_sal.h> static int partition_id_show(struct seq_file *s, void *p) @@ -90,10 +90,10 @@ static int coherence_id_open(struct inode *inode, struct file *file) return single_open(file, coherence_id_show, NULL); } -static struct proc_dir_entry *sn_procfs_create_entry( - const char *name, struct proc_dir_entry *parent, - int (*openfunc)(struct inode *, struct file *), - int (*releasefunc)(struct inode *, struct file *)) +static struct proc_dir_entry +*sn_procfs_create_entry(const char *name, struct proc_dir_entry *parent, + int (*openfunc)(struct inode *, struct file *), + int (*releasefunc)(struct inode *, struct file *)) { struct proc_dir_entry *e = create_proc_entry(name, 0444, parent); @@ -126,24 +126,24 @@ void register_sn_procfs(void) return; sn_procfs_create_entry("partition_id", sgi_proc_dir, - partition_id_open, single_release); + partition_id_open, single_release); sn_procfs_create_entry("system_serial_number", sgi_proc_dir, - system_serial_number_open, single_release); + system_serial_number_open, single_release); sn_procfs_create_entry("licenseID", sgi_proc_dir, - licenseID_open, single_release); + licenseID_open, single_release); e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir, - sn_force_interrupt_open, single_release); + sn_force_interrupt_open, single_release); if (e) e->proc_fops->write = sn_force_interrupt_write_proc; sn_procfs_create_entry("coherence_id", sgi_proc_dir, - coherence_id_open, single_release); + coherence_id_open, single_release); sn_procfs_create_entry("sn_topology", sgi_proc_dir, - sn_topology_open, sn_topology_release); + sn_topology_open, sn_topology_release); } #endif /* CONFIG_PROC_FS */ diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c index deb9baf4d473..56a88b6df4b4 100644 --- a/arch/ia64/sn/kernel/sn2/timer.c +++ b/arch/ia64/sn/kernel/sn2/timer.c @@ -14,6 +14,7 @@ #include <asm/hw_irq.h> #include <asm/system.h> +#include <asm/timex.h> #include <asm/sn/leds.h> #include <asm/sn/shub_mmr.h> @@ -28,9 +29,27 @@ static struct time_interpolator sn2_interpolator = { .source = TIME_SOURCE_MMIO64 }; +/* + * sn udelay uses the RTC instead of the ITC because the ITC is not + * synchronized across all CPUs, and the thread may migrate to another CPU + * if preemption is enabled. + */ +static void +ia64_sn_udelay (unsigned long usecs) +{ + unsigned long start = rtc_time(); + unsigned long end = start + + usecs * sn_rtc_cycles_per_second / 1000000; + + while (time_before((unsigned long)rtc_time(), end)) + cpu_relax(); +} + void __init sn_timer_init(void) { sn2_interpolator.frequency = sn_rtc_cycles_per_second; sn2_interpolator.addr = RTC_COUNTER_ADDR; register_time_interpolator(&sn2_interpolator); + + ia64_udelay = &ia64_sn_udelay; } diff --git a/arch/ia64/sn/kernel/sn2/timer_interrupt.c b/arch/ia64/sn/kernel/sn2/timer_interrupt.c index adf5db2e2afe..fa7f69945917 100644 --- a/arch/ia64/sn/kernel/sn2/timer_interrupt.c +++ b/arch/ia64/sn/kernel/sn2/timer_interrupt.c @@ -1,7 +1,7 @@ /* * * - * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (c) 2005, 2006 Silicon Graphics, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License @@ -22,11 +22,6 @@ * License along with this program; if not, write the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * - * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, - * Mountain View, CA 94043, or: - * - * http://www.sgi.com - * * For further information regarding this notice, see: * * http://oss.sgi.com/projects/GenInfo/NoticeExplan diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index d263d3e8fbb9..8a56f8b5ffa2 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c @@ -284,12 +284,10 @@ struct sn_irq_info *tiocx_irq_alloc(nasid_t nasid, int widget, int irq, if ((nasid & 1) == 0) return NULL; - sn_irq_info = kmalloc(sn_irq_size, GFP_KERNEL); + sn_irq_info = kzalloc(sn_irq_size, GFP_KERNEL); if (sn_irq_info == NULL) return NULL; - memset(sn_irq_info, 0x0, sn_irq_size); - status = tiocx_intr_alloc(nasid, widget, __pa(sn_irq_info), irq, req_nasid, slice); if (status) { diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c index 36e5437a0fb6..cdf6856ce089 100644 --- a/arch/ia64/sn/kernel/xpc_channel.c +++ b/arch/ia64/sn/kernel/xpc_channel.c @@ -738,7 +738,9 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) /* make sure all activity has settled down first */ - if (atomic_read(&ch->references) > 0) { + if (atomic_read(&ch->references) > 0 || + ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && + !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE))) { return; } DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); @@ -775,7 +777,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) /* both sides are disconnected now */ - if (ch->flags & XPC_C_CONNECTCALLOUT) { + if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) { spin_unlock_irqrestore(&ch->lock, *irq_flags); xpc_disconnect_callout(ch, xpcDisconnected); spin_lock_irqsave(&ch->lock, *irq_flags); @@ -1300,7 +1302,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) "delivered=%d, partid=%d, channel=%d\n", nmsgs_sent, ch->partid, ch->number); - if (ch->flags & XPC_C_CONNECTCALLOUT) { + if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) { xpc_activate_kthreads(ch, nmsgs_sent); } } diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c index 9cd460dfe27e..8cbf16432570 100644 --- a/arch/ia64/sn/kernel/xpc_main.c +++ b/arch/ia64/sn/kernel/xpc_main.c @@ -750,12 +750,16 @@ xpc_daemonize_kthread(void *args) /* let registerer know that connection has been established */ spin_lock_irqsave(&ch->lock, irq_flags); - if (!(ch->flags & XPC_C_CONNECTCALLOUT)) { - ch->flags |= XPC_C_CONNECTCALLOUT; + if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) { + ch->flags |= XPC_C_CONNECTEDCALLOUT; spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_connected_callout(ch); + spin_lock_irqsave(&ch->lock, irq_flags); + ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE; + spin_unlock_irqrestore(&ch->lock, irq_flags); + /* * It is possible that while the callout was being * made that the remote partition sent some messages. 
@@ -777,15 +781,17 @@ xpc_daemonize_kthread(void *args) if (atomic_dec_return(&ch->kthreads_assigned) == 0) { spin_lock_irqsave(&ch->lock, irq_flags); - if ((ch->flags & XPC_C_CONNECTCALLOUT) && - !(ch->flags & XPC_C_DISCONNECTCALLOUT)) { - ch->flags |= XPC_C_DISCONNECTCALLOUT; + if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && + !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { + ch->flags |= XPC_C_DISCONNECTINGCALLOUT; spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_disconnect_callout(ch, xpcDisconnecting); - } else { - spin_unlock_irqrestore(&ch->lock, irq_flags); + + spin_lock_irqsave(&ch->lock, irq_flags); + ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE; } + spin_unlock_irqrestore(&ch->lock, irq_flags); if (atomic_dec_return(&part->nchannels_engaged) == 0) { xpc_mark_partition_disengaged(part); xpc_IPI_send_disengage(part); diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 5a36292388eb..b4b84c269210 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -335,10 +335,10 @@ int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size) */ SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, - pci_domain_nr(bus), bus->number, - 0, /* io */ - 0, /* read */ - port, size, __pa(val)); + pci_domain_nr(bus), bus->number, + 0, /* io */ + 0, /* read */ + port, size, __pa(val)); if (isrv.status == 0) return size; @@ -381,10 +381,10 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) */ SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, - pci_domain_nr(bus), bus->number, - 0, /* io */ - 1, /* write */ - port, size, __pa(&val)); + pci_domain_nr(bus), bus->number, + 0, /* io */ + 1, /* write */ + port, size, __pa(&val)); if (isrv.status == 0) return size; diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c index aa3fa5152a32..1f0253bfe0a0 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved. */ #include <linux/types.h> @@ -12,7 +12,7 @@ #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> -int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */ +int pcibr_invalidate_ate; /* by default don't invalidate ATE on free */ /* * mark_ate: Mark the ate as either free or inuse. 
@@ -20,14 +20,12 @@ int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */ static void mark_ate(struct ate_resource *ate_resource, int start, int number, u64 value) { - u64 *ate = ate_resource->ate; int index; int length = 0; for (index = start; length < number; index++, length++) ate[index] = value; - } /* @@ -37,7 +35,6 @@ static void mark_ate(struct ate_resource *ate_resource, int start, int number, static int find_free_ate(struct ate_resource *ate_resource, int start, int count) { - u64 *ate = ate_resource->ate; int index; int start_free; @@ -70,12 +67,10 @@ static int find_free_ate(struct ate_resource *ate_resource, int start, static inline void free_ate_resource(struct ate_resource *ate_resource, int start) { - mark_ate(ate_resource, start, ate_resource->ate[start], 0); if ((ate_resource->lowest_free_index > start) || (ate_resource->lowest_free_index < 0)) ate_resource->lowest_free_index = start; - } /* @@ -84,7 +79,6 @@ static inline void free_ate_resource(struct ate_resource *ate_resource, static inline int alloc_ate_resource(struct ate_resource *ate_resource, int ate_needed) { - int start_index; /* @@ -118,19 +112,12 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource, */ int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count) { - int status = 0; - u64 flag; + int status; + unsigned long flags; - flag = pcibr_lock(pcibus_info); + spin_lock_irqsave(&pcibus_info->pbi_lock, flags); status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count); - - if (status < 0) { - /* Failed to allocate */ - pcibr_unlock(pcibus_info, flag); - return -1; - } - - pcibr_unlock(pcibus_info, flag); + spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags); return status; } @@ -182,7 +169,7 @@ void pcibr_ate_free(struct pcibus_info *pcibus_info, int index) ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V)); } - flags = pcibr_lock(pcibus_info); + spin_lock_irqsave(&pcibus_info->pbi_lock, flags); free_ate_resource(&pcibus_info->pbi_int_ate_resource, index); - pcibr_unlock(pcibus_info, flags); + spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags); } diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c index 54ce5b7ceed2..9f86bb6519aa 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c @@ -137,14 +137,12 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr, pci_addr |= PCI64_ATTR_VIRTUAL; return pci_addr; - } static dma_addr_t pcibr_dmatrans_direct32(struct pcidev_info * info, u64 paddr, size_t req_size, u64 flags) { - struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> pdi_pcibus_info; @@ -171,7 +169,6 @@ pcibr_dmatrans_direct32(struct pcidev_info * info, } return PCI32_DIRECT_BASE | offset; - } /* @@ -218,9 +215,8 @@ void sn_dma_flush(u64 addr) u64 flags; u64 itte; struct hubdev_info *hubinfo; - volatile struct sn_flush_device_kernel *p; - volatile struct sn_flush_device_common *common; - + struct sn_flush_device_kernel *p; + struct sn_flush_device_common *common; struct sn_flush_nasid_entry *flush_nasid_list; if (!sn_ioif_inited) @@ -310,8 +306,7 @@ void sn_dma_flush(u64 addr) (common->sfdl_slot - 1)); } } else { - spin_lock_irqsave((spinlock_t *)&p->sfdl_flush_lock, - flags); + spin_lock_irqsave(&p->sfdl_flush_lock, flags); *common->sfdl_flush_addr = 0; /* force an interrupt. */ @@ -322,8 +317,7 @@ void sn_dma_flush(u64 addr) cpu_relax(); /* okay, everything is synched up. 
*/ - spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, - flags); + spin_unlock_irqrestore(&p->sfdl_flush_lock, flags); } return; } diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c index 2fac27049bf6..98f716bd92f0 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c @@ -163,9 +163,12 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont /* Setup the PMU ATE map */ soft->pbi_int_ate_resource.lowest_free_index = 0; soft->pbi_int_ate_resource.ate = - kmalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL); - memset(soft->pbi_int_ate_resource.ate, 0, - (soft->pbi_int_ate_size * sizeof(u64))); + kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL); + + if (!soft->pbi_int_ate_resource.ate) { + kfree(soft); + return NULL; + } if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) { /* TIO PCI Bridge: find nearest node with CPUs */ diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 96b919828053..8849439e88dd 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -21,6 +21,10 @@ config GENERIC_CALIBRATE_DELAY bool default y +config TIME_LOW_RES + bool + default y + config ARCH_MAY_HAVE_PC_FDC bool depends on Q40 || (BROKEN && SUN3X) diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig index e2a6e8648960..e50858dbc237 100644 --- a/arch/m68knommu/Kconfig +++ b/arch/m68knommu/Kconfig @@ -29,6 +29,10 @@ config GENERIC_CALIBRATE_DELAY bool default y +config TIME_LOW_RES + bool + default y + source "init/Kconfig" menu "Processor type and features" diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 6a57407df1bc..38c0f3360d51 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -94,7 +94,6 @@ endif # machines may also. Since BFD is incredibly buggy with respect to # crossformat linking we rely on the elf2ecoff tool for format conversion. 
# -cflags-y += -I $(TOPDIR)/include/asm/gcc cflags-y += -G 0 -mno-abicalls -fno-pic -pipe LDFLAGS_vmlinux += -G 0 -static -n -nostdlib MODFLAGS += -mlong-calls diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 5232fc752935..092679c2dca9 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -25,6 +25,7 @@ #include <linux/a.out.h> #include <linux/init.h> #include <linux/completion.h> +#include <linux/kallsyms.h> #include <asm/abi.h> #include <asm/bootinfo.h> @@ -272,46 +273,19 @@ long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) static struct mips_frame_info { void *func; - int omit_fp; /* compiled without fno-omit-frame-pointer */ - int frame_offset; + unsigned long func_size; + int frame_size; int pc_offset; -} schedule_frame, mfinfo[] = { - { schedule, 0 }, /* must be first */ - /* arch/mips/kernel/semaphore.c */ - { __down, 1 }, - { __down_interruptible, 1 }, - /* kernel/sched.c */ -#ifdef CONFIG_PREEMPT - { preempt_schedule, 0 }, -#endif - { wait_for_completion, 0 }, - { interruptible_sleep_on, 0 }, - { interruptible_sleep_on_timeout, 0 }, - { sleep_on, 0 }, - { sleep_on_timeout, 0 }, - { yield, 0 }, - { io_schedule, 0 }, - { io_schedule_timeout, 0 }, -#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT) - { __preempt_spin_lock, 0 }, - { __preempt_write_lock, 0 }, -#endif - /* kernel/timer.c */ - { schedule_timeout, 1 }, -/* { nanosleep_restart, 1 }, */ - /* lib/rwsem-spinlock.c */ - { __down_read, 1 }, - { __down_write, 1 }, -}; +} *schedule_frame, mfinfo[64]; +static int mfinfo_num; -static int mips_frame_info_initialized; static int __init get_frame_info(struct mips_frame_info *info) { int i; void *func = info->func; union mips_instruction *ip = (union mips_instruction *)func; info->pc_offset = -1; - info->frame_offset = info->omit_fp ? 0 : -1; + info->frame_size = 0; for (i = 0; i < 128; i++, ip++) { /* if jal, jalr, jr, stop. */ if (ip->j_format.opcode == jal_op || @@ -320,6 +294,23 @@ static int __init get_frame_info(struct mips_frame_info *info) ip->r_format.func == jr_op))) break; + if (info->func_size && i >= info->func_size / 4) + break; + if ( +#ifdef CONFIG_32BIT + ip->i_format.opcode == addiu_op && +#endif +#ifdef CONFIG_64BIT + ip->i_format.opcode == daddiu_op && +#endif + ip->i_format.rs == 29 && + ip->i_format.rt == 29) { + /* addiu/daddiu sp,sp,-imm */ + if (info->frame_size) + continue; + info->frame_size = - ip->i_format.simmediate; + } + if ( #ifdef CONFIG_32BIT ip->i_format.opcode == sw_op && @@ -327,31 +318,20 @@ static int __init get_frame_info(struct mips_frame_info *info) #ifdef CONFIG_64BIT ip->i_format.opcode == sd_op && #endif - ip->i_format.rs == 29) - { + ip->i_format.rs == 29 && + ip->i_format.rt == 31) { /* sw / sd $ra, offset($sp) */ - if (ip->i_format.rt == 31) { - if (info->pc_offset != -1) - continue; - info->pc_offset = - ip->i_format.simmediate / sizeof(long); - } - /* sw / sd $s8, offset($sp) */ - if (ip->i_format.rt == 30) { -//#if 0 /* gcc 3.4 does aggressive optimization... 
*/ - if (info->frame_offset != -1) - continue; -//#endif - info->frame_offset = - ip->i_format.simmediate / sizeof(long); - } + if (info->pc_offset != -1) + continue; + info->pc_offset = + ip->i_format.simmediate / sizeof(long); } } - if (info->pc_offset == -1 || info->frame_offset == -1) { - printk("Can't analyze prologue code at %p\n", func); + if (info->pc_offset == -1 || info->frame_size == 0) { + if (func == schedule) + printk("Can't analyze prologue code at %p\n", func); info->pc_offset = -1; - info->frame_offset = -1; - return -1; + info->frame_size = 0; } return 0; @@ -359,25 +339,36 @@ static int __init get_frame_info(struct mips_frame_info *info) static int __init frame_info_init(void) { - int i, found; - for (i = 0; i < ARRAY_SIZE(mfinfo); i++) - if (get_frame_info(&mfinfo[i])) - return -1; - schedule_frame = mfinfo[0]; - /* bubble sort */ - do { - struct mips_frame_info tmp; - found = 0; - for (i = 1; i < ARRAY_SIZE(mfinfo); i++) { - if (mfinfo[i-1].func > mfinfo[i].func) { - tmp = mfinfo[i]; - mfinfo[i] = mfinfo[i-1]; - mfinfo[i-1] = tmp; - found = 1; - } - } - } while (found); - mips_frame_info_initialized = 1; + int i; +#ifdef CONFIG_KALLSYMS + char *modname; + char namebuf[KSYM_NAME_LEN + 1]; + unsigned long start, size, ofs; + extern char __sched_text_start[], __sched_text_end[]; + extern char __lock_text_start[], __lock_text_end[]; + + start = (unsigned long)__sched_text_start; + for (i = 0; i < ARRAY_SIZE(mfinfo); i++) { + if (start == (unsigned long)schedule) + schedule_frame = &mfinfo[i]; + if (!kallsyms_lookup(start, &size, &ofs, &modname, namebuf)) + break; + mfinfo[i].func = (void *)(start + ofs); + mfinfo[i].func_size = size; + start += size - ofs; + if (start >= (unsigned long)__lock_text_end) + break; + if (start == (unsigned long)__sched_text_end) + start = (unsigned long)__lock_text_start; + } +#else + mfinfo[0].func = schedule; + schedule_frame = &mfinfo[0]; +#endif + for (i = 0; i < ARRAY_SIZE(mfinfo) && mfinfo[i].func; i++) + get_frame_info(&mfinfo[i]); + + mfinfo_num = i; return 0; } @@ -394,47 +385,52 @@ unsigned long thread_saved_pc(struct task_struct *tsk) if (t->reg31 == (unsigned long) ret_from_fork) return t->reg31; - if (schedule_frame.pc_offset < 0) + if (!schedule_frame || schedule_frame->pc_offset < 0) return 0; - return ((unsigned long *)t->reg29)[schedule_frame.pc_offset]; + return ((unsigned long *)t->reg29)[schedule_frame->pc_offset]; } /* get_wchan - a maintenance nightmare^W^Wpain in the ass ... 
*/ unsigned long get_wchan(struct task_struct *p) { unsigned long stack_page; - unsigned long frame, pc; + unsigned long pc; +#ifdef CONFIG_KALLSYMS + unsigned long frame; +#endif if (!p || p == current || p->state == TASK_RUNNING) return 0; stack_page = (unsigned long)task_stack_page(p); - if (!stack_page || !mips_frame_info_initialized) + if (!stack_page || !mfinfo_num) return 0; pc = thread_saved_pc(p); +#ifdef CONFIG_KALLSYMS if (!in_sched_functions(pc)) return pc; - frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset]; + frame = p->thread.reg29 + schedule_frame->frame_size; do { int i; if (frame < stack_page || frame > stack_page + THREAD_SIZE - 32) return 0; - for (i = ARRAY_SIZE(mfinfo) - 1; i >= 0; i--) { + for (i = mfinfo_num - 1; i >= 0; i--) { if (pc >= (unsigned long) mfinfo[i].func) break; } if (i < 0) break; - if (mfinfo[i].omit_fp) - break; pc = ((unsigned long *)frame)[mfinfo[i].pc_offset]; - frame = ((unsigned long *)frame)[mfinfo[i].frame_offset]; + if (!mfinfo[i].frame_size) + break; + frame += mfinfo[i].frame_size; } while (in_sched_functions(pc)); +#endif return pc; } diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index d7c4a38ed5ae..d83e033dbc87 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -623,7 +623,7 @@ einval: li v0, -EINVAL sys sys_mknodat 4 /* 4290 */ sys sys_fchownat 5 sys sys_futimesat 3 - sys sys_newfstatat 4 + sys sys_fstatat64 4 sys sys_unlinkat 3 sys sys_renameat 4 /* 4295 */ sys sys_linkat 4 diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h index 0fbc492d24b4..36bfc2588aa3 100644 --- a/arch/mips/kernel/signal-common.h +++ b/arch/mips/kernel/signal-common.h @@ -176,7 +176,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0)) sp = current->sas_ss_sp + current->sas_ss_size; - return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? 32 : ALMASK)); + return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK)); } static inline int install_sigtramp(unsigned int __user *tramp, diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index da3271e1fdac..8a8b8dd90417 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -537,7 +537,7 @@ _sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) /* The ucontext contains a stack32_t, so we must convert! */ if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp)) goto badframe; - st.ss_size = (long) sp; + st.ss_sp = (void *)(long) sp; if (__get_user(st.ss_size, &frame->rs_uc.uc_stack.ss_size)) goto badframe; if (__get_user(st.ss_flags, &frame->rs_uc.uc_stack.ss_flags)) diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index 384fc4a639a4..5a3776096f07 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -108,7 +108,7 @@ _sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) /* The ucontext contains a stack32_t, so we must convert! 
*/ if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp)) goto badframe; - st.ss_size = (long) sp; + st.ss_sp = (void *)(long) sp; if (__get_user(st.ss_size, &frame->rs_uc.uc_stack.ss_size)) goto badframe; if (__get_user(st.ss_flags, &frame->rs_uc.uc_stack.ss_flags)) diff --git a/arch/mips/kernel/smp_mt.c b/arch/mips/kernel/smp_mt.c index 794a1c3de2a4..c930364830d0 100644 --- a/arch/mips/kernel/smp_mt.c +++ b/arch/mips/kernel/smp_mt.c @@ -68,6 +68,8 @@ void __init sanitize_tlb_entries(void) set_c0_mvpcontrol(MVPCONTROL_VPC); + back_to_back_c0_hazard(); + /* Disable TLB sharing */ clear_c0_mvpcontrol(MVPCONTROL_STLB); @@ -102,35 +104,6 @@ void __init sanitize_tlb_entries(void) clear_c0_mvpcontrol(MVPCONTROL_VPC); } -#if 0 -/* - * Use c0_MVPConf0 to find out how many CPUs are available, setting up - * phys_cpu_present_map and the logical/physical mappings. - */ -void __init prom_build_cpu_map(void) -{ - int i, num, ncpus; - - cpus_clear(phys_cpu_present_map); - - /* assume we boot on cpu 0.... */ - cpu_set(0, phys_cpu_present_map); - __cpu_number_map[0] = 0; - __cpu_logical_map[0] = 0; - - if (cpu_has_mipsmt) { - ncpus = ((read_c0_mvpconf0() & (MVPCONF0_PVPE)) >> MVPCONF0_PVPE_SHIFT) + 1; - for (i=1, num=0; i< NR_CPUS && i<ncpus; i++) { - cpu_set(i, phys_cpu_present_map); - __cpu_number_map[i] = ++num; - __cpu_logical_map[num] = i; - } - - printk(KERN_INFO "%i available secondary CPU(s)\n", num); - } -} -#endif - static void ipi_resched_dispatch (struct pt_regs *regs) { do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ, regs); @@ -222,6 +195,9 @@ void prom_prepare_cpus(unsigned int max_cpus) /* set config to be the same as vpe0, particularly kseg0 coherency alg */ write_vpe_c0_config( read_c0_config()); + + /* Propagate Config7 */ + write_vpe_c0_config7(read_c0_config7()); } } diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index e51c38cef88e..1b71d91e8268 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -471,61 +471,29 @@ struct flush_icache_range_args { static inline void local_r4k_flush_icache_range(void *args) { struct flush_icache_range_args *fir_args = args; - unsigned long dc_lsize = cpu_dcache_line_size(); - unsigned long ic_lsize = cpu_icache_line_size(); - unsigned long sc_lsize = cpu_scache_line_size(); unsigned long start = fir_args->start; unsigned long end = fir_args->end; - unsigned long addr, aend; if (!cpu_has_ic_fills_f_dc) { if (end - start > dcache_size) { r4k_blast_dcache(); } else { R4600_HIT_CACHEOP_WAR_IMPL; - addr = start & ~(dc_lsize - 1); - aend = (end - 1) & ~(dc_lsize - 1); - - while (1) { - /* Hit_Writeback_Inv_D */ - protected_writeback_dcache_line(addr); - if (addr == aend) - break; - addr += dc_lsize; - } + protected_blast_dcache_range(start, end); } if (!cpu_icache_snoops_remote_store) { - if (end - start > scache_size) { + if (end - start > scache_size) r4k_blast_scache(); - } else { - addr = start & ~(sc_lsize - 1); - aend = (end - 1) & ~(sc_lsize - 1); - - while (1) { - /* Hit_Writeback_Inv_SD */ - protected_writeback_scache_line(addr); - if (addr == aend) - break; - addr += sc_lsize; - } - } + else + protected_blast_scache_range(start, end); } } if (end - start > icache_size) r4k_blast_icache(); - else { - addr = start & ~(ic_lsize - 1); - aend = (end - 1) & ~(ic_lsize - 1); - while (1) { - /* Hit_Invalidate_I */ - protected_flush_icache_line(addr); - if (addr == aend) - break; - addr += ic_lsize; - } - } + else + protected_blast_icache_range(start, end); } static void r4k_flush_icache_range(unsigned long start, unsigned long end) @@ -619,27 +587,14 @@ 
static void r4k_flush_icache_page(struct vm_area_struct *vma, static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) { - unsigned long end, a; - /* Catch bad driver code */ BUG_ON(size == 0); if (cpu_has_subset_pcaches) { - unsigned long sc_lsize = cpu_scache_line_size(); - - if (size >= scache_size) { + if (size >= scache_size) r4k_blast_scache(); - return; - } - - a = addr & ~(sc_lsize - 1); - end = (addr + size - 1) & ~(sc_lsize - 1); - while (1) { - flush_scache_line(a); /* Hit_Writeback_Inv_SD */ - if (a == end) - break; - a += sc_lsize; - } + else + blast_scache_range(addr, addr + size); return; } @@ -651,17 +606,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) if (size >= dcache_size) { r4k_blast_dcache(); } else { - unsigned long dc_lsize = cpu_dcache_line_size(); - R4600_HIT_CACHEOP_WAR_IMPL; - a = addr & ~(dc_lsize - 1); - end = (addr + size - 1) & ~(dc_lsize - 1); - while (1) { - flush_dcache_line(a); /* Hit_Writeback_Inv_D */ - if (a == end) - break; - a += dc_lsize; - } + blast_dcache_range(addr, addr + size); } bc_wback_inv(addr, size); @@ -669,44 +615,22 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) { - unsigned long end, a; - /* Catch bad driver code */ BUG_ON(size == 0); if (cpu_has_subset_pcaches) { - unsigned long sc_lsize = cpu_scache_line_size(); - - if (size >= scache_size) { + if (size >= scache_size) r4k_blast_scache(); - return; - } - - a = addr & ~(sc_lsize - 1); - end = (addr + size - 1) & ~(sc_lsize - 1); - while (1) { - flush_scache_line(a); /* Hit_Writeback_Inv_SD */ - if (a == end) - break; - a += sc_lsize; - } + else + blast_scache_range(addr, addr + size); return; } if (size >= dcache_size) { r4k_blast_dcache(); } else { - unsigned long dc_lsize = cpu_dcache_line_size(); - R4600_HIT_CACHEOP_WAR_IMPL; - a = addr & ~(dc_lsize - 1); - end = (addr + size - 1) & ~(dc_lsize - 1); - while (1) { - flush_dcache_line(a); /* Hit_Writeback_Inv_D */ - if (a == end) - break; - a += dc_lsize; - } + blast_dcache_range(addr, addr + size); } bc_inv(addr, size); diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c index 0a97a9434eba..7c572bea4a98 100644 --- a/arch/mips/mm/c-tx39.c +++ b/arch/mips/mm/c-tx39.c @@ -44,8 +44,6 @@ __asm__ __volatile__( \ /* TX39H-style cache flush routines. 
*/ static void tx39h_flush_icache_all(void) { - unsigned long start = KSEG0; - unsigned long end = (start + icache_size); unsigned long flags, config; /* disable icache (set ICE#) */ @@ -53,33 +51,18 @@ static void tx39h_flush_icache_all(void) config = read_c0_conf(); write_c0_conf(config & ~TX39_CONF_ICE); TX39_STOP_STREAMING(); - - /* invalidate icache */ - while (start < end) { - cache16_unroll32(start, Index_Invalidate_I); - start += 0x200; - } - + blast_icache16(); write_c0_conf(config); local_irq_restore(flags); } static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size) { - unsigned long end, a; - unsigned long dc_lsize = current_cpu_data.dcache.linesz; - /* Catch bad driver code */ BUG_ON(size == 0); iob(); - a = addr & ~(dc_lsize - 1); - end = (addr + size - 1) & ~(dc_lsize - 1); - while (1) { - invalidate_dcache_line(a); /* Hit_Invalidate_D */ - if (a == end) break; - a += dc_lsize; - } + blast_inv_dcache_range(addr, addr + size); } @@ -241,42 +224,21 @@ static void tx39_flush_data_cache_page(unsigned long addr) static void tx39_flush_icache_range(unsigned long start, unsigned long end) { - unsigned long dc_lsize = current_cpu_data.dcache.linesz; - unsigned long addr, aend; - if (end - start > dcache_size) tx39_blast_dcache(); - else { - addr = start & ~(dc_lsize - 1); - aend = (end - 1) & ~(dc_lsize - 1); - - while (1) { - /* Hit_Writeback_Inv_D */ - protected_writeback_dcache_line(addr); - if (addr == aend) - break; - addr += dc_lsize; - } - } + else + protected_blast_dcache_range(start, end); if (end - start > icache_size) tx39_blast_icache(); else { unsigned long flags, config; - addr = start & ~(dc_lsize - 1); - aend = (end - 1) & ~(dc_lsize - 1); /* disable icache (set ICE#) */ local_irq_save(flags); config = read_c0_conf(); write_c0_conf(config & ~TX39_CONF_ICE); TX39_STOP_STREAMING(); - while (1) { - /* Hit_Invalidate_I */ - protected_flush_icache_line(addr); - if (addr == aend) - break; - addr += dc_lsize; - } + protected_blast_icache_range(start, end); write_c0_conf(config); local_irq_restore(flags); } @@ -311,7 +273,7 @@ static void tx39_flush_icache_page(struct vm_area_struct *vma, struct page *page static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size) { - unsigned long end, a; + unsigned long end; if (((size | addr) & (PAGE_SIZE - 1)) == 0) { end = addr + size; @@ -322,20 +284,13 @@ static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size) } else if (size > dcache_size) { tx39_blast_dcache(); } else { - unsigned long dc_lsize = current_cpu_data.dcache.linesz; - a = addr & ~(dc_lsize - 1); - end = (addr + size - 1) & ~(dc_lsize - 1); - while (1) { - flush_dcache_line(a); /* Hit_Writeback_Inv_D */ - if (a == end) break; - a += dc_lsize; - } + blast_dcache_range(addr, addr + size); } } static void tx39_dma_cache_inv(unsigned long addr, unsigned long size) { - unsigned long end, a; + unsigned long end; if (((size | addr) & (PAGE_SIZE - 1)) == 0) { end = addr + size; @@ -346,14 +301,7 @@ static void tx39_dma_cache_inv(unsigned long addr, unsigned long size) } else if (size > dcache_size) { tx39_blast_dcache(); } else { - unsigned long dc_lsize = current_cpu_data.dcache.linesz; - a = addr & ~(dc_lsize - 1); - end = (addr + size - 1) & ~(dc_lsize - 1); - while (1) { - invalidate_dcache_line(a); /* Hit_Invalidate_D */ - if (a == end) break; - a += dc_lsize; - } + blast_inv_dcache_range(addr, addr + size); } } diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 7c914a4c67c3..eca33cfa8a4c 100644 --- 
a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -29,6 +29,11 @@ config GENERIC_CALIBRATE_DELAY bool default y +config TIME_LOW_RES + bool + depends on SMP + default y + config GENERIC_ISA_DMA bool diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 51d2480627d1..71011eadb872 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -377,15 +377,15 @@ ENTRY_SAME(inotify_init) ENTRY_SAME(inotify_add_watch) /* 270 */ ENTRY_SAME(inotify_rm_watch) - ENTRY_COMP(pselect6) - ENTRY_COMP(ppoll) + ENTRY_SAME(ni_syscall) /* 271 ENTRY_COMP(pselect6) */ + ENTRY_SAME(ni_syscall) /* 272 ENTRY_COMP(ppoll) */ ENTRY_SAME(migrate_pages) ENTRY_COMP(openat) /* 275 */ ENTRY_SAME(mkdirat) ENTRY_SAME(mknodat) ENTRY_SAME(fchownat) ENTRY_COMP(futimesat) - ENTRY_COMP(newfstatat) /* 280 */ + ENTRY_SAME(fstatat64) /* 280 */ ENTRY_SAME(unlinkat) ENTRY_SAME(renameat) ENTRY_SAME(linkat) diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index b66602ad7b33..b7ca5bf9acfc 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -80,6 +80,10 @@ config HOTPLUG_CPU can be controlled through /sys/devices/system/cpu/cpu#. Say N if you want to disable CPU hotplug. +config DEFAULT_MIGRATION_COST + int + default "1000000" + config MATHEMU bool "IEEE FPU emulation" depends on MARCH_G5 diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 2d021626c1a6..cc058dc3bc8b 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -905,8 +905,8 @@ asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * sta return ret; } -asmlinkage long sys32_fstatat(unsigned int dfd, char __user *filename, - struct stat64_emu31 __user* statbuf, int flag) +asmlinkage long sys32_fstatat64(unsigned int dfd, char __user *filename, + struct stat64_emu31 __user* statbuf, int flag) { struct kstat stat; int error = -EINVAL; diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index dd2d6c3e8df8..615964cca15f 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -1523,13 +1523,13 @@ compat_sys_futimesat_wrapper: llgtr %r4,%r4 # struct timeval * jg compat_sys_futimesat - .globl sys32_fstatat_wrapper -sys32_fstatat_wrapper: + .globl sys32_fstatat64_wrapper +sys32_fstatat64_wrapper: llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # char * llgtr %r4,%r4 # struct stat64 * lgfr %r5,%r5 # int - jg sys32_fstatat + jg sys32_fstatat64 .globl sys_unlinkat_wrapper sys_unlinkat_wrapper: diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 008c74526fd3..da6fbae8df91 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -128,8 +128,10 @@ void default_idle(void) __ctl_set_bit(8, 15); #ifdef CONFIG_HOTPLUG_CPU - if (cpu_is_offline(cpu)) + if (cpu_is_offline(cpu)) { + preempt_enable_no_resched(); cpu_die(); + } #endif local_mcck_disable(); diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index de8784267473..24f62f16c0e5 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -600,6 +600,7 @@ setup_arch(char **cmdline_p) init_mm.brk = (unsigned long) &_end; parse_cmdline_early(cmdline_p); + parse_early_param(); setup_memory(); setup_resources(); @@ -607,6 +608,7 @@ setup_arch(char **cmdline_p) cpu_init(); __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; + smp_setup_cpu_possible_map(); /* * Create kernel page tables and switch to virtual addressing. 
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 0d1ad5dbe2b1..7dbe00c76c6b 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -1,8 +1,7 @@ /* * arch/s390/kernel/smp.c * - * S390 version - * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright (C) IBM Corp. 1999,2006 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), * Martin Schwidefsky (schwidefsky@de.ibm.com) * Heiko Carstens (heiko.carstens@de.ibm.com) @@ -41,8 +40,6 @@ #include <asm/cpcmd.h> #include <asm/tlbflush.h> -/* prototypes */ - extern volatile int __cpu_logical_map[]; /* @@ -51,13 +48,11 @@ extern volatile int __cpu_logical_map[]; struct _lowcore *lowcore_ptr[NR_CPUS]; -cpumask_t cpu_online_map; -cpumask_t cpu_possible_map = CPU_MASK_ALL; +cpumask_t cpu_online_map = CPU_MASK_NONE; +cpumask_t cpu_possible_map = CPU_MASK_NONE; static struct task_struct *current_set[NR_CPUS]; -EXPORT_SYMBOL(cpu_online_map); - /* * Reboot, halt and power_off routines for SMP. */ @@ -490,10 +485,10 @@ void smp_ctl_clear_bit(int cr, int bit) { * Lets check how many CPUs we have. */ -void -__init smp_check_cpus(unsigned int max_cpus) +static unsigned int +__init smp_count_cpus(void) { - int cpu, num_cpus; + unsigned int cpu, num_cpus; __u16 boot_cpu_addr; /* @@ -503,19 +498,20 @@ __init smp_check_cpus(unsigned int max_cpus) boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; current_thread_info()->cpu = 0; num_cpus = 1; - for (cpu = 0; cpu <= 65535 && num_cpus < max_cpus; cpu++) { + for (cpu = 0; cpu <= 65535; cpu++) { if ((__u16) cpu == boot_cpu_addr) continue; - __cpu_logical_map[num_cpus] = (__u16) cpu; - if (signal_processor(num_cpus, sigp_sense) == + __cpu_logical_map[1] = (__u16) cpu; + if (signal_processor(1, sigp_sense) == sigp_not_operational) continue; - cpu_set(num_cpus, cpu_present_map); num_cpus++; } printk("Detected %d CPU's\n",(int) num_cpus); printk("Boot cpu address %2X\n", boot_cpu_addr); + + return num_cpus; } /* @@ -676,6 +672,44 @@ __cpu_up(unsigned int cpu) return 0; } +static unsigned int __initdata additional_cpus; +static unsigned int __initdata possible_cpus; + +void __init smp_setup_cpu_possible_map(void) +{ + unsigned int phy_cpus, pos_cpus, cpu; + + phy_cpus = smp_count_cpus(); + pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS); + + if (possible_cpus) + pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS); + + for (cpu = 0; cpu < pos_cpus; cpu++) + cpu_set(cpu, cpu_possible_map); + + phy_cpus = min(phy_cpus, pos_cpus); + + for (cpu = 0; cpu < phy_cpus; cpu++) + cpu_set(cpu, cpu_present_map); +} + +#ifdef CONFIG_HOTPLUG_CPU + +static int __init setup_additional_cpus(char *s) +{ + additional_cpus = simple_strtoul(s, NULL, 0); + return 0; +} +early_param("additional_cpus", setup_additional_cpus); + +static int __init setup_possible_cpus(char *s) +{ + possible_cpus = simple_strtoul(s, NULL, 0); + return 0; +} +early_param("possible_cpus", setup_possible_cpus); + int __cpu_disable(void) { @@ -744,6 +778,8 @@ cpu_die(void) for(;;); } +#endif /* CONFIG_HOTPLUG_CPU */ + /* * Cycle through the processors and setup structures. 
*/ @@ -757,7 +793,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) /* request the 0x1201 emergency signal external interrupt */ if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) panic("Couldn't request external interrupt 0x1201"); - smp_check_cpus(max_cpus); memset(lowcore_ptr,0,sizeof(lowcore_ptr)); /* * Initialize prefix pages and stacks for all possible cpus @@ -806,7 +841,6 @@ void __devinit smp_prepare_boot_cpu(void) BUG_ON(smp_processor_id() != 0); cpu_set(0, cpu_online_map); - cpu_set(0, cpu_present_map); S390_lowcore.percpu_offset = __per_cpu_offset[0]; current_set[0] = current; } @@ -845,6 +879,7 @@ static int __init topology_init(void) subsys_initcall(topology_init); +EXPORT_SYMBOL(cpu_online_map); EXPORT_SYMBOL(cpu_possible_map); EXPORT_SYMBOL(lowcore_ptr); EXPORT_SYMBOL(smp_ctl_set_bit); diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 84921fe8d266..7c88d85c3597 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -301,7 +301,7 @@ SYSCALL(sys_mkdirat,sys_mkdirat,sys_mkdirat_wrapper) SYSCALL(sys_mknodat,sys_mknodat,sys_mknodat_wrapper) /* 290 */ SYSCALL(sys_fchownat,sys_fchownat,sys_fchownat_wrapper) SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat_wrapper) -SYSCALL(sys_fstatat64,sys_newfstatat,sys32_fstatat_wrapper) +SYSCALL(sys_fstatat64,sys_newfstatat,sys32_fstatat64_wrapper) SYSCALL(sys_unlinkat,sys_unlinkat,sys_unlinkat_wrapper) SYSCALL(sys_renameat,sys_renameat,sys_renameat_wrapper) /* 295 */ SYSCALL(sys_linkat,sys_linkat,sys_linkat_wrapper) diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index e96c35bddac7..71f0a2fb3078 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c @@ -30,7 +30,7 @@ void __delay(unsigned long loops) */ __asm__ __volatile__( "0: brct %0,0b" - : /* no outputs */ : "r" (loops/2) ); + : /* no outputs */ : "r" ((loops/2) + 1)); } /* diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 504d56f8ca7f..e73621d03a28 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -446,7 +446,7 @@ endmenu config ISA_DMA_API bool - depends on MPC1211 + depends on SH_MPC1211 default y menu "Kernel features" diff --git a/arch/sparc/kernel/systbls.S b/arch/sparc/kernel/systbls.S index c0314705d73a..768de64b371f 100644 --- a/arch/sparc/kernel/systbls.S +++ b/arch/sparc/kernel/systbls.S @@ -76,7 +76,7 @@ sys_call_table: /*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink /*275*/ .long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid /*280*/ .long sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl, sys_openat -/*285*/ .long sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_newfstatat +/*285*/ .long sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64 /*290*/ .long sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat /*295*/ .long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c index 9264ccbaaafa..417727bd87ba 100644 --- a/arch/sparc64/kernel/sys_sparc32.c +++ b/arch/sparc64/kernel/sys_sparc32.c @@ -428,6 +428,27 @@ asmlinkage long compat_sys_fstat64(unsigned int fd, return error; } +asmlinkage long compat_sys_fstatat64(unsigned int dfd, char __user *filename, + struct compat_stat64 __user * statbuf, int flag) +{ + struct kstat stat; + int error = -EINVAL; + + if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0) + goto out; + + if (flag & AT_SYMLINK_NOFOLLOW) + error 
= vfs_lstat_fd(dfd, filename, &stat); + else + error = vfs_stat_fd(dfd, filename, &stat); + + if (!error) + error = cp_compat_stat64(&stat, statbuf); + +out: + return error; +} + asmlinkage long compat_sys_sysfs(int option, u32 arg1, u32 arg2) { return sys_sysfs(option, arg1, arg2); diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S index a19168510be2..c3adb7ac167d 100644 --- a/arch/sparc64/kernel/systbls.S +++ b/arch/sparc64/kernel/systbls.S @@ -77,7 +77,7 @@ sys_call_table32: /*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid /*280*/ .word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat - .word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_newfstatat + .word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_fstatat64 /*285*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare @@ -146,7 +146,7 @@ sys_call_table: /*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink .word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid /*280*/ .word sys_nis_syscall, sys_add_key, sys_request_key, sys_keyctl, sys_openat - .word sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_newfstatat + .word sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64 /*285*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat .word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig index 04494638b963..e7fc3e500342 100644 --- a/arch/v850/Kconfig +++ b/arch/v850/Kconfig @@ -28,6 +28,10 @@ config GENERIC_IRQ_PROBE bool default y +config TIME_LOW_RES + bool + default y + # Turn off some random 386 crap that can affect device config config ISA bool diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig index 56832929a543..b337136f28b6 100644 --- a/arch/x86_64/defconfig +++ b/arch/x86_64/defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.16-rc1-git2 -# Thu Jan 19 10:05:21 2006 +# Linux kernel version: 2.6.16-rc3 +# Mon Feb 13 22:31:24 2006 # CONFIG_X86_64=y CONFIG_64BIT=y @@ -21,7 +21,6 @@ CONFIG_DMI=y # Code maturity level options # CONFIG_EXPERIMENTAL=y -CONFIG_CLEAN_COMPILE=y CONFIG_LOCK_KERNEL=y CONFIG_INIT_ENV_ARG_LIMIT=32 @@ -267,6 +266,7 @@ CONFIG_NET=y # # Networking options # +# CONFIG_NETDEBUG is not set CONFIG_PACKET=y # CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y @@ -446,7 +446,6 @@ CONFIG_BLK_DEV_PIIX=y # CONFIG_BLK_DEV_NS87415 is not set # CONFIG_BLK_DEV_PDC202XX_OLD is not set CONFIG_BLK_DEV_PDC202XX_NEW=y -# CONFIG_PDC202XX_FORCE is not set # CONFIG_BLK_DEV_SVWKS is not set # CONFIG_BLK_DEV_SIIMAGE is not set # CONFIG_BLK_DEV_SIS5513 is not set @@ -573,7 +572,33 @@ CONFIG_FUSION_MAX_SGE=128 # # IEEE 1394 (FireWire) support # -# CONFIG_IEEE1394 is not set +CONFIG_IEEE1394=y + +# +# Subsystem Options +# +# CONFIG_IEEE1394_VERBOSEDEBUG is not set +# CONFIG_IEEE1394_OUI_DB is not set +# CONFIG_IEEE1394_EXTRA_CONFIG_ROMS is not set +# CONFIG_IEEE1394_EXPORT_FULL_API is not set + +# +# Device Drivers +# + +# +# Texas Instruments PCILynx requires I2C +# +CONFIG_IEEE1394_OHCI1394=y + +# +# Protocol Drivers 
+# +# CONFIG_IEEE1394_VIDEO1394 is not set +# CONFIG_IEEE1394_SBP2 is not set +# CONFIG_IEEE1394_ETH1394 is not set +# CONFIG_IEEE1394_DV1394 is not set +CONFIG_IEEE1394_RAWIO=y # # I2O device support @@ -772,6 +797,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4 # CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set CONFIG_UNIX98_PTYS=y CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=256 @@ -871,6 +897,7 @@ CONFIG_HPET_MMAP=y # CONFIG_HWMON=y # CONFIG_HWMON_VID is not set +# CONFIG_SENSORS_F71805F is not set # CONFIG_SENSORS_HDAPS is not set # CONFIG_HWMON_DEBUG_CHIP is not set @@ -1101,7 +1128,6 @@ CONFIG_USB_MON=y # EDAC - error detection and reporting (RAS) # # CONFIG_EDAC is not set -# CONFIG_EDAC_POLL is not set # # Firmware Drivers @@ -1291,14 +1317,12 @@ CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_DEBUG_SPINLOCK is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_KOBJECT is not set -# CONFIG_DEBUG_INFO is not set +CONFIG_DEBUG_INFO=y CONFIG_DEBUG_FS=y # CONFIG_DEBUG_VM is not set # CONFIG_FRAME_POINTER is not set # CONFIG_FORCED_INLINING is not set -# CONFIG_UNWIND_INFO is not set # CONFIG_RCU_TORTURE_TEST is not set -CONFIG_INIT_DEBUG=y # CONFIG_DEBUG_RODATA is not set # CONFIG_IOMMU_DEBUG is not set diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c index 7a0a3e8d5d72..e5b14c57eaa0 100644 --- a/arch/x86_64/kernel/apic.c +++ b/arch/x86_64/kernel/apic.c @@ -1152,6 +1152,7 @@ __setup("noapicmaintimer", setup_noapicmaintimer); static __init int setup_apicpmtimer(char *s) { apic_calibrate_pmtmr = 1; + notsc_setup(NULL); return setup_apicmaintimer(NULL); } __setup("apicpmtimer", setup_apicpmtimer); diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S index b150c87a08c6..7c10e9009d61 100644 --- a/arch/x86_64/kernel/entry.S +++ b/arch/x86_64/kernel/entry.S @@ -554,6 +554,7 @@ iret_label: /* running with kernel gs */ bad_iret: movq $-9999,%rdi /* better code? */ + sti jmp do_exit .previous diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S index 692c737feddb..02fc7fa0ea28 100644 --- a/arch/x86_64/kernel/head.S +++ b/arch/x86_64/kernel/head.S @@ -213,6 +213,11 @@ ENTRY(early_idt_handler) cmpl $2,early_recursion_flag(%rip) jz 1f call dump_stack +#ifdef CONFIG_KALLSYMS + leaq early_idt_ripmsg(%rip),%rdi + movq 8(%rsp),%rsi # get rip again + call __print_symbol +#endif 1: hlt jmp 1b early_recursion_flag: @@ -220,6 +225,8 @@ early_recursion_flag: early_idt_msg: .asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n" +early_idt_ripmsg: + .asciz "RIP %s\n" .code32 ENTRY(no_long_mode) diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c index 4282d72b2a26..2585c1d92b26 100644 --- a/arch/x86_64/kernel/io_apic.c +++ b/arch/x86_64/kernel/io_apic.c @@ -30,6 +30,9 @@ #include <linux/mc146818rtc.h> #include <linux/acpi.h> #include <linux/sysdev.h> +#ifdef CONFIG_ACPI +#include <acpi/acpi_bus.h> +#endif #include <asm/io.h> #include <asm/smp.h> @@ -260,6 +263,8 @@ __setup("apic", enable_ioapic_setup); And another hack to disable the IOMMU on VIA chipsets. + ... and others. Really should move this somewhere else. + Kludge-O-Rama. 
*/ void __init check_ioapic(void) { @@ -307,6 +312,17 @@ void __init check_ioapic(void) case PCI_VENDOR_ID_ATI: if (apic_runs_main_timer != 0) break; +#ifdef CONFIG_ACPI + /* Don't do this for laptops right + right now because their timer + doesn't necessarily tick in C2/3 */ + if (acpi_fadt.revision >= 3 && + (acpi_fadt.plvl2_lat + acpi_fadt.plvl3_lat) < 1100) { + printk(KERN_INFO +"ATI board detected, but seems to be a laptop. Timer might be shakey, sorry\n"); + break; + } +#endif printk(KERN_INFO "ATI board detected. Using APIC/PM timer.\n"); apic_runs_main_timer = 1; diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c index dc49bfb6db0a..9013a90b5c2e 100644 --- a/arch/x86_64/kernel/mpparse.c +++ b/arch/x86_64/kernel/mpparse.c @@ -288,9 +288,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc) memcpy(str,mpc->mpc_productid,12); str[12]=0; - printk(KERN_INFO "Product ID: %s ",str); + printk("Product ID: %s ",str); - printk(KERN_INFO "APIC at: 0x%X\n",mpc->mpc_lapic); + printk("APIC at: 0x%X\n",mpc->mpc_lapic); /* save the local APIC address, it might be non-default */ if (!acpi_lapic) diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c index 8be407a1f62d..5bf17e41cd2d 100644 --- a/arch/x86_64/kernel/nmi.c +++ b/arch/x86_64/kernel/nmi.c @@ -236,6 +236,7 @@ static void enable_lapic_nmi_watchdog(void) { if (nmi_active < 0) { nmi_watchdog = NMI_LOCAL_APIC; + touch_nmi_watchdog(); setup_apic_nmi_watchdog(); } } @@ -456,15 +457,17 @@ static DEFINE_PER_CPU(int, nmi_touch); void touch_nmi_watchdog (void) { - int i; + if (nmi_watchdog > 0) { + unsigned cpu; - /* - * Tell other CPUs to reset their alert counters. We cannot - * do it ourselves because the alert count increase is not - * atomic. - */ - for (i = 0; i < NR_CPUS; i++) - per_cpu(nmi_touch, i) = 1; + /* + * Tell other CPUs to reset their alert counters. We cannot + * do it ourselves because the alert count increase is not + * atomic. + */ + for_each_present_cpu (cpu) + per_cpu(nmi_touch, cpu) = 1; + } touch_softlockup_watchdog(); } diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index 3c58c30506a1..67841d11ed1f 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c @@ -1327,8 +1327,7 @@ static int __init nohpet_setup(char *s) __setup("nohpet", nohpet_setup); - -static int __init notsc_setup(char *s) +int __init notsc_setup(char *s) { notsc = 1; return 0; diff --git a/arch/x86_64/mm/k8topology.c b/arch/x86_64/mm/k8topology.c index a5663e0bb01c..dd60e71fdba6 100644 --- a/arch/x86_64/mm/k8topology.c +++ b/arch/x86_64/mm/k8topology.c @@ -155,7 +155,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end) if (!found) return -1; - memnode_shift = compute_hash_shift(nodes, numnodes); + memnode_shift = compute_hash_shift(nodes, 8); if (memnode_shift < 0) { printk(KERN_ERR "No NUMA node hash function found. 
Contact maintainer\n"); return -1; diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c index 6ef9f9a76235..22e51beee8d3 100644 --- a/arch/x86_64/mm/numa.c +++ b/arch/x86_64/mm/numa.c @@ -351,7 +351,7 @@ void __init init_cpu_to_node(void) continue; if (apicid_to_node[apicid] == NUMA_NO_NODE) continue; - cpu_to_node[i] = apicid_to_node[apicid]; + numa_set_node(i,apicid_to_node[apicid]); } } diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c index cd25300726fc..482c25767369 100644 --- a/arch/x86_64/mm/srat.c +++ b/arch/x86_64/mm/srat.c @@ -228,7 +228,8 @@ static int nodes_cover_memory(void) } e820ram = end_pfn - e820_hole_size(0, end_pfn); - if (pxmram < e820ram) { + /* We seem to lose 3 pages somewhere. Allow a bit of slack. */ + if ((long)(e820ram - pxmram) >= 1*1024*1024) { printk(KERN_ERR "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n", (pxmram << PAGE_SHIFT) >> 20, @@ -270,7 +271,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) return -1; } - memnode_shift = compute_hash_shift(nodes, nodes_weight(nodes_parsed)); + memnode_shift = compute_hash_shift(nodes, MAX_NUMNODES); if (memnode_shift < 0) { printk(KERN_ERR "SRAT: No NUMA node hash function found. Contact maintainer\n"); diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c index 7d6481d9fbec..4038dbfa63a0 100644 --- a/drivers/acpi/resources/rscalc.c +++ b/drivers/acpi/resources/rscalc.c @@ -391,8 +391,7 @@ acpi_rs_get_list_length(u8 * aml_buffer, * Ensure a 32-bit boundary for the structure */ extra_struct_bytes = - ACPI_ROUND_UP_to_32_bITS(resource_length) - - resource_length; + ACPI_ROUND_UP_to_32_bITS(resource_length); break; case ACPI_RESOURCE_NAME_END_TAG: @@ -408,8 +407,7 @@ acpi_rs_get_list_length(u8 * aml_buffer, * Add vendor data and ensure a 32-bit boundary for the structure */ extra_struct_bytes = - ACPI_ROUND_UP_to_32_bITS(resource_length) - - resource_length; + ACPI_ROUND_UP_to_32_bITS(resource_length); break; case ACPI_RESOURCE_NAME_ADDRESS32: diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 4e7dbcc425ff..93e44d0292ab 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -645,7 +645,7 @@ static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct pag * b) The data can be used as cache to avoid read requests if we receive a * new write request for the same zone. 
*/ -static void pkt_make_local_copy(struct packet_data *pkt, struct page **pages, int *offsets) +static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec) { int f, p, offs; @@ -653,15 +653,15 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct page **pages, in p = 0; offs = 0; for (f = 0; f < pkt->frames; f++) { - if (pages[f] != pkt->pages[p]) { - void *vfrom = kmap_atomic(pages[f], KM_USER0) + offsets[f]; + if (bvec[f].bv_page != pkt->pages[p]) { + void *vfrom = kmap_atomic(bvec[f].bv_page, KM_USER0) + bvec[f].bv_offset; void *vto = page_address(pkt->pages[p]) + offs; memcpy(vto, vfrom, CD_FRAMESIZE); kunmap_atomic(vfrom, KM_USER0); - pages[f] = pkt->pages[p]; - offsets[f] = offs; + bvec[f].bv_page = pkt->pages[p]; + bvec[f].bv_offset = offs; } else { - BUG_ON(offsets[f] != offs); + BUG_ON(bvec[f].bv_offset != offs); } offs += CD_FRAMESIZE; if (offs >= PAGE_SIZE) { @@ -991,18 +991,17 @@ try_next_bio: static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) { struct bio *bio; - struct page *pages[PACKET_MAX_SIZE]; - int offsets[PACKET_MAX_SIZE]; int f; int frames_write; + struct bio_vec *bvec = pkt->w_bio->bi_io_vec; for (f = 0; f < pkt->frames; f++) { - pages[f] = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE]; - offsets[f] = (f * CD_FRAMESIZE) % PAGE_SIZE; + bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE]; + bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE; } /* - * Fill-in pages[] and offsets[] with data from orig_bios. + * Fill-in bvec with data from orig_bios. */ frames_write = 0; spin_lock(&pkt->lock); @@ -1024,11 +1023,11 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) } if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) { - pages[f] = src_bvl->bv_page; - offsets[f] = src_bvl->bv_offset + src_offs; + bvec[f].bv_page = src_bvl->bv_page; + bvec[f].bv_offset = src_bvl->bv_offset + src_offs; } else { pkt_copy_bio_data(bio, segment, src_offs, - pages[f], offsets[f]); + bvec[f].bv_page, bvec[f].bv_offset); } src_offs += CD_FRAMESIZE; frames_write++; @@ -1042,7 +1041,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) BUG_ON(frames_write != pkt->write_size); if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) { - pkt_make_local_copy(pkt, pages, offsets); + pkt_make_local_copy(pkt, bvec); pkt->cache_valid = 1; } else { pkt->cache_valid = 0; @@ -1055,17 +1054,9 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) pkt->w_bio->bi_bdev = pd->bdev; pkt->w_bio->bi_end_io = pkt_end_io_packet_write; pkt->w_bio->bi_private = pkt; - for (f = 0; f < pkt->frames; f++) { - if ((f + 1 < pkt->frames) && (pages[f + 1] == pages[f]) && - (offsets[f + 1] = offsets[f] + CD_FRAMESIZE)) { - if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE * 2, offsets[f])) - BUG(); - f++; - } else { - if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE, offsets[f])) - BUG(); - } - } + for (f = 0; f < pkt->frames; f++) + if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset)) + BUG(); VPRINTK("pktcdvd: vcnt=%d\n", pkt->w_bio->bi_vcnt); atomic_set(&pkt->io_wait, 1); @@ -1548,7 +1539,7 @@ static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di) case 0x12: /* DVD-RAM */ return 0; default: - printk("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile); + VPRINTK("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile); return 1; } @@ -1894,8 +1885,8 @@ static int pkt_open_write(struct 
pktcdvd_device *pd) unsigned int write_speed, media_write_speed, read_speed; if ((ret = pkt_probe_settings(pd))) { - DPRINTK("pktcdvd: %s failed probe\n", pd->name); - return -EIO; + VPRINTK("pktcdvd: %s failed probe\n", pd->name); + return -EROFS; } if ((ret = pkt_set_write_settings(pd))) { @@ -2053,10 +2044,9 @@ static int pkt_open(struct inode *inode, struct file *file) goto out_dec; } } else { - if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE)) { - ret = -EIO; + ret = pkt_open_dev(pd, file->f_mode & FMODE_WRITE); + if (ret) goto out_dec; - } /* * needed here as well, since ext2 (among others) may change * the blocksize at mount time @@ -2436,11 +2426,12 @@ static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u * The door gets locked when the device is opened, so we * have to unlock it or else the eject command fails. */ - pkt_lock_door(pd, 0); + if (pd->refcnt == 1) + pkt_lock_door(pd, 0); return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg); default: - printk("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd); + VPRINTK("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd); return -ENOTTY; } diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index e522d19ad886..7e21b1ff27c4 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c @@ -474,18 +474,6 @@ static int bt3c_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long /* ======================== Card services HCI interaction ======================== */ -static struct device *bt3c_device(void) -{ - static struct device dev = { - .bus_id = "pcmcia", - }; - kobject_set_name(&dev.kobj, "bt3c"); - kobject_init(&dev.kobj); - - return &dev; -} - - static int bt3c_load_firmware(bt3c_info_t *info, unsigned char *firmware, int count) { char *ptr = (char *) firmware; @@ -574,6 +562,7 @@ static int bt3c_open(bt3c_info_t *info) { const struct firmware *firmware; struct hci_dev *hdev; + client_handle_t handle; int err; spin_lock_init(&(info->lock)); @@ -605,8 +594,10 @@ static int bt3c_open(bt3c_info_t *info) hdev->owner = THIS_MODULE; + handle = info->link.handle; + /* Load firmware */ - err = request_firmware(&firmware, "BT3CPCC.bin", bt3c_device()); + err = request_firmware(&firmware, "BT3CPCC.bin", &handle_to_dev(handle)); if (err < 0) { BT_ERR("Firmware request failed"); goto error; diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h index 8fd6357a48da..2c17e88a8847 100644 --- a/drivers/char/drm/drm_pciids.h +++ b/drivers/char/drm/drm_pciids.h @@ -85,7 +85,6 @@ {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ {0x1002, 0x596A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ {0x1002, 0x596B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ - {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \ {0x1002, 0x5c62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \ diff --git a/drivers/char/esp.c b/drivers/char/esp.c index 57539d8f9f7c..09dc4b01232c 100644 --- a/drivers/char/esp.c +++ b/drivers/char/esp.c @@ -150,17 +150,6 @@ static void rs_wait_until_sent(struct tty_struct *, int); /* Standard COM flags (except for COM4, because of the 8514 problem) */ #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) -/* - * tmp_buf is used as a temporary buffer by serial_write. 
We need to - * lock it in case the memcpy_fromfs blocks while swapping in a page, - * and some other program tries to do a serial write at the same time. - * Since the lock will only come under contention when the system is - * swapping and available memory is low, it makes sense to share one - * buffer across all the serial ports, since it significantly saves - * memory if large numbers of serial ports are open. - */ -static unsigned char *tmp_buf; - static inline int serial_paranoia_check(struct esp_struct *info, char *name, const char *routine) { @@ -1267,7 +1256,7 @@ static int rs_write(struct tty_struct * tty, if (serial_paranoia_check(info, tty->name, "rs_write")) return 0; - if (!tty || !info->xmit_buf || !tmp_buf) + if (!tty || !info->xmit_buf) return 0; while (1) { @@ -2291,11 +2280,7 @@ static int esp_open(struct tty_struct *tty, struct file * filp) tty->driver_data = info; info->tty = tty; - if (!tmp_buf) { - tmp_buf = (unsigned char *) get_zeroed_page(GFP_KERNEL); - if (!tmp_buf) - return -ENOMEM; - } + spin_unlock_irqrestore(&info->lock, flags); /* * Start up serial port @@ -2602,9 +2587,6 @@ static void __exit espserial_exit(void) free_pages((unsigned long)dma_buffer, get_order(DMA_BUFFER_SZ)); - if (tmp_buf) - free_page((unsigned long)tmp_buf); - while (free_pio_buf) { pio_buf = free_pio_buf->next; kfree(free_pio_buf); diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 66a2fee06eb9..ef140ebde117 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -956,22 +956,18 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) } } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) { struct acpi_resource_extended_irq *irqp; - int i; + int i, irq; irqp = &res->data.extended_irq; - if (irqp->interrupt_count > 0) { - hdp->hd_nirqs = irqp->interrupt_count; - - for (i = 0; i < hdp->hd_nirqs; i++) { - int rc = - acpi_register_gsi(irqp->interrupts[i], - irqp->triggering, - irqp->polarity); - if (rc < 0) - return AE_ERROR; - hdp->hd_irq[i] = rc; - } + for (i = 0; i < irqp->interrupt_count; i++) { + irq = acpi_register_gsi(irqp->interrupts[i], + irqp->triggering, irqp->polarity); + if (irq < 0) + return AE_ERROR; + + hdp->hd_irq[hdp->hd_nirqs] = irq; + hdp->hd_nirqs++; } } diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index ec7590951af5..24095f6ee6da 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -33,6 +33,7 @@ static int TPM_INF_DATA; static int TPM_INF_ADDR; static int TPM_INF_BASE; +static int TPM_INF_ADDR_LEN; static int TPM_INF_PORT_LEN; /* TPM header definitions */ @@ -195,6 +196,7 @@ static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count) int i; int ret; u32 size = 0; + number_of_wtx = 0; recv_begin: /* start receiving header */ @@ -378,24 +380,35 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) { TPM_INF_ADDR = pnp_port_start(dev, 0); + TPM_INF_ADDR_LEN = pnp_port_len(dev, 0); TPM_INF_DATA = (TPM_INF_ADDR + 1); TPM_INF_BASE = pnp_port_start(dev, 1); TPM_INF_PORT_LEN = pnp_port_len(dev, 1); - if (!TPM_INF_PORT_LEN) - return -EINVAL; + if ((TPM_INF_PORT_LEN < 4) || (TPM_INF_ADDR_LEN < 2)) { + rc = -EINVAL; + goto err_last; + } dev_info(&dev->dev, "Found %s with ID %s\n", dev->name, dev_id->id); - if (!((TPM_INF_BASE >> 8) & 0xff)) - return -EINVAL; + if (!((TPM_INF_BASE >> 8) & 0xff)) { + rc = -EINVAL; + goto err_last; + } /* publish my base 
address and request region */ tpm_inf.base = TPM_INF_BASE; if (request_region (tpm_inf.base, TPM_INF_PORT_LEN, "tpm_infineon0") == NULL) { - release_region(tpm_inf.base, TPM_INF_PORT_LEN); - return -EINVAL; + rc = -EINVAL; + goto err_last; + } + if (request_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN, + "tpm_infineon0") == NULL) { + rc = -EINVAL; + goto err_last; } } else { - return -EINVAL; + rc = -EINVAL; + goto err_last; } /* query chip for its vendor, its version number a.s.o. */ @@ -443,8 +456,8 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, dev_err(&dev->dev, "Could not set IO-ports to 0x%lx\n", tpm_inf.base); - release_region(tpm_inf.base, TPM_INF_PORT_LEN); - return -EIO; + rc = -EIO; + goto err_release_region; } /* activate register */ @@ -471,14 +484,21 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, rc = tpm_register_hardware(&dev->dev, &tpm_inf); if (rc < 0) { - release_region(tpm_inf.base, TPM_INF_PORT_LEN); - return -ENODEV; + rc = -ENODEV; + goto err_release_region; } return 0; } else { - dev_info(&dev->dev, "No Infineon TPM found!\n"); - return -ENODEV; + rc = -ENODEV; + goto err_release_region; } + +err_release_region: + release_region(tpm_inf.base, TPM_INF_PORT_LEN); + release_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN); + +err_last: + return rc; } static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev) @@ -518,5 +538,5 @@ module_exit(cleanup_inf); MODULE_AUTHOR("Marcel Selhorst <selhorst@crypto.rub.de>"); MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); -MODULE_VERSION("1.6"); +MODULE_VERSION("1.7"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index a23816d3e9a1..e9bba94fc898 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -1841,7 +1841,6 @@ static void release_dev(struct file * filp) tty_closing = tty->count <= 1; o_tty_closing = o_tty && (o_tty->count <= (pty_master ? 1 : 0)); - up(&tty_sem); do_sleep = 0; if (tty_closing) { @@ -1869,6 +1868,7 @@ static void release_dev(struct file * filp) printk(KERN_WARNING "release_dev: %s: read/write wait queue " "active!\n", tty_name(tty, buf)); + up(&tty_sem); schedule(); } @@ -1877,8 +1877,6 @@ static void release_dev(struct file * filp) * both sides, and we've completed the last operation that could * block, so it's safe to proceed with closing. */ - - down(&tty_sem); if (pty_master) { if (--o_tty->count < 0) { printk(KERN_WARNING "release_dev: bad pty slave count " @@ -1892,7 +1890,6 @@ static void release_dev(struct file * filp) tty->count, tty_name(tty, buf)); tty->count = 0; } - up(&tty_sem); /* * We've decremented tty->count, so we need to remove this file @@ -1937,6 +1934,8 @@ static void release_dev(struct file * filp) read_unlock(&tasklist_lock); } + up(&tty_sem); + /* check whether both sides are closing ... */ if (!tty_closing || (o_tty && !o_tty_closing)) return; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 7a511479ae29..9582de1c9cad 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -35,8 +35,8 @@ * level driver of CPUFreq support, and its spinlock. This lock * also protects the cpufreq_cpu_data array. 
*/ -static struct cpufreq_driver *cpufreq_driver; -static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS]; +static struct cpufreq_driver *cpufreq_driver; +static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS]; static DEFINE_SPINLOCK(cpufreq_driver_lock); /* internal prototypes */ @@ -50,15 +50,15 @@ static void handle_update(void *data); * changes to devices when the CPU clock speed changes. * The mutex locks both lists. */ -static struct notifier_block *cpufreq_policy_notifier_list; -static struct notifier_block *cpufreq_transition_notifier_list; -static DECLARE_RWSEM (cpufreq_notifier_rwsem); +static struct notifier_block *cpufreq_policy_notifier_list; +static struct notifier_block *cpufreq_transition_notifier_list; +static DECLARE_RWSEM (cpufreq_notifier_rwsem); static LIST_HEAD(cpufreq_governor_list); -static DEFINE_MUTEX (cpufreq_governor_mutex); +static DEFINE_MUTEX (cpufreq_governor_mutex); -struct cpufreq_policy * cpufreq_cpu_get(unsigned int cpu) +struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) { struct cpufreq_policy *data; unsigned long flags; @@ -85,20 +85,19 @@ struct cpufreq_policy * cpufreq_cpu_get(unsigned int cpu) if (!kobject_get(&data->kobj)) goto err_out_put_module; - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - return data; - err_out_put_module: +err_out_put_module: module_put(cpufreq_driver->owner); - err_out_unlock: +err_out_unlock: spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - err_out: +err_out: return NULL; } EXPORT_SYMBOL_GPL(cpufreq_cpu_get); + void cpufreq_cpu_put(struct cpufreq_policy *data) { kobject_put(&data->kobj); @@ -229,44 +228,53 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { /** - * cpufreq_notify_transition - call notifier chain and adjust_jiffies on frequency transition + * cpufreq_notify_transition - call notifier chain and adjust_jiffies + * on frequency transition. * - * This function calls the transition notifiers and the "adjust_jiffies" function. It is called - * twice on all CPU frequency changes that have external effects. + * This function calls the transition notifiers and the "adjust_jiffies" + * function. It is called twice on all CPU frequency changes that have + * external effects. */ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) { + struct cpufreq_policy *policy; + BUG_ON(irqs_disabled()); freqs->flags = cpufreq_driver->flags; - dprintk("notification %u of frequency transition to %u kHz\n", state, freqs->new); + dprintk("notification %u of frequency transition to %u kHz\n", + state, freqs->new); down_read(&cpufreq_notifier_rwsem); + + policy = cpufreq_cpu_data[freqs->cpu]; switch (state) { + case CPUFREQ_PRECHANGE: - /* detect if the driver reported a value as "old frequency" which - * is not equal to what the cpufreq core thinks is "old frequency". + /* detect if the driver reported a value as "old frequency" + * which is not equal to what the cpufreq core thinks is + * "old frequency". 
*/ if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { - if ((likely(cpufreq_cpu_data[freqs->cpu])) && - (likely(cpufreq_cpu_data[freqs->cpu]->cpu == freqs->cpu)) && - (likely(cpufreq_cpu_data[freqs->cpu]->cur)) && - (unlikely(freqs->old != cpufreq_cpu_data[freqs->cpu]->cur))) - { - dprintk(KERN_WARNING "Warning: CPU frequency is %u, " - "cpufreq assumed %u kHz.\n", freqs->old, cpufreq_cpu_data[freqs->cpu]->cur); - freqs->old = cpufreq_cpu_data[freqs->cpu]->cur; + if ((policy) && (policy->cpu == freqs->cpu) && + (policy->cur) && (policy->cur != freqs->old)) { + dprintk(KERN_WARNING "Warning: CPU frequency is" + " %u, cpufreq assumed %u kHz.\n", + freqs->old, policy->cur); + freqs->old = policy->cur; } } - notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_PRECHANGE, freqs); + notifier_call_chain(&cpufreq_transition_notifier_list, + CPUFREQ_PRECHANGE, freqs); adjust_jiffies(CPUFREQ_PRECHANGE, freqs); break; + case CPUFREQ_POSTCHANGE: adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); - notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs); - if ((likely(cpufreq_cpu_data[freqs->cpu])) && - (likely(cpufreq_cpu_data[freqs->cpu]->cpu == freqs->cpu))) - cpufreq_cpu_data[freqs->cpu]->cur = freqs->new; + notifier_call_chain(&cpufreq_transition_notifier_list, + CPUFREQ_POSTCHANGE, freqs); + if (likely(policy) && likely(policy->cpu == freqs->cpu)) + policy->cur = freqs->new; break; } up_read(&cpufreq_notifier_rwsem); @@ -308,7 +316,7 @@ static int cpufreq_parse_governor (char *str_governor, unsigned int *policy, return 0; } } - out: +out: mutex_unlock(&cpufreq_governor_mutex); } return -EINVAL; @@ -415,7 +423,6 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy, return -EINVAL; ret = cpufreq_set_policy(&new_policy); - return ret ? 
ret : count; } @@ -446,7 +453,7 @@ static ssize_t show_scaling_available_governors (struct cpufreq_policy * policy, goto out; i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name); } - out: +out: i += sprintf(&buf[i], "\n"); return i; } @@ -789,7 +796,6 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) kfree(data); cpufreq_debug_enable_ratelimit(); - return 0; } @@ -870,8 +876,7 @@ unsigned int cpufreq_get(unsigned int cpu) ret = cpufreq_driver->get(cpu); - if (ret && policy->cur && !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) - { + if (ret && policy->cur && !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { /* verify no discrepancy between actual and saved value exists */ if (unlikely(ret != policy->cur)) { cpufreq_out_of_sync(cpu, policy->cur, ret); @@ -881,7 +886,7 @@ unsigned int cpufreq_get(unsigned int cpu) mutex_unlock(&policy->lock); - out: +out: cpufreq_cpu_put(policy); return (ret); @@ -962,7 +967,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg) cpu_policy->cur = cur_freq; } - out: +out: cpufreq_cpu_put(cpu_policy); return 0; } @@ -1169,7 +1174,6 @@ int cpufreq_driver_target(struct cpufreq_policy *policy, mutex_unlock(&policy->lock); cpufreq_cpu_put(policy); - return ret; } EXPORT_SYMBOL_GPL(cpufreq_driver_target); @@ -1208,7 +1212,6 @@ int cpufreq_governor(unsigned int cpu, unsigned int event) mutex_unlock(&policy->lock); cpufreq_cpu_put(policy); - return ret; } EXPORT_SYMBOL_GPL(cpufreq_governor); @@ -1232,7 +1235,6 @@ int cpufreq_register_governor(struct cpufreq_governor *governor) list_add(&governor->governor_list, &cpufreq_governor_list); mutex_unlock(&cpufreq_governor_mutex); - return 0; } EXPORT_SYMBOL_GPL(cpufreq_register_governor); @@ -1277,7 +1279,6 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) mutex_unlock(&cpu_policy->lock); cpufreq_cpu_put(cpu_policy); - return 0; } EXPORT_SYMBOL(cpufreq_get_policy); @@ -1291,9 +1292,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, policy->min, policy->max); - memcpy(&policy->cpuinfo, - &data->cpuinfo, - sizeof(struct cpufreq_cpuinfo)); + memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo)); /* verify the cpu speed can be set within this limit */ ret = cpufreq_driver->verify(policy); @@ -1324,8 +1323,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli up_read(&cpufreq_notifier_rwsem); - data->min = policy->min; - data->max = policy->max; + data->min = policy->min; + data->max = policy->max; dprintk("new min and max freqs are %u - %u kHz\n", data->min, data->max); @@ -1362,7 +1361,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); } - error_out: +error_out: cpufreq_debug_enable_ratelimit(); return ret; } @@ -1421,9 +1420,7 @@ int cpufreq_update_policy(unsigned int cpu) mutex_lock(&data->lock); dprintk("updating policy for CPU %u\n", cpu); - memcpy(&policy, - data, - sizeof(struct cpufreq_policy)); + memcpy(&policy, data, sizeof(struct cpufreq_policy)); policy.min = data->user_policy.min; policy.max = data->user_policy.max; policy.policy = data->user_policy.policy; @@ -1433,8 +1430,13 @@ int cpufreq_update_policy(unsigned int cpu) -> ask driver for current freq and notify governors about a change */ if (cpufreq_driver->get) { policy.cur = cpufreq_driver->get(cpu); - if (data->cur != policy.cur) - cpufreq_out_of_sync(cpu, 
data->cur, policy.cur); + if (!data->cur) { + dprintk("Driver did not initialize current freq"); + data->cur = policy.cur; + } else { + if (data->cur != policy.cur) + cpufreq_out_of_sync(cpu, data->cur, policy.cur); + } } ret = __cpufreq_set_policy(data, &policy); diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index e87d52c59940..d7a9401600bb 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c @@ -1186,7 +1186,8 @@ static int __init sm_it87_init(void) static void __exit sm_it87_exit(void) { - i2c_isa_del_driver(&it87_isa_driver); + if (isa_address) + i2c_isa_del_driver(&it87_isa_driver); i2c_del_driver(&it87_driver); } diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c index 3eb08f004c0f..271e9cb9532c 100644 --- a/drivers/hwmon/vt8231.c +++ b/drivers/hwmon/vt8231.c @@ -437,12 +437,12 @@ static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \ show_temp, NULL, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \ show_temp_max, set_temp_max, offset - 1); \ -static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \ +static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, \ show_temp_min, set_temp_min, offset - 1) static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp0, NULL); static DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp0_max, set_temp0_max); -static DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, show_temp0_min, set_temp0_min); +static DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp0_min, set_temp0_min); define_temperature_sysfs(2); define_temperature_sysfs(3); @@ -451,7 +451,7 @@ define_temperature_sysfs(5); define_temperature_sysfs(6); #define CFG_INFO_TEMP(id) { &sensor_dev_attr_temp##id##_input.dev_attr, \ - &sensor_dev_attr_temp##id##_min.dev_attr, \ + &sensor_dev_attr_temp##id##_max_hyst.dev_attr, \ &sensor_dev_attr_temp##id##_max.dev_attr } #define CFG_INFO_VOLT(id) { &sensor_dev_attr_in##id##_input.dev_attr, \ &sensor_dev_attr_in##id##_min.dev_attr, \ @@ -464,7 +464,7 @@ struct str_device_attr_table { }; static struct str_device_attr_table cfg_info_temp[] = { - { &dev_attr_temp1_input, &dev_attr_temp1_min, &dev_attr_temp1_max }, + { &dev_attr_temp1_input, &dev_attr_temp1_max_hyst, &dev_attr_temp1_max }, CFG_INFO_TEMP(2), CFG_INFO_TEMP(3), CFG_INFO_TEMP(4), diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c index 557114872f3c..64c1f8af5bb2 100644 --- a/drivers/hwmon/w83781d.c +++ b/drivers/hwmon/w83781d.c @@ -95,11 +95,16 @@ MODULE_PARM_DESC(init, "Set to zero to bypass chip initialization"); (0x39))) #define W83781D_REG_CONFIG 0x40 + +/* Interrupt status (W83781D, AS99127F) */ #define W83781D_REG_ALARM1 0x41 #define W83781D_REG_ALARM2 0x42 -#define W83781D_REG_ALARM3 0x450 /* not on W83781D */ -#define W83781D_REG_IRQ 0x4C +/* Real-time status (W83782D, W83783S, W83627HF) */ +#define W83782D_REG_ALARM1 0x459 +#define W83782D_REG_ALARM2 0x45A +#define W83782D_REG_ALARM3 0x45B + #define W83781D_REG_BEEP_CONFIG 0x4D #define W83781D_REG_BEEP_INTS1 0x56 #define W83781D_REG_BEEP_INTS2 0x57 @@ -1513,15 +1518,6 @@ w83781d_init_client(struct i2c_client *client) W83781D_REG_TEMP3_CONFIG, tmp & 0xfe); } } - - if (type != w83781d) { - /* enable comparator mode for temp2 and temp3 so - alarm indication will work correctly */ - i = w83781d_read_value(client, W83781D_REG_IRQ); - if (!(i & 0x40)) - w83781d_write_value(client, W83781D_REG_IRQ, - i | 0x40); - } } /* Start monitoring */ @@ -1612,14 +1608,25 @@ static struct w83781d_data *w83781d_update_device(struct device *dev) data->fan_div[1] 
|= (i >> 4) & 0x04; data->fan_div[2] |= (i >> 5) & 0x04; } - data->alarms = - w83781d_read_value(client, - W83781D_REG_ALARM1) + - (w83781d_read_value(client, W83781D_REG_ALARM2) << 8); if ((data->type == w83782d) || (data->type == w83627hf)) { - data->alarms |= - w83781d_read_value(client, - W83781D_REG_ALARM3) << 16; + data->alarms = w83781d_read_value(client, + W83782D_REG_ALARM1) + | (w83781d_read_value(client, + W83782D_REG_ALARM2) << 8) + | (w83781d_read_value(client, + W83782D_REG_ALARM3) << 16); + } else if (data->type == w83783s) { + data->alarms = w83781d_read_value(client, + W83782D_REG_ALARM1) + | (w83781d_read_value(client, + W83782D_REG_ALARM2) << 8); + } else { + /* No real-time status registers, fall back to + interrupt status registers */ + data->alarms = w83781d_read_value(client, + W83781D_REG_ALARM1) + | (w83781d_read_value(client, + W83781D_REG_ALARM2) << 8); } i = w83781d_read_value(client, W83781D_REG_BEEP_INTS2); data->beep_enable = i >> 7; diff --git a/drivers/i2c/busses/i2c-isa.c b/drivers/i2c/busses/i2c-isa.c index 9f2ffef4d812..4344ae6b1fcb 100644 --- a/drivers/i2c/busses/i2c-isa.c +++ b/drivers/i2c/busses/i2c-isa.c @@ -72,16 +72,6 @@ static ssize_t show_adapter_name(struct device *dev, } static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL); -static int i2c_isa_device_probe(struct device *dev) -{ - return -ENODEV; -} - -static int i2c_isa_device_remove(struct device *dev) -{ - return 0; -} - /* We implement an interface which resembles i2c_{add,del}_driver, but for i2c-isa drivers. We don't have to remember and handle lists @@ -93,8 +83,6 @@ int i2c_isa_add_driver(struct i2c_driver *driver) /* Add the driver to the list of i2c drivers in the driver core */ driver->driver.bus = &i2c_bus_type; - driver->driver.probe = i2c_isa_device_probe; - driver->driver.remove = i2c_isa_device_remove; res = driver_register(&driver->driver); if (res) return res; diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index 9834dce4e20f..0606bd2f6020 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -34,6 +34,7 @@ #include <linux/kernel.h> #include <linux/timer.h> #include <linux/mm.h> +#include <linux/sched.h> #include <linux/interrupt.h> #include <linux/major.h> #include <linux/errno.h> @@ -314,6 +315,8 @@ static void ide_pio_datablock(ide_drive_t *drive, struct request *rq, if (rq->bio) /* fs request */ rq->errors = 0; + touch_softlockup_watchdog(); + switch (drive->hwif->data_phase) { case TASKFILE_MULTI_IN: case TASKFILE_MULTI_OUT: diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c index 2b286e865163..43b96e298363 100644 --- a/drivers/ide/pci/sgiioc4.c +++ b/drivers/ide/pci/sgiioc4.c @@ -13,11 +13,6 @@ * License along with this program; if not, write the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
* - * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, - * Mountain View, CA 94043, or: - * - * http://www.sgi.com - * * For further information regarding this notice, see: * * http://oss.sgi.com/projects/GenInfo/NoticeExplan diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index d393b504bf26..c82f47a66e48 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -665,7 +665,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, struct ib_wc mad_wc; struct ib_send_wr *send_wr = &mad_send_wr->send_wr; - if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) { + /* + * Directed route handling starts if the initial LID routed part of + * a request or the ending LID routed part of a response is empty. + * If we are at the start of the LID routed part, don't update the + * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec. + */ + if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) == + IB_LID_PERMISSIVE && + !smi_handle_dr_smp_send(smp, device->node_type, port_num)) { ret = -EINVAL; printk(KERN_ERR PFX "Invalid directed route\n"); goto out; diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index f9b9b93dc501..2825615ce81c 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -1029,25 +1029,6 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET); dev_lim->uar_scratch_entry_sz = size; - mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", - dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz); - mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", - dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz); - mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", - dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz); - mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", - dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz); - mthca_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", - dev_lim->reserved_mrws, dev_lim->reserved_mtts); - mthca_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", - dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars); - mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", - dev_lim->max_pds, dev_lim->reserved_mgms); - mthca_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", - dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz); - - mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags); - if (mthca_is_memfree(dev)) { MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET); dev_lim->max_srq_sz = 1 << field; @@ -1093,6 +1074,25 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE; } + mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", + dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz); + mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", + dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz); + mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", + dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz); + mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", + dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz); + mthca_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", + dev_lim->reserved_mrws, dev_lim->reserved_mtts); + 
mthca_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", + dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars); + mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", + dev_lim->max_pds, dev_lim->reserved_mgms); + mthca_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", + dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz); + + mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags); + out: mthca_free_mailbox(dev, mailbox); return err; diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index 2a165fd06e57..e481037288d6 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -53,8 +53,8 @@ #define DRV_NAME "ib_mthca" #define PFX DRV_NAME ": " -#define DRV_VERSION "0.06" -#define DRV_RELDATE "June 23, 2005" +#define DRV_VERSION "0.07" +#define DRV_RELDATE "February 13, 2006" enum { MTHCA_FLAG_DDR_HIDDEN = 1 << 1, diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index e0a5412b7e68..2f85a9a831b1 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -78,6 +78,7 @@ enum { IPOIB_FLAG_SUBINTERFACE = 4, IPOIB_MCAST_RUN = 5, IPOIB_STOP_REAPER = 6, + IPOIB_MCAST_STARTED = 7, IPOIB_MAX_BACKOFF_SECONDS = 16, diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index ccaa0c387076..a2408d7ec598 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -533,8 +533,10 @@ void ipoib_mcast_join_task(void *dev_ptr) } if (!priv->broadcast) { - priv->broadcast = ipoib_mcast_alloc(dev, 1); - if (!priv->broadcast) { + struct ipoib_mcast *broadcast; + + broadcast = ipoib_mcast_alloc(dev, 1); + if (!broadcast) { ipoib_warn(priv, "failed to allocate broadcast group\n"); mutex_lock(&mcast_mutex); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) @@ -544,10 +546,11 @@ void ipoib_mcast_join_task(void *dev_ptr) return; } - memcpy(priv->broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4, + spin_lock_irq(&priv->lock); + memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4, sizeof (union ib_gid)); + priv->broadcast = broadcast; - spin_lock_irq(&priv->lock); __ipoib_mcast_add(dev, priv->broadcast); spin_unlock_irq(&priv->lock); } @@ -601,6 +604,10 @@ int ipoib_mcast_start_thread(struct net_device *dev) queue_work(ipoib_workqueue, &priv->mcast_task); mutex_unlock(&mcast_mutex); + spin_lock_irq(&priv->lock); + set_bit(IPOIB_MCAST_STARTED, &priv->flags); + spin_unlock_irq(&priv->lock); + return 0; } @@ -611,6 +618,10 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush) ipoib_dbg_mcast(priv, "stopping multicast thread\n"); + spin_lock_irq(&priv->lock); + clear_bit(IPOIB_MCAST_STARTED, &priv->flags); + spin_unlock_irq(&priv->lock); + mutex_lock(&mcast_mutex); clear_bit(IPOIB_MCAST_RUN, &priv->flags); cancel_delayed_work(&priv->mcast_task); @@ -693,6 +704,14 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid, */ spin_lock(&priv->lock); + if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) || + !priv->broadcast || + !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { + ++priv->stats.tx_dropped; + dev_kfree_skb_any(skb); + goto unlock; + } + mcast = __ipoib_mcast_find(dev, mgid); if (!mcast) { /* Let's create a new send only group now */ @@ -754,6 +773,7 @@ out: ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); } +unlock: spin_unlock(&priv->lock); } diff --git 
a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index 6e0afbb22383..2708167ba175 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -11,7 +11,6 @@ obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o -obj-$(CONFIG_KEYBOARD_98KBD) += 98kbd.o obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 184c4129470d..415c49178985 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -7,7 +7,6 @@ obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o -obj-$(CONFIG_INPUT_98SPKR) += 98spkr.o obj-$(CONFIG_INPUT_UINPUT) += uinput.o obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c index d448bb5e4869..3a6ae85cd69c 100644 --- a/drivers/input/misc/ixp4xx-beeper.c +++ b/drivers/input/misc/ixp4xx-beeper.c @@ -19,6 +19,7 @@ #include <linux/input.h> #include <linux/delay.h> #include <linux/platform_device.h> +#include <linux/interrupt.h> #include <asm/hardware.h> MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); diff --git a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c index c88520d3d13c..40333d61093c 100644 --- a/drivers/input/mouse/logips2pp.c +++ b/drivers/input/mouse/logips2pp.c @@ -232,6 +232,7 @@ static struct ps2pp_info *get_model_info(unsigned char model) { 88, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, { 96, 0, 0 }, { 97, PS2PP_KIND_TP3, PS2PP_WHEEL | PS2PP_HWHEEL }, + { 99, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, { 100, PS2PP_KIND_MX, /* MX510 */ PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN | PS2PP_EXTRA_BTN | PS2PP_NAV_BTN }, diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index b4898d8a68e2..6d9ec9ab1b90 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -68,15 +68,19 @@ struct trackpoint_attr_data { size_t field_offset; unsigned char command; unsigned char mask; + unsigned char inverted; }; static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf) { struct trackpoint_data *tp = psmouse->private; struct trackpoint_attr_data *attr = data; - unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset); + unsigned char value = *(unsigned char *)((char *)tp + attr->field_offset); + + if (attr->inverted) + value = !value; - return sprintf(buf, "%u\n", *field); + return sprintf(buf, "%u\n", value); } static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data, @@ -120,6 +124,9 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data, if (*rest || value > 1) return -EINVAL; + if (attr->inverted) + value = !value; + if (*field != value) { *field = value; trackpoint_toggle_bit(&psmouse->ps2dev, attr->command, attr->mask); @@ -129,11 +136,12 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data, } -#define TRACKPOINT_BIT_ATTR(_name, _command, _mask) \ +#define TRACKPOINT_BIT_ATTR(_name, _command, _mask, _inv) \ static struct trackpoint_attr_data trackpoint_attr_##_name = { \ .field_offset = offsetof(struct trackpoint_data, _name), \ .command = _command, \ .mask = _mask, \ + .inverted = _inv, 
\ }; \ PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO, \ &trackpoint_attr_##_name, \ @@ -150,9 +158,9 @@ TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH); TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME); TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV); -TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON); -TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK); -TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV); +TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0); +TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, 0); +TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, 1); static struct attribute *trackpoint_attrs[] = { &psmouse_attr_sensitivity.dattr.attr, diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h index 9857d8b6ad66..050298b1a09d 100644 --- a/drivers/input/mouse/trackpoint.h +++ b/drivers/input/mouse/trackpoint.h @@ -78,7 +78,7 @@ #define TP_TOGGLE_MB 0x23 /* Disable/Enable Middle Button */ #define TP_MASK_MB 0x01 -#define TP_TOGGLE_EXT_DEV 0x23 /* Toggle external device */ +#define TP_TOGGLE_EXT_DEV 0x23 /* Disable external device */ #define TP_MASK_EXT_DEV 0x02 #define TP_TOGGLE_DRIFT 0x23 /* Drift Correction */ #define TP_MASK_DRIFT 0x80 @@ -125,7 +125,7 @@ #define TP_DEF_MB 0x00 #define TP_DEF_PTSON 0x00 #define TP_DEF_SKIPBACK 0x00 -#define TP_DEF_EXT_DEV 0x01 +#define TP_DEF_EXT_DEV 0x00 /* 0 means enabled */ #define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd)) diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile index 678a8599f9ff..4155197867a3 100644 --- a/drivers/input/serio/Makefile +++ b/drivers/input/serio/Makefile @@ -13,7 +13,6 @@ obj-$(CONFIG_SERIO_RPCKBD) += rpckbd.o obj-$(CONFIG_SERIO_SA1111) += sa1111ps2.o obj-$(CONFIG_SERIO_AMBAKMI) += ambakmi.o obj-$(CONFIG_SERIO_Q40KBD) += q40kbd.o -obj-$(CONFIG_SERIO_98KBD) += 98kbd-io.o obj-$(CONFIG_SERIO_GSCPS2) += gscps2.o obj-$(CONFIG_HP_SDC) += hp_sdc.o obj-$(CONFIG_HIL_MLC) += hp_sdc_mlc.o hil_mlc.o diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index b45a45ca7cc9..8c12a974b411 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -48,10 +48,13 @@ #define TS_POLL_PERIOD msecs_to_jiffies(10) +/* this driver doesn't aim at the peak continuous sample rate */ +#define SAMPLE_BITS (8 /*cmd*/ + 16 /*sample*/ + 2 /* before, after */) + struct ts_event { /* For portability, we can't read 12 bit values using SPI (which * would make the controller deliver them as native byteorder u16 - * with msbs zeroed). Instead, we read them as two 8-byte values, + * with msbs zeroed). Instead, we read them as two 8-bit values, * which need byteswapping then range adjustment. 
*/ __be16 x; @@ -60,7 +63,7 @@ struct ts_event { }; struct ads7846 { - struct input_dev input; + struct input_dev *input; char phys[32]; struct spi_device *spi; @@ -68,6 +71,7 @@ struct ads7846 { u16 vref_delay_usecs; u16 x_plate_ohms; + u8 read_x, read_y, read_z1, read_z2; struct ts_event tc; struct spi_transfer xfer[8]; @@ -117,10 +121,10 @@ struct ads7846 { #define READ_12BIT_DFR(x) (ADS_START | ADS_A2A1A0_d_ ## x \ | ADS_12_BIT | ADS_DFR) -static const u8 read_y = READ_12BIT_DFR(y) | ADS_PD10_ADC_ON; -static const u8 read_z1 = READ_12BIT_DFR(z1) | ADS_PD10_ADC_ON; -static const u8 read_z2 = READ_12BIT_DFR(z2) | ADS_PD10_ADC_ON; -static const u8 read_x = READ_12BIT_DFR(x) | ADS_PD10_PDOWN; /* LAST */ +#define READ_Y (READ_12BIT_DFR(y) | ADS_PD10_ADC_ON) +#define READ_Z1 (READ_12BIT_DFR(z1) | ADS_PD10_ADC_ON) +#define READ_Z2 (READ_12BIT_DFR(z2) | ADS_PD10_ADC_ON) +#define READ_X (READ_12BIT_DFR(x) | ADS_PD10_PDOWN) /* LAST */ /* single-ended samples need to first power up reference voltage; * we leave both ADC and VREF powered @@ -128,8 +132,8 @@ static const u8 read_x = READ_12BIT_DFR(x) | ADS_PD10_PDOWN; /* LAST */ #define READ_12BIT_SER(x) (ADS_START | ADS_A2A1A0_ ## x \ | ADS_12_BIT | ADS_SER) -static const u8 ref_on = READ_12BIT_DFR(x) | ADS_PD10_ALL_ON; -static const u8 ref_off = READ_12BIT_DFR(y) | ADS_PD10_PDOWN; +#define REF_ON (READ_12BIT_DFR(x) | ADS_PD10_ALL_ON) +#define REF_OFF (READ_12BIT_DFR(y) | ADS_PD10_PDOWN) /*--------------------------------------------------------------------------*/ @@ -138,7 +142,9 @@ static const u8 ref_off = READ_12BIT_DFR(y) | ADS_PD10_PDOWN; */ struct ser_req { + u8 ref_on; u8 command; + u8 ref_off; u16 scratch; __be16 sample; struct spi_message msg; @@ -152,7 +158,7 @@ static int ads7846_read12_ser(struct device *dev, unsigned command) struct ser_req *req = kzalloc(sizeof *req, SLAB_KERNEL); int status; int sample; - int i; + int i; if (!req) return -ENOMEM; @@ -160,7 +166,8 @@ static int ads7846_read12_ser(struct device *dev, unsigned command) INIT_LIST_HEAD(&req->msg.transfers); /* activate reference, so it has time to settle; */ - req->xfer[0].tx_buf = &ref_on; + req->ref_on = REF_ON; + req->xfer[0].tx_buf = &req->ref_on; req->xfer[0].len = 1; req->xfer[1].rx_buf = &req->scratch; req->xfer[1].len = 2; @@ -182,7 +189,8 @@ static int ads7846_read12_ser(struct device *dev, unsigned command) /* REVISIT: take a few more samples, and compare ... */ /* turn off reference */ - req->xfer[4].tx_buf = &ref_off; + req->ref_off = REF_OFF; + req->xfer[4].tx_buf = &req->ref_off; req->xfer[4].len = 1; req->xfer[5].rx_buf = &req->scratch; req->xfer[5].len = 2; @@ -236,11 +244,12 @@ SHOW(vbatt) static void ads7846_rx(void *ads) { - struct ads7846 *ts = ads; - unsigned Rt; - unsigned sync = 0; - u16 x, y, z1, z2; - unsigned long flags; + struct ads7846 *ts = ads; + struct input_dev *input_dev = ts->input; + unsigned Rt; + unsigned sync = 0; + u16 x, y, z1, z2; + unsigned long flags; /* adjust: 12 bit samples (left aligned), built from * two 8 bit values writen msb-first. @@ -276,21 +285,21 @@ static void ads7846_rx(void *ads) * won't notice that, even if nPENIRQ never fires ... 
*/ if (!ts->pendown && Rt != 0) { - input_report_key(&ts->input, BTN_TOUCH, 1); + input_report_key(input_dev, BTN_TOUCH, 1); sync = 1; } else if (ts->pendown && Rt == 0) { - input_report_key(&ts->input, BTN_TOUCH, 0); + input_report_key(input_dev, BTN_TOUCH, 0); sync = 1; } if (Rt) { - input_report_abs(&ts->input, ABS_X, x); - input_report_abs(&ts->input, ABS_Y, y); - input_report_abs(&ts->input, ABS_PRESSURE, Rt); + input_report_abs(input_dev, ABS_X, x); + input_report_abs(input_dev, ABS_Y, y); + input_report_abs(input_dev, ABS_PRESSURE, Rt); sync = 1; } if (sync) - input_sync(&ts->input); + input_sync(input_dev); #ifdef VERBOSE if (Rt || ts->pendown) @@ -396,9 +405,10 @@ static int ads7846_resume(struct spi_device *spi) static int __devinit ads7846_probe(struct spi_device *spi) { struct ads7846 *ts; + struct input_dev *input_dev; struct ads7846_platform_data *pdata = spi->dev.platform_data; struct spi_transfer *x; - int i; + int err; if (!spi->irq) { dev_dbg(&spi->dev, "no IRQ?\n"); @@ -411,9 +421,9 @@ static int __devinit ads7846_probe(struct spi_device *spi) } /* don't exceed max specified sample rate */ - if (spi->max_speed_hz > (125000 * 16)) { + if (spi->max_speed_hz > (125000 * SAMPLE_BITS)) { dev_dbg(&spi->dev, "f(sample) %d KHz?\n", - (spi->max_speed_hz/16)/1000); + (spi->max_speed_hz/SAMPLE_BITS)/1000); return -EINVAL; } @@ -423,13 +433,18 @@ static int __devinit ads7846_probe(struct spi_device *spi) * to discard the four garbage LSBs. */ - if (!(ts = kzalloc(sizeof(struct ads7846), GFP_KERNEL))) - return -ENOMEM; + ts = kzalloc(sizeof(struct ads7846), GFP_KERNEL); + input_dev = input_allocate_device(); + if (!ts || !input_dev) { + err = -ENOMEM; + goto err_free_mem; + } dev_set_drvdata(&spi->dev, ts); + spi->dev.power.power_state = PMSG_ON; ts->spi = spi; - spi->dev.power.power_state = PMSG_ON; + ts->input = input_dev; init_timer(&ts->timer); ts->timer.data = (unsigned long) ts; @@ -439,70 +454,80 @@ static int __devinit ads7846_probe(struct spi_device *spi) ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100; ts->x_plate_ohms = pdata->x_plate_ohms ? : 400; - init_input_dev(&ts->input); + snprintf(ts->phys, sizeof(ts->phys), "%s/input0", spi->dev.bus_id); - ts->input.dev = &spi->dev; - ts->input.name = "ADS784x Touchscreen"; - snprintf(ts->phys, sizeof ts->phys, "%s/input0", spi->dev.bus_id); - ts->input.phys = ts->phys; + input_dev->name = "ADS784x Touchscreen"; + input_dev->phys = ts->phys; + input_dev->cdev.dev = &spi->dev; - ts->input.evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); - ts->input.keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH); - input_set_abs_params(&ts->input, ABS_X, + input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); + input_dev->keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH); + input_set_abs_params(input_dev, ABS_X, pdata->x_min ? : 0, pdata->x_max ? : MAX_12BIT, 0, 0); - input_set_abs_params(&ts->input, ABS_Y, + input_set_abs_params(input_dev, ABS_Y, pdata->y_min ? : 0, pdata->y_max ? : MAX_12BIT, 0, 0); - input_set_abs_params(&ts->input, ABS_PRESSURE, + input_set_abs_params(input_dev, ABS_PRESSURE, pdata->pressure_min, pdata->pressure_max, 0, 0); - input_register_device(&ts->input); - /* set up the transfers to read touchscreen state; this assumes we * use formula #2 for pressure, not #3. 
*/ + INIT_LIST_HEAD(&ts->msg.transfers); x = ts->xfer; /* y- still on; turn on only y+ (and ADC) */ - x->tx_buf = &read_y; + ts->read_y = READ_Y; + x->tx_buf = &ts->read_y; x->len = 1; + spi_message_add_tail(x, &ts->msg); + x++; x->rx_buf = &ts->tc.y; x->len = 2; - x++; + spi_message_add_tail(x, &ts->msg); /* turn y+ off, x- on; we'll use formula #2 */ if (ts->model == 7846) { - x->tx_buf = &read_z1; + x++; + ts->read_z1 = READ_Z1; + x->tx_buf = &ts->read_z1; x->len = 1; + spi_message_add_tail(x, &ts->msg); + x++; x->rx_buf = &ts->tc.z1; x->len = 2; - x++; + spi_message_add_tail(x, &ts->msg); - x->tx_buf = &read_z2; + x++; + ts->read_z2 = READ_Z2; + x->tx_buf = &ts->read_z2; x->len = 1; + spi_message_add_tail(x, &ts->msg); + x++; x->rx_buf = &ts->tc.z2; x->len = 2; - x++; + spi_message_add_tail(x, &ts->msg); } /* turn y- off, x+ on, then leave in lowpower */ - x->tx_buf = &read_x; + x++; + ts->read_x = READ_X; + x->tx_buf = &ts->read_x; x->len = 1; + spi_message_add_tail(x, &ts->msg); + x++; x->rx_buf = &ts->tc.x; x->len = 2; - x++; - - CS_CHANGE(x[-1]); + CS_CHANGE(*x); + spi_message_add_tail(x, &ts->msg); - for (i = 0; i < x - ts->xfer; i++) - spi_message_add_tail(&ts->xfer[i], &ts->msg); ts->msg.complete = ads7846_rx; ts->msg.context = ts; @@ -510,9 +535,8 @@ static int __devinit ads7846_probe(struct spi_device *spi) SA_SAMPLE_RANDOM | SA_TRIGGER_FALLING, spi->dev.bus_id, ts)) { dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq); - input_unregister_device(&ts->input); - kfree(ts); - return -EBUSY; + err = -EBUSY; + goto err_free_mem; } dev_info(&spi->dev, "touchscreen, irq %d\n", spi->irq); @@ -534,7 +558,18 @@ static int __devinit ads7846_probe(struct spi_device *spi) device_create_file(&spi->dev, &dev_attr_vbatt); device_create_file(&spi->dev, &dev_attr_vaux); + err = input_register_device(input_dev); + if (err) + goto err_free_irq; + return 0; + + err_free_irq: + free_irq(spi->irq, ts); + err_free_mem: + input_free_device(input_dev); + kfree(ts); + return err; } static int __devexit ads7846_remove(struct spi_device *spi) @@ -554,7 +589,7 @@ static int __devexit ads7846_remove(struct spi_device *spi) device_remove_file(&spi->dev, &dev_attr_vbatt); device_remove_file(&spi->dev, &dev_attr_vaux); - input_unregister_device(&ts->input); + input_unregister_device(ts->input); kfree(ts); dev_dbg(&spi->dev, "unregistered touchscreen\n"); diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index f190a99604f0..393633681f49 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -2359,8 +2359,8 @@ isdn_tty_at_cout(char *msg, modem_info * info) /* use queue instead of direct, if online and */ /* data is in queue or buffer is full */ - if ((info->online && tty_buffer_request_room(tty, l) < l) || - (!skb_queue_empty(&dev->drv[info->isdn_driver]->rpqueue[info->isdn_channel]))) { + if (info->online && ((tty_buffer_request_room(tty, l) < l) || + !skb_queue_empty(&dev->drv[info->isdn_driver]->rpqueue[info->isdn_channel]))) { skb = alloc_skb(l, GFP_ATOMIC); if (!skb) { spin_unlock_irqrestore(&info->readlock, flags); diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c index 3a32c59494f2..24e51d5e97fc 100644 --- a/drivers/macintosh/windfarm_smu_sat.c +++ b/drivers/macintosh/windfarm_smu_sat.c @@ -151,6 +151,7 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id, kfree(buf); return NULL; } +EXPORT_SYMBOL_GPL(smu_sat_get_sdb_partition); /* refresh the cache */ static int wf_sat_read_cache(struct wf_sat *sat) 
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 9a2c7605d49c..642a61b6d0a4 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -452,8 +452,7 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply) } else if (func == MPI_FUNCTION_EVENT_ACK) { dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, EventAck reply received\n", ioc->name)); - } else if (func == MPI_FUNCTION_CONFIG || - func == MPI_FUNCTION_TOOLBOX) { + } else if (func == MPI_FUNCTION_CONFIG) { CONFIGPARMS *pCfg; unsigned long flags; @@ -5327,115 +5326,6 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mpt_toolbox - Generic function to issue toolbox message - * @ioc - Pointer to an adapter structure - * @cfg - Pointer to a toolbox structure. Struct contains - * action, page address, direction, physical address - * and pointer to a configuration page header - * Page header is updated. - * - * Returns 0 for success - * -EPERM if not allowed due to ISR context - * -EAGAIN if no msg frames currently available - * -EFAULT for non-successful reply or no reply (timeout) - */ -int -mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) -{ - ToolboxIstwiReadWriteRequest_t *pReq; - MPT_FRAME_HDR *mf; - struct pci_dev *pdev; - unsigned long flags; - int rc; - u32 flagsLength; - int in_isr; - - /* Prevent calling wait_event() (below), if caller happens - * to be in ISR context, because that is fatal! - */ - in_isr = in_interrupt(); - if (in_isr) { - dcprintk((MYIOC_s_WARN_FMT "toobox request not allowed in ISR context!\n", - ioc->name)); - return -EPERM; - } - - /* Get and Populate a free Frame - */ - if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { - dcprintk((MYIOC_s_WARN_FMT "mpt_toolbox: no msg frames!\n", - ioc->name)); - return -EAGAIN; - } - pReq = (ToolboxIstwiReadWriteRequest_t *)mf; - pReq->Tool = pCfg->action; - pReq->Reserved = 0; - pReq->ChainOffset = 0; - pReq->Function = MPI_FUNCTION_TOOLBOX; - pReq->Reserved1 = 0; - pReq->Reserved2 = 0; - pReq->MsgFlags = 0; - pReq->Flags = pCfg->dir; - pReq->BusNum = 0; - pReq->Reserved3 = 0; - pReq->NumAddressBytes = 0x01; - pReq->Reserved4 = 0; - pReq->DataLength = cpu_to_le16(0x04); - pdev = ioc->pcidev; - if (pdev->devfn & 1) - pReq->DeviceAddr = 0xB2; - else - pReq->DeviceAddr = 0xB0; - pReq->Addr1 = 0; - pReq->Addr2 = 0; - pReq->Addr3 = 0; - pReq->Reserved5 = 0; - - /* Add a SGE to the config request. 
- */ - - flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | 4; - - mpt_add_sge((char *)&pReq->SGL, flagsLength, pCfg->physAddr); - - dcprintk((MYIOC_s_INFO_FMT "Sending Toolbox request, Tool=%x\n", - ioc->name, pReq->Tool)); - - /* Append pCfg pointer to end of mf - */ - *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg; - - /* Initalize the timer - */ - init_timer(&pCfg->timer); - pCfg->timer.data = (unsigned long) ioc; - pCfg->timer.function = mpt_timer_expired; - pCfg->wait_done = 0; - - /* Set the timer; ensure 10 second minimum */ - if (pCfg->timeout < 10) - pCfg->timer.expires = jiffies + HZ*10; - else - pCfg->timer.expires = jiffies + HZ*pCfg->timeout; - - /* Add to end of Q, set timer and then issue this command */ - spin_lock_irqsave(&ioc->FreeQlock, flags); - list_add_tail(&pCfg->linkage, &ioc->configQ); - spin_unlock_irqrestore(&ioc->FreeQlock, flags); - - add_timer(&pCfg->timer); - mpt_put_msg_frame(mpt_base_index, ioc, mf); - wait_event(mpt_waitq, pCfg->wait_done); - - /* mf has been freed - do not access */ - - rc = pCfg->status; - - return rc; -} - -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * mpt_timer_expired - Call back for timer process. * Used only internal config functionality. @@ -6142,7 +6032,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply if (ioc->events && (ioc->eventTypes & ( 1 << event))) { int idx; - idx = ioc->eventContext % ioc->eventLogSize; + idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE; ioc->events[idx].event = event; ioc->events[idx].eventContext = ioc->eventContext; @@ -6540,7 +6430,6 @@ EXPORT_SYMBOL(mpt_lan_index); EXPORT_SYMBOL(mpt_stm_index); EXPORT_SYMBOL(mpt_HardResetHandler); EXPORT_SYMBOL(mpt_config); -EXPORT_SYMBOL(mpt_toolbox); EXPORT_SYMBOL(mpt_findImVolumes); EXPORT_SYMBOL(mpt_read_ioc_pg_3); EXPORT_SYMBOL(mpt_alloc_fw_memory); diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index ea2649ecad1f..723d54300953 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -616,6 +616,7 @@ typedef struct _MPT_ADAPTER * increments by 32 bytes */ int errata_flag_1064; + int aen_event_read_flag; /* flag to indicate event log was read*/ u8 FirstWhoInit; u8 upload_fw; /* If set, do a fw upload */ u8 reload_fw; /* Force a FW Reload on next reset */ @@ -1026,7 +1027,6 @@ extern u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked); extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan); extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg); -extern int mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *cfg); extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size); extern void mpt_free_fw_memory(MPT_ADAPTER *ioc); extern int mpt_findImVolumes(MPT_ADAPTER *ioc); diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index bdf709987982..9b64e07400da 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -136,6 +136,12 @@ static void mptctl_free_tm_flags(MPT_ADAPTER *ioc); */ static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase); +/* + * Event Handler function + */ +static int mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); +struct fasync_struct *async_queue=NULL; + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * Scatter gather list (SGL) sizes and limits... 
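The mptbase and mptctl hunks above also retire the per-adapter ioc->eventLogSize in favour of the compile-time MPTCTL_EVENT_LOG_SIZE constant, so event entries land in a fixed-size ring indexed modulo that constant while ioc->eventContext keeps counting upward. A condensed sketch of that indexing; the helper and the size value are illustrative, only the field names mirror the driver.

#define EXAMPLE_EVENT_LOG_SIZE  30              /* assumed value for the sketch */

static void example_log_event(MPT_ADAPTER *ioc, u8 event)
{
        int idx = ioc->eventContext % EXAMPLE_EVENT_LOG_SIZE;

        ioc->events[idx].event = event;                 /* oldest slot is overwritten */
        ioc->events[idx].eventContext = ioc->eventContext;
        ioc->eventContext++;                            /* monotonically increasing counter */
}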
@@ -385,18 +391,18 @@ static int mptctl_bus_reset(MPT_IOCTL *ioctl) } /* Now wait for the command to complete */ - ii = wait_event_interruptible_timeout(mptctl_wait, + ii = wait_event_timeout(mptctl_wait, ioctl->wait_done == 1, HZ*5 /* 5 second timeout */); if(ii <=0 && (ioctl->wait_done != 1 )) { + mpt_free_msg_frame(hd->ioc, mf); ioctl->wait_done = 0; retval = -1; /* return failure */ } mptctl_bus_reset_done: - mpt_free_msg_frame(hd->ioc, mf); mptctl_free_tm_flags(ioctl->ioc); return retval; } @@ -472,6 +478,69 @@ mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* ASYNC Event Notification Support */ +static int +mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) +{ + u8 event; + + event = le32_to_cpu(pEvReply->Event) & 0xFF; + + dctlprintk(("%s() called\n", __FUNCTION__)); + if(async_queue == NULL) + return 1; + + /* Raise SIGIO for persistent events. + * TODO - this define is not in MPI spec yet, + * but they plan to set it to 0x21 + */ + if (event == 0x21 ) { + ioc->aen_event_read_flag=1; + dctlprintk(("Raised SIGIO to application\n")); + devtprintk(("Raised SIGIO to application\n")); + kill_fasync(&async_queue, SIGIO, POLL_IN); + return 1; + } + + /* This flag is set after SIGIO was raised, and + * remains set until the application has read + * the event log via ioctl=MPTEVENTREPORT + */ + if(ioc->aen_event_read_flag) + return 1; + + /* Signal only for the events that are + * requested for by the application + */ + if (ioc->events && (ioc->eventTypes & ( 1 << event))) { + ioc->aen_event_read_flag=1; + dctlprintk(("Raised SIGIO to application\n")); + devtprintk(("Raised SIGIO to application\n")); + kill_fasync(&async_queue, SIGIO, POLL_IN); + } + return 1; +} + +static int +mptctl_fasync(int fd, struct file *filep, int mode) +{ + MPT_ADAPTER *ioc; + + list_for_each_entry(ioc, &ioc_list, list) + ioc->aen_event_read_flag=0; + + dctlprintk(("%s() called\n", __FUNCTION__)); + return fasync_helper(fd, filep, mode, &async_queue); +} + +static int +mptctl_release(struct inode *inode, struct file *filep) +{ + dctlprintk(("%s() called\n", __FUNCTION__)); + return fasync_helper(-1, filep, 0, &async_queue); +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * MPT ioctl handler * cmd - specify the particular IOCTL command to be issued @@ -674,22 +743,23 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) u16 iocstat; pFWDownloadReply_t ReplyMsg = NULL; - dctlprintk((KERN_INFO "mptctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id)); + dctlprintk(("mptctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id)); - dctlprintk((KERN_INFO "DbG: kfwdl.bufp = %p\n", ufwbuf)); - dctlprintk((KERN_INFO "DbG: kfwdl.fwlen = %d\n", (int)fwlen)); - dctlprintk((KERN_INFO "DbG: kfwdl.ioc = %04xh\n", ioc)); + dctlprintk(("DbG: kfwdl.bufp = %p\n", ufwbuf)); + dctlprintk(("DbG: kfwdl.fwlen = %d\n", (int)fwlen)); + dctlprintk(("DbG: kfwdl.ioc = %04xh\n", ioc)); - if ((ioc = mpt_verify_adapter(ioc, &iocp)) < 0) { - dctlprintk(("%s@%d::_ioctl_fwdl - ioc%d not found!\n", - __FILE__, __LINE__, ioc)); + if (mpt_verify_adapter(ioc, &iocp) < 0) { + dctlprintk(("ioctl_fwdl - ioc%d not found!\n", + ioc)); return -ENODEV; /* (-6) No such device or address */ - } + } else { - /* Valid device. Get a message frame and construct the FW download message. - */ - if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) - return -EAGAIN; + /* Valid device. 
Get a message frame and construct the FW download message. + */ + if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) + return -EAGAIN; + } dlmsg = (FWDownload_t*) mf; ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; sgOut = (char *) (ptsge + 1); @@ -702,7 +772,11 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) dlmsg->ChainOffset = 0; dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD; dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0; - dlmsg->MsgFlags = 0; + if (iocp->facts.MsgVersion >= MPI_VERSION_01_05) + dlmsg->MsgFlags = MPI_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT; + else + dlmsg->MsgFlags = 0; + /* Set up the Transaction SGE. */ @@ -754,7 +828,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) goto fwdl_out; } - dctlprintk((KERN_INFO "DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags)); + dctlprintk(("DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags)); /* * Parse SG list, copying sgl itself, @@ -803,11 +877,11 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) /* * Finally, perform firmware download. */ - iocp->ioctl->wait_done = 0; + ReplyMsg = NULL; mpt_put_msg_frame(mptctl_id, iocp, mf); /* Now wait for the command to complete */ - ret = wait_event_interruptible_timeout(mptctl_wait, + ret = wait_event_timeout(mptctl_wait, iocp->ioctl->wait_done == 1, HZ*60); @@ -1145,7 +1219,9 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size) /* Fill in the data and return the structure to the calling * program */ - if (ioc->bus_type == FC) + if (ioc->bus_type == SAS) + karg->adapterType = MPT_IOCTL_INTERFACE_SAS; + else if (ioc->bus_type == FC) karg->adapterType = MPT_IOCTL_INTERFACE_FC; else karg->adapterType = MPT_IOCTL_INTERFACE_SCSI; @@ -1170,12 +1246,11 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size) karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); } else if (cim_rev == 2) { - /* Get the PCI bus, device, function and segment ID numbers + /* Get the PCI bus, device, function and segment ID numbers for the IOC */ karg->pciInfo.u.bits.busNumber = pdev->bus->number; karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); - karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); karg->pciInfo.segmentID = pci_domain_nr(pdev->bus); } @@ -1500,7 +1575,7 @@ mptctl_eventquery (unsigned long arg) return -ENODEV; } - karg.eventEntries = ioc->eventLogSize; + karg.eventEntries = MPTCTL_EVENT_LOG_SIZE; karg.eventTypes = ioc->eventTypes; /* Copy the data from kernel memory to user memory @@ -1550,7 +1625,6 @@ mptctl_eventenable (unsigned long arg) memset(ioc->events, 0, sz); ioc->alloc_total += sz; - ioc->eventLogSize = MPTCTL_EVENT_LOG_SIZE; ioc->eventContext = 0; } @@ -1590,7 +1664,7 @@ mptctl_eventreport (unsigned long arg) maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS); - max = ioc->eventLogSize < maxEvents ? ioc->eventLogSize : maxEvents; + max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents; /* If fewer than 1 event is requested, there must have * been some type of error. 
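The mptctl_event_process()/mptctl_fasync()/mptctl_release() additions above are the standard fasync pattern for asynchronous notification: userspace enables FASYNC on the open file, the driver records the reader with fasync_helper(), and kill_fasync() raises SIGIO when an interesting event arrives; ioc->aen_event_read_flag then suppresses further signals until the application drains the log (the MPTEVENTREPORT hunk above clears it). A stripped-down sketch of the generic pattern, with hypothetical names:

#include <linux/fs.h>
#include <linux/signal.h>

static struct fasync_struct *example_async_queue;

/* wired to the driver's ->fasync file operation */
static int example_fasync(int fd, struct file *filp, int on)
{
        return fasync_helper(fd, filp, on, &example_async_queue);
}

/* called from the event path when something worth signalling happens */
static void example_notify(void)
{
        kill_fasync(&example_async_queue, SIGIO, POLL_IN);
}

/* wired to ->release so a departing reader is removed from the queue */
static int example_release(struct inode *inode, struct file *filp)
{
        return fasync_helper(-1, filp, 0, &example_async_queue);
}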
@@ -1598,6 +1672,9 @@ mptctl_eventreport (unsigned long arg) if ((max < 1) || !ioc->events) return -ENODATA; + /* reset this flag so SIGIO can restart */ + ioc->aen_event_read_flag=0; + /* Copy the data from kernel memory to user memory */ numBytes = max * sizeof(MPT_IOCTL_EVENTS); @@ -1817,6 +1894,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR: case MPI_FUNCTION_FW_DOWNLOAD: case MPI_FUNCTION_FC_PRIMITIVE_SEND: + case MPI_FUNCTION_TOOLBOX: + case MPI_FUNCTION_SAS_IO_UNIT_CONTROL: break; case MPI_FUNCTION_SCSI_IO_REQUEST: @@ -1837,7 +1916,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) goto done_free_mem; } - pScsiReq->MsgFlags = mpt_msg_flags(); + pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; + pScsiReq->MsgFlags |= mpt_msg_flags(); + /* verify that app has not requested * more sense data than driver @@ -1888,6 +1969,25 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) } break; + case MPI_FUNCTION_SMP_PASSTHROUGH: + /* Check mf->PassthruFlags to determine if + * transfer is ImmediateMode or not. + * Immediate mode returns data in the ReplyFrame. + * Else, we are sending request and response data + * in two SGLs at the end of the mf. + */ + break; + + case MPI_FUNCTION_SATA_PASSTHROUGH: + if (!ioc->sh) { + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "SCSI driver is not loaded. \n", + __FILE__, __LINE__); + rc = -EFAULT; + goto done_free_mem; + } + break; + case MPI_FUNCTION_RAID_ACTION: /* Just add a SGE */ @@ -1900,7 +2000,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) int scsidir = MPI_SCSIIO_CONTROL_READ; int dataSize; - pScsiReq->MsgFlags = mpt_msg_flags(); + pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; + pScsiReq->MsgFlags |= mpt_msg_flags(); + /* verify that app has not requested * more sense data than driver @@ -2130,7 +2232,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) /* Now wait for the command to complete */ timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; - timeout = wait_event_interruptible_timeout(mptctl_wait, + timeout = wait_event_timeout(mptctl_wait, ioc->ioctl->wait_done == 1, HZ*timeout); @@ -2246,13 +2348,16 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) hp_host_info_t __user *uarg = (void __user *) arg; MPT_ADAPTER *ioc; struct pci_dev *pdev; - char *pbuf; + char *pbuf=NULL; dma_addr_t buf_dma; hp_host_info_t karg; CONFIGPARMS cfg; ConfigPageHeader_t hdr; int iocnum; int rc, cim_rev; + ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; + MPT_FRAME_HDR *mf = NULL; + MPIHeader_t *mpi_hdr; dctlprintk((": mptctl_hp_hostinfo called.\n")); /* Reset long to int. 
Should affect IA64 and SPARC only @@ -2370,7 +2475,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) karg.base_io_addr = pci_resource_start(pdev, 0); - if (ioc->bus_type == FC) + if ((ioc->bus_type == SAS) || (ioc->bus_type == FC)) karg.bus_phys_width = HP_BUS_WIDTH_UNK; else karg.bus_phys_width = HP_BUS_WIDTH_16; @@ -2388,20 +2493,67 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) } } - cfg.pageAddr = 0; - cfg.action = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL; - cfg.dir = MPI_TB_ISTWI_FLAGS_READ; - cfg.timeout = 10; + /* + * Gather ISTWI(Industry Standard Two Wire Interface) Data + */ + if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { + dfailprintk((MYIOC_s_WARN_FMT "%s, no msg frames!!\n", + ioc->name,__FUNCTION__)); + goto out; + } + + IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf; + mpi_hdr = (MPIHeader_t *) mf; + memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t)); + IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX; + IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL; + IstwiRWRequest->MsgContext = mpi_hdr->MsgContext; + IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ; + IstwiRWRequest->NumAddressBytes = 0x01; + IstwiRWRequest->DataLength = cpu_to_le16(0x04); + if (pdev->devfn & 1) + IstwiRWRequest->DeviceAddr = 0xB2; + else + IstwiRWRequest->DeviceAddr = 0xB0; + pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); - if (pbuf) { - cfg.physAddr = buf_dma; - if ((mpt_toolbox(ioc, &cfg)) == 0) { - karg.rsvd = *(u32 *)pbuf; - } - pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); - pbuf = NULL; + if (!pbuf) + goto out; + mpt_add_sge((char *)&IstwiRWRequest->SGL, + (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma); + + ioc->ioctl->wait_done = 0; + mpt_put_msg_frame(mptctl_id, ioc, mf); + + rc = wait_event_timeout(mptctl_wait, + ioc->ioctl->wait_done == 1, + HZ*MPT_IOCTL_DEFAULT_TIMEOUT /* 10 sec */); + + if(rc <=0 && (ioc->ioctl->wait_done != 1 )) { + /* + * Now we need to reset the board + */ + mpt_free_msg_frame(ioc, mf); + mptctl_timeout_expired(ioc->ioctl); + goto out; } + /* + *ISTWI Data Definition + * pbuf[0] = FW_VERSION = 0x4 + * pbuf[1] = Bay Count = 6 or 4 or 2, depending on + * the config, you should be seeing one out of these three values + * pbuf[2] = Drive Installed Map = bit pattern depend on which + * bays have drives in them + * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3) + */ + if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) + karg.rsvd = *(u32 *)pbuf; + + out: + if (pbuf) + pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); + /* Copy the data from kernel memory to user memory */ if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) { @@ -2459,7 +2611,7 @@ mptctl_hp_targetinfo(unsigned long arg) /* There is nothing to do for FCP parts. */ - if (ioc->bus_type == FC) + if ((ioc->bus_type == SAS) || (ioc->bus_type == FC)) return 0; if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL)) @@ -2569,6 +2721,8 @@ mptctl_hp_targetinfo(unsigned long arg) static struct file_operations mptctl_fops = { .owner = THIS_MODULE, .llseek = no_llseek, + .release = mptctl_release, + .fasync = mptctl_fasync, .unlocked_ioctl = mptctl_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_mpctl_ioctl, @@ -2813,6 +2967,11 @@ static int __init mptctl_init(void) /* FIXME! 
*/ } + if (mpt_event_register(mptctl_id, mptctl_event_process) == 0) { + devtprintk((KERN_INFO MYNAM + ": Registered for IOC event notifications\n")); + } + return 0; out_fail: diff --git a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h index 518996e03481..a2f8a97992e6 100644 --- a/drivers/message/fusion/mptctl.h +++ b/drivers/message/fusion/mptctl.h @@ -169,8 +169,10 @@ struct mpt_ioctl_pci_info2 { * Read only. * Data starts at offset 0xC */ -#define MPT_IOCTL_INTERFACE_FC (0x01) #define MPT_IOCTL_INTERFACE_SCSI (0x00) +#define MPT_IOCTL_INTERFACE_FC (0x01) +#define MPT_IOCTL_INTERFACE_FC_IP (0x02) +#define MPT_IOCTL_INTERFACE_SAS (0x03) #define MPT_IOCTL_VERSION_LENGTH (32) struct mpt_ioctl_iocinfo { diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 05789e505464..4fee6befc93d 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -2489,7 +2489,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR int idx; MPT_ADAPTER *ioc = hd->ioc; - idx = ioc->eventContext % ioc->eventLogSize; + idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE; ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE; ioc->events[idx].eventContext = ioc->eventContext; diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c index 37ee7f8dc82f..9fef29d978b5 100644 --- a/drivers/mmc/mmci.c +++ b/drivers/mmc/mmci.c @@ -97,6 +97,13 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) if (data->flags & MMC_DATA_READ) { datactrl |= MCI_DPSM_DIRECTION; irqmask = MCI_RXFIFOHALFFULLMASK; + + /* + * If we have less than a FIFOSIZE of bytes to transfer, + * trigger a PIO interrupt as soon as any data is available. + */ + if (host->size < MCI_FIFOSIZE) + irqmask |= MCI_RXDATAAVLBLMASK; } else { /* * We don't actually need to include "FIFO empty" here diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 47c72a63dfe1..e45a8f959719 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2020,8 +2020,8 @@ config SIS190 will be called sis190. This is recommended. 
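The mmci hunk above states the reasoning in its comment: a read shorter than the FIFO never reaches the half-full threshold, so the half-full interrupt alone would leave the transfer stalled, and the RX-data-available interrupt is therefore enabled as well. Reconstructed from the hunk (a sketch, not checked against the rest of mmci.c), the read branch of mmci_start_data() ends up roughly as:

        if (data->flags & MMC_DATA_READ) {
                datactrl |= MCI_DPSM_DIRECTION;
                irqmask = MCI_RXFIFOHALFFULLMASK;

                /*
                 * If we have less than a FIFOSIZE of bytes to transfer,
                 * trigger a PIO interrupt as soon as any data is available.
                 */
                if (host->size < MCI_FIFOSIZE)
                        irqmask |= MCI_RXDATAAVLBLMASK;
        }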
config SKGE - tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)" - depends on PCI && EXPERIMENTAL + tristate "New SysKonnect GigaEthernet support" + depends on PCI select CRC32 ---help--- This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx @@ -2082,7 +2082,6 @@ config SK98LIN - Allied Telesyn AT-2971SX Gigabit Ethernet Adapter - Allied Telesyn AT-2971T Gigabit Ethernet Adapter - Belkin Gigabit Desktop Card 10/100/1000Base-T Adapter, Copper RJ-45 - - DGE-530T Gigabit Ethernet Adapter - EG1032 v2 Instant Gigabit Network Adapter - EG1064 v2 Instant Gigabit Network Adapter - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Abit) diff --git a/drivers/net/appletalk/cops.h b/drivers/net/appletalk/cops.h index c68ba9c2ef46..fd2750b269c8 100644 --- a/drivers/net/appletalk/cops.h +++ b/drivers/net/appletalk/cops.h @@ -51,7 +51,7 @@ struct ltfirmware { unsigned int length; - unsigned char * data; + const unsigned char *data; }; #define DAYNA 1 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e0f51afec778..bcf9f17daf0d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1581,6 +1581,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) printk(KERN_INFO DRV_NAME ": %s: %s not enslaved\n", bond_dev->name, slave_dev->name); + write_unlock_bh(&bond->lock); return -EINVAL; } diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index b420182eec4b..ed4bc91638d2 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c @@ -1791,6 +1791,8 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, goto out; } + pci_set_drvdata(pdev, dev); + tp = netdev_priv(dev); ioaddr = tp->mmio_addr; @@ -1827,8 +1829,6 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, if (rc < 0) goto err_remove_mii; - pci_set_drvdata(pdev, dev); - net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), " "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", pci_name(pdev), sis_chip_info[ent->driver_data].name, diff --git a/drivers/net/skge.c b/drivers/net/skge.c index bf55a4cfb3d2..67fb19b8fde9 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -1697,6 +1697,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port) skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); + if (skge->autoneg == AUTONEG_DISABLE) { reg = GM_GPCR_AU_ALL_DIS; gma_write16(hw, port, GM_GP_CTRL, @@ -1704,16 +1705,23 @@ static void yukon_mac_init(struct skge_hw *hw, int port) switch (skge->speed) { case SPEED_1000: + reg &= ~GM_GPCR_SPEED_100; reg |= GM_GPCR_SPEED_1000; - /* fallthru */ + break; case SPEED_100: + reg &= ~GM_GPCR_SPEED_1000; reg |= GM_GPCR_SPEED_100; + break; + case SPEED_10: + reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); + break; } if (skge->duplex == DUPLEX_FULL) reg |= GM_GPCR_DUP_FULL; } else reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; + switch (skge->flow_control) { case FLOW_MODE_NONE: skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index cae2edf23004..bfeba5b9cd7a 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -520,10 +520,16 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) switch (sky2->speed) { case SPEED_1000: + reg &= ~GM_GPCR_SPEED_100; reg |= GM_GPCR_SPEED_1000; - /* fallthru */ + break; case SPEED_100: + reg &= ~GM_GPCR_SPEED_1000; reg |= 
GM_GPCR_SPEED_100; + break; + case SPEED_10: + reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); + break; } if (sky2->duplex == DUPLEX_FULL) @@ -1446,6 +1452,29 @@ static void sky2_link_up(struct sky2_port *sky2) sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); reg = gma_read16(hw, port, GM_GP_CTRL); + if (sky2->autoneg == AUTONEG_DISABLE) { + reg |= GM_GPCR_AU_ALL_DIS; + + /* Is write/read necessary? Copied from sky2_mac_init */ + gma_write16(hw, port, GM_GP_CTRL, reg); + gma_read16(hw, port, GM_GP_CTRL); + + switch (sky2->speed) { + case SPEED_1000: + reg &= ~GM_GPCR_SPEED_100; + reg |= GM_GPCR_SPEED_1000; + break; + case SPEED_100: + reg &= ~GM_GPCR_SPEED_1000; + reg |= GM_GPCR_SPEED_100; + break; + case SPEED_10: + reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); + break; + } + } else + reg &= ~GM_GPCR_AU_ALL_DIS; + if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE) reg |= GM_GPCR_DUP_FULL; diff --git a/drivers/net/tokenring/smctr.h b/drivers/net/tokenring/smctr.h index b306c7e4c793..88dfa2e01d6e 100644 --- a/drivers/net/tokenring/smctr.h +++ b/drivers/net/tokenring/smctr.h @@ -1042,7 +1042,7 @@ typedef struct net_local { __u16 functional_address[2]; __u16 bitwise_group_address[2]; - __u8 *ptr_ucode; + const __u8 *ptr_ucode; __u8 cleanup; diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 98a76f10a0f7..dfc24016ba81 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c @@ -1872,7 +1872,7 @@ static int atmel_set_encodeext(struct net_device *dev, struct atmel_private *priv = netdev_priv(dev); struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; - int idx, key_len; + int idx, key_len, alg = ext->alg, set_key = 1; /* Determine and validate the key index */ idx = encoding->flags & IW_ENCODE_INDEX; @@ -1883,39 +1883,42 @@ static int atmel_set_encodeext(struct net_device *dev, } else idx = priv->default_key; - if ((encoding->flags & IW_ENCODE_DISABLED) || - ext->alg == IW_ENCODE_ALG_NONE) { - priv->wep_is_on = 0; - priv->encryption_level = 0; - priv->pairwise_cipher_suite = CIPHER_SUITE_NONE; - } + if (encoding->flags & IW_ENCODE_DISABLED) + alg = IW_ENCODE_ALG_NONE; - if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) + if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { priv->default_key = idx; + set_key = ext->key_len > 0 ? 
1 : 0; + } - /* Set the requested key */ - switch (ext->alg) { - case IW_ENCODE_ALG_NONE: - break; - case IW_ENCODE_ALG_WEP: - if (ext->key_len > 5) { - priv->wep_key_len[idx] = 13; - priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128; - priv->encryption_level = 2; - } else if (ext->key_len > 0) { - priv->wep_key_len[idx] = 5; - priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64; - priv->encryption_level = 1; - } else { + if (set_key) { + /* Set the requested key first */ + switch (alg) { + case IW_ENCODE_ALG_NONE: + priv->wep_is_on = 0; + priv->encryption_level = 0; + priv->pairwise_cipher_suite = CIPHER_SUITE_NONE; + break; + case IW_ENCODE_ALG_WEP: + if (ext->key_len > 5) { + priv->wep_key_len[idx] = 13; + priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128; + priv->encryption_level = 2; + } else if (ext->key_len > 0) { + priv->wep_key_len[idx] = 5; + priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64; + priv->encryption_level = 1; + } else { + return -EINVAL; + } + priv->wep_is_on = 1; + memset(priv->wep_keys[idx], 0, 13); + key_len = min ((int)ext->key_len, priv->wep_key_len[idx]); + memcpy(priv->wep_keys[idx], ext->key, key_len); + break; + default: return -EINVAL; } - priv->wep_is_on = 1; - memset(priv->wep_keys[idx], 0, 13); - key_len = min ((int)ext->key_len, priv->wep_key_len[idx]); - memcpy(priv->wep_keys[idx], ext->key, key_len); - break; - default: - return -EINVAL; } return -EINPROGRESS; @@ -3061,17 +3064,26 @@ static void authenticate(struct atmel_private *priv, u16 frame_len) } if (status == C80211_MGMT_SC_Success && priv->wep_is_on) { + int should_associate = 0; /* WEP */ if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum) return; - if (trans_seq_no == 0x0002 && - auth->el_id == C80211_MGMT_ElementID_ChallengeText) { - send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len); - return; + if (system == C80211_MGMT_AAN_OPENSYSTEM) { + if (trans_seq_no == 0x0002) { + should_associate = 1; + } + } else if (system == C80211_MGMT_AAN_SHAREDKEY) { + if (trans_seq_no == 0x0002 && + auth->el_id == C80211_MGMT_ElementID_ChallengeText) { + send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len); + return; + } else if (trans_seq_no == 0x0004) { + should_associate = 1; + } } - if (trans_seq_no == 0x0004) { + if (should_associate) { if(priv->station_was_associated) { atmel_enter_state(priv, STATION_STATE_REASSOCIATING); send_association_request(priv, 1); @@ -3084,11 +3096,13 @@ static void authenticate(struct atmel_private *priv, u16 frame_len) } } - if (status == C80211_MGMT_SC_AuthAlgNotSupported) { + if (status == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) { /* Do opensystem first, then try sharedkey */ - if (system == C80211_MGMT_AAN_OPENSYSTEM) { + if (system == WLAN_AUTH_OPEN) { priv->CurrentAuthentTransactionSeqNum = 0x001; - send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0); + priv->exclude_unencrypted = 1; + send_authentication_request(priv, WLAN_AUTH_SHARED_KEY, NULL, 0); + return; } else if (priv->connect_to_any_BSS) { int bss_index; @@ -3439,10 +3453,13 @@ static void atmel_management_timer(u_long a) priv->AuthenticationRequestRetryCnt = 0; restart_search(priv); } else { + int auth = C80211_MGMT_AAN_OPENSYSTEM; priv->AuthenticationRequestRetryCnt++; priv->CurrentAuthentTransactionSeqNum = 0x0001; mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); - send_authentication_request(priv, C80211_MGMT_AAN_OPENSYSTEM, NULL, 0); + if (priv->wep_is_on && priv->exclude_unencrypted) + auth = 
C80211_MGMT_AAN_SHAREDKEY; + send_authentication_request(priv, auth, NULL, 0); } break; @@ -3541,12 +3558,15 @@ static void atmel_command_irq(struct atmel_private *priv) priv->station_was_associated = priv->station_is_associated; atmel_enter_state(priv, STATION_STATE_READY); } else { + int auth = C80211_MGMT_AAN_OPENSYSTEM; priv->AuthenticationRequestRetryCnt = 0; atmel_enter_state(priv, STATION_STATE_AUTHENTICATING); mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); priv->CurrentAuthentTransactionSeqNum = 0x0001; - send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0); + if (priv->wep_is_on && priv->exclude_unencrypted) + auth = C80211_MGMT_AAN_SHAREDKEY; + send_authentication_request(priv, auth, NULL, 0); } return; } diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index cf373625fc70..98122f3a4bc2 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c @@ -950,16 +950,8 @@ wv_82593_cmd(struct net_device * dev, static inline int wv_diag(struct net_device * dev) { - int ret = FALSE; - - if(wv_82593_cmd(dev, "wv_diag(): diagnose", - OP0_DIAGNOSE, SR0_DIAGNOSE_PASSED)) - ret = TRUE; - -#ifdef DEBUG_CONFIG_ERRORS - printk(KERN_INFO "wavelan_cs: i82593 Self Test failed!\n"); -#endif - return(ret); + return(wv_82593_cmd(dev, "wv_diag(): diagnose", + OP0_DIAGNOSE, SR0_DIAGNOSE_PASSED)); } /* wv_diag */ /*------------------------------------------------------------------*/ @@ -3604,8 +3596,8 @@ wv_82593_config(struct net_device * dev) cfblk.lin_prio = 0; /* conform to 802.3 backoff algoritm */ cfblk.exp_prio = 5; /* conform to 802.3 backoff algoritm */ cfblk.bof_met = 1; /* conform to 802.3 backoff algoritm */ - cfblk.ifrm_spc = 0x20; /* 32 bit times interframe spacing */ - cfblk.slottim_low = 0x20; /* 32 bit times slot time */ + cfblk.ifrm_spc = 0x20 >> 4; /* 32 bit times interframe spacing */ + cfblk.slottim_low = 0x20 >> 5; /* 32 bit times slot time */ cfblk.slottim_hi = 0x0; cfblk.max_retr = 15; cfblk.prmisc = ((lp->promiscuous) ? TRUE: FALSE); /* Promiscuous mode */ diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index f46e8438e0d2..93f8a8fa8890 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -40,6 +40,8 @@ #include <linux/string.h> #include <linux/pci.h> #include <linux/reboot.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <asm/byteorder.h> #include <asm/cache.h> /* for L1_CACHE_BYTES */ @@ -1019,62 +1021,33 @@ static struct hppa_dma_ops ccio_ops = { }; #ifdef CONFIG_PROC_FS -static int proc_append(char *src, int len, char **dst, off_t *offset, int *max) -{ - if (len < *offset) { - *offset -= len; - return 0; - } - if (*offset > 0) { - src += *offset; - len -= *offset; - *offset = 0; - } - if (len > *max) { - len = *max; - } - memcpy(*dst, src, len); - *dst += len; - *max -= len; - return (*max == 0); -} - -static int ccio_proc_info(char *buf, char **start, off_t offset, int count, - int *eof, void *data) +static int ccio_proc_info(struct seq_file *m, void *p) { - int max = count; - char tmp[80]; /* width of an ANSI-standard terminal */ + int len = 0; struct ioc *ioc = ioc_list; while (ioc != NULL) { unsigned int total_pages = ioc->res_size << 3; unsigned long avg = 0, min, max; - int j, len; + int j; - len = sprintf(tmp, "%s\n", ioc->name); - if (proc_append(tmp, len, &buf, &offset, &count)) - break; + len += seq_printf(m, "%s\n", ioc->name); - len = sprintf(tmp, "Cujo 2.0 bug : %s\n", - (ioc->cujo20_bug ? 
"yes" : "no")); - if (proc_append(tmp, len, &buf, &offset, &count)) - break; + len += seq_printf(m, "Cujo 2.0 bug : %s\n", + (ioc->cujo20_bug ? "yes" : "no")); - len = sprintf(tmp, "IO PDIR size : %d bytes (%d entries)\n", - total_pages * 8, total_pages); - if (proc_append(tmp, len, &buf, &offset, &count)) - break; + len += seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n", + total_pages * 8, total_pages); + #ifdef CCIO_MAP_STATS - len = sprintf(tmp, "IO PDIR entries : %ld free %ld used (%d%%)\n", - total_pages - ioc->used_pages, ioc->used_pages, - (int)(ioc->used_pages * 100 / total_pages)); - if (proc_append(tmp, len, &buf, &offset, &count)) - break; + len += seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n", + total_pages - ioc->used_pages, ioc->used_pages, + (int)(ioc->used_pages * 100 / total_pages)); #endif - len = sprintf(tmp, "Resource bitmap : %d bytes (%d pages)\n", - ioc->res_size, total_pages); - if (proc_append(tmp, len, &buf, &offset, &count)) - break; + + len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n", + ioc->res_size, total_pages); + #ifdef CCIO_SEARCH_TIME min = max = ioc->avg_search[0]; for(j = 0; j < CCIO_SEARCH_SAMPLE; ++j) { @@ -1085,70 +1058,83 @@ static int ccio_proc_info(char *buf, char **start, off_t offset, int count, min = ioc->avg_search[j]; } avg /= CCIO_SEARCH_SAMPLE; - len = sprintf(tmp, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n", - min, avg, max); - if (proc_append(tmp, len, &buf, &offset, &count)) - break; + len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n", + min, avg, max); #endif #ifdef CCIO_MAP_STATS - len = sprintf(tmp, "pci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n", - ioc->msingle_calls, ioc->msingle_pages, - (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls)); - if (proc_append(tmp, len, &buf, &offset, &count)) - break; - + len += seq_printf(m, "pci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n", + ioc->msingle_calls, ioc->msingle_pages, + (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls)); /* KLUGE - unmap_sg calls unmap_single for each mapped page */ min = ioc->usingle_calls - ioc->usg_calls; max = ioc->usingle_pages - ioc->usg_pages; - len = sprintf(tmp, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n", - min, max, (int)((max * 1000)/min)); - if (proc_append(tmp, len, &buf, &offset, &count)) - break; + len += seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n", + min, max, (int)((max * 1000)/min)); - len = sprintf(tmp, "pci_map_sg() : %8ld calls %8ld pages (avg %d/1000)\n", - ioc->msg_calls, ioc->msg_pages, - (int)((ioc->msg_pages * 1000)/ioc->msg_calls)); - if (proc_append(tmp, len, &buf, &offset, &count)) - break; - len = sprintf(tmp, "pci_unmap_sg() : %8ld calls %8ld pages (avg %d/1000)\n\n\n", - ioc->usg_calls, ioc->usg_pages, - (int)((ioc->usg_pages * 1000)/ioc->usg_calls)); - if (proc_append(tmp, len, &buf, &offset, &count)) - break; + len += seq_printf(m, "pci_map_sg() : %8ld calls %8ld pages (avg %d/1000)\n", + ioc->msg_calls, ioc->msg_pages, + (int)((ioc->msg_pages * 1000)/ioc->msg_calls)); + + len += seq_printf(m, "pci_unmap_sg() : %8ld calls %8ld pages (avg %d/1000)\n\n\n", + ioc->usg_calls, ioc->usg_pages, + (int)((ioc->usg_pages * 1000)/ioc->usg_calls)); #endif /* CCIO_MAP_STATS */ + ioc = ioc->next; } - if (count == 0) { - *eof = 1; - } - return (max - count); + return 0; +} + +static int ccio_proc_info_open(struct inode *inode, struct file *file) +{ + return single_open(file, &ccio_proc_info, NULL); } 
-static int ccio_resource_map(char *buf, char **start, off_t offset, int len, - int *eof, void *data) +static struct file_operations ccio_proc_info_fops = { + .owner = THIS_MODULE, + .open = ccio_proc_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int ccio_proc_bitmap_info(struct seq_file *m, void *p) { + int len = 0; struct ioc *ioc = ioc_list; - buf[0] = '\0'; while (ioc != NULL) { u32 *res_ptr = (u32 *)ioc->res_map; int j; for (j = 0; j < (ioc->res_size / sizeof(u32)); j++) { if ((j & 7) == 0) - strcat(buf,"\n "); - sprintf(buf, "%s %08x", buf, *res_ptr); + len += seq_puts(m, "\n "); + len += seq_printf(m, "%08x", *res_ptr); res_ptr++; } - strcat(buf, "\n\n"); + len += seq_puts(m, "\n\n"); ioc = ioc->next; break; /* XXX - remove me */ } - return strlen(buf); + return 0; } + +static int ccio_proc_bitmap_open(struct inode *inode, struct file *file) +{ + return single_open(file, &ccio_proc_bitmap_info, NULL); +} + +static struct file_operations ccio_proc_bitmap_fops = { + .owner = THIS_MODULE, + .open = ccio_proc_bitmap_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; #endif /** @@ -1556,6 +1542,7 @@ static int ccio_probe(struct parisc_device *dev) { int i; struct ioc *ioc, **ioc_p = &ioc_list; + struct proc_dir_entry *info_entry, *bitmap_entry; ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL); if (ioc == NULL) { @@ -1583,13 +1570,14 @@ static int ccio_probe(struct parisc_device *dev) BUG_ON(dev->dev.platform_data == NULL); HBA_DATA(dev->dev.platform_data)->iommu = ioc; - if (ioc_count == 0) { - /* FIXME: Create separate entries for each ioc */ - create_proc_read_entry(MODULE_NAME, S_IRWXU, proc_runway_root, - ccio_proc_info, NULL); - create_proc_read_entry(MODULE_NAME"-bitmap", S_IRWXU, - proc_runway_root, ccio_resource_map, NULL); + info_entry = create_proc_entry(MODULE_NAME, 0, proc_runway_root); + if (info_entry) + info_entry->proc_fops = &ccio_proc_info_fops; + + bitmap_entry = create_proc_entry(MODULE_NAME"-bitmap", 0, proc_runway_root); + if (bitmap_entry) + bitmap_entry->proc_fops = &ccio_proc_bitmap_fops; } ioc_count++; diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 52f265e97729..5d47c5965c51 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -37,6 +37,8 @@ #include <asm/hardware.h> /* for register_parisc_driver() stuff */ #include <linux/proc_fs.h> +#include <linux/seq_file.h> + #include <asm/runway.h> /* for proc_runway_root */ #include <asm/pdc.h> /* for PDC_MODEL_* */ #include <asm/pdcpat.h> /* for is_pdc_pat() */ @@ -1892,46 +1894,43 @@ sba_common_init(struct sba_device *sba_dev) } #ifdef CONFIG_PROC_FS -static int sba_proc_info(char *buf, char **start, off_t offset, int len) +static int sba_proc_info(struct seq_file *m, void *p) { struct sba_device *sba_dev = sba_list; struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! 
*/ int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */ - unsigned long i; #ifdef SBA_COLLECT_STATS unsigned long avg = 0, min, max; #endif + int i, len = 0; - sprintf(buf, "%s rev %d.%d\n", + len += seq_printf(m, "%s rev %d.%d\n", sba_dev->name, (sba_dev->hw_rev & 0x7) + 1, (sba_dev->hw_rev & 0x18) >> 3 ); - sprintf(buf, "%sIO PDIR size : %d bytes (%d entries)\n", - buf, + len += seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n", (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */ total_pages); - sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n", - buf, ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */ + len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n", + ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */ - sprintf(buf, "%sLMMIO_BASE/MASK/ROUTE %08x %08x %08x\n", - buf, + len += seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n", READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE), READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK), READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE) ); for (i=0; i<4; i++) - sprintf(buf, "%sDIR%ld_BASE/MASK/ROUTE %08x %08x %08x\n", - buf, i, + len += seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n", i, READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18), READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18), READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18) ); #ifdef SBA_COLLECT_STATS - sprintf(buf, "%sIO PDIR entries : %ld free %ld used (%d%%)\n", buf, + len += seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n", total_pages - ioc->used_pages, ioc->used_pages, (int) (ioc->used_pages * 100 / total_pages)); @@ -1942,53 +1941,76 @@ static int sba_proc_info(char *buf, char **start, off_t offset, int len) if (ioc->avg_search[i] < min) min = ioc->avg_search[i]; } avg /= SBA_SEARCH_SAMPLE; - sprintf(buf, "%s Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n", - buf, min, avg, max); + len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n", + min, avg, max); - sprintf(buf, "%spci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n", - buf, ioc->msingle_calls, ioc->msingle_pages, + len += seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n", + ioc->msingle_calls, ioc->msingle_pages, (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls)); /* KLUGE - unmap_sg calls unmap_single for each mapped page */ min = ioc->usingle_calls; max = ioc->usingle_pages - ioc->usg_pages; - sprintf(buf, "%spci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n", - buf, min, max, - (int) ((max * 1000)/min)); + len += seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n", + min, max, (int) ((max * 1000)/min)); - sprintf(buf, "%spci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n", - buf, ioc->msg_calls, ioc->msg_pages, + len += seq_printf(m, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n", + ioc->msg_calls, ioc->msg_pages, (int) ((ioc->msg_pages * 1000)/ioc->msg_calls)); - sprintf(buf, "%spci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n", - buf, ioc->usg_calls, ioc->usg_pages, + len += seq_printf(m, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n", + ioc->usg_calls, ioc->usg_pages, (int) ((ioc->usg_pages * 1000)/ioc->usg_calls)); #endif - return strlen(buf); + return 0; } -#if 0 -/* XXX too much output - exceeds 4k limit and needs to be re-written */ static int -sba_resource_map(char *buf, char **start, off_t offset, int len) +sba_proc_open(struct inode *i, struct file *f) +{ + return 
single_open(f, &sba_proc_info, NULL); +} + +static struct file_operations sba_proc_fops = { + .owner = THIS_MODULE, + .open = sba_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int +sba_proc_bitmap_info(struct seq_file *m, void *p) { struct sba_device *sba_dev = sba_list; - struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Mutli-IOC suppoer! */ + struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */ unsigned int *res_ptr = (unsigned int *)ioc->res_map; - int i; + int i, len = 0; - buf[0] = '\0'; - for(i = 0; i < (ioc->res_size / sizeof(unsigned int)); ++i, ++res_ptr) { + for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) { if ((i & 7) == 0) - strcat(buf,"\n "); - sprintf(buf, "%s %08x", buf, *res_ptr); + len += seq_printf(m, "\n "); + len += seq_printf(m, " %08x", *res_ptr); } - strcat(buf, "\n"); + len += seq_printf(m, "\n"); - return strlen(buf); + return 0; } -#endif /* 0 */ + +static int +sba_proc_bitmap_open(struct inode *i, struct file *f) +{ + return single_open(f, &sba_proc_bitmap_info, NULL); +} + +static struct file_operations sba_proc_bitmap_fops = { + .owner = THIS_MODULE, + .open = sba_proc_bitmap_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; #endif /* CONFIG_PROC_FS */ static struct parisc_device_id sba_tbl[] = { @@ -2021,6 +2043,7 @@ sba_driver_callback(struct parisc_device *dev) int i; char *version; void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE); + struct proc_dir_entry *info_entry, *bitmap_entry, *root; sba_dump_ranges(sba_addr); @@ -2088,19 +2111,27 @@ sba_driver_callback(struct parisc_device *dev) hppa_dma_ops = &sba_ops; #ifdef CONFIG_PROC_FS - if (IS_ASTRO(&dev->id)) { - create_proc_info_entry("Astro", 0, proc_runway_root, sba_proc_info); - } else if (IS_IKE(&dev->id)) { - create_proc_info_entry("Ike", 0, proc_runway_root, sba_proc_info); - } else if (IS_PLUTO(&dev->id)) { - create_proc_info_entry("Pluto", 0, proc_mckinley_root, sba_proc_info); - } else { - create_proc_info_entry("Reo", 0, proc_runway_root, sba_proc_info); + switch (dev->id.hversion) { + case PLUTO_MCKINLEY_PORT: + root = proc_mckinley_root; + break; + case ASTRO_RUNWAY_PORT: + case IKE_MERCED_PORT: + default: + root = proc_runway_root; + break; } -#if 0 - create_proc_info_entry("bitmap", 0, proc_runway_root, sba_resource_map); -#endif + + info_entry = create_proc_entry("sba_iommu", 0, root); + bitmap_entry = create_proc_entry("sba_iommu-bitmap", 0, root); + + if (info_entry) + info_entry->proc_fops = &sba_proc_fops; + + if (bitmap_entry) + bitmap_entry->proc_fops = &sba_proc_bitmap_fops; #endif + parisc_vmerge_boundary = IOVP_SIZE; parisc_vmerge_max_size = IOVP_SIZE * BITS_PER_LONG; parisc_has_iommu(); diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 062fb100d94c..afc4e88551ad 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -359,7 +359,7 @@ ccw_device_set_online(struct ccw_device *cdev) else pr_debug("ccw_device_offline returned %d, device %s\n", ret, cdev->dev.bus_id); - return (ret = 0) ? -ENODEV : ret; + return (ret == 0) ? -ENODEV : ret; } static ssize_t diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index d2a5b04d7cba..85b1020a1fcc 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -405,7 +405,7 @@ __ccw_device_disband_start(struct ccw_device *cdev) cdev->private->iretry = 5; cdev->private->imask >>= 1; } - ccw_device_verify_done(cdev, (sch->lpm != 0) ? 
0 : -ENODEV); + ccw_device_disband_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV); } /* diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index dad4dd9887c9..6c762b43f921 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -317,7 +317,6 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) /* * We have ending status but no sense information. Do a basic sense. */ - sch = to_subchannel(cdev->dev.parent); sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE; sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw); sch->sense_ccw.count = SENSE_MAX_COUNT; diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 6229ba4995ad..9cf88d7201d3 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -98,9 +98,9 @@ lcs_register_debug_facility(void) return -ENOMEM; } debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view); - debug_set_level(lcs_dbf_setup, 4); + debug_set_level(lcs_dbf_setup, 2); debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view); - debug_set_level(lcs_dbf_trace, 4); + debug_set_level(lcs_dbf_trace, 2); return 0; } @@ -1292,9 +1292,8 @@ lcs_set_multicast_list(struct net_device *dev) LCS_DBF_TEXT(4, trace, "setmulti"); card = (struct lcs_card *) dev->priv; - if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) { + if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) schedule_work(&card->kernel_thread_starter); - } } #endif /* CONFIG_IP_MULTICAST */ @@ -1459,6 +1458,8 @@ lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) lcs_release_buffer(channel, buffer); card = (struct lcs_card *) ((char *) channel - offsetof(struct lcs_card, write)); + if (netif_queue_stopped(card->dev)) + netif_wake_queue(card->dev); spin_lock(&card->lock); card->tx_emitted--; if (card->tx_emitted <= 0 && card->tx_buffer != NULL) @@ -1478,6 +1479,7 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, struct net_device *dev) { struct lcs_header *header; + int rc = 0; LCS_DBF_TEXT(5, trace, "hardxmit"); if (skb == NULL) { @@ -1492,10 +1494,8 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, card->stats.tx_carrier_errors++; return 0; } - if (netif_queue_stopped(dev) ) { - card->stats.tx_dropped++; - return -EBUSY; - } + netif_stop_queue(card->dev); + spin_lock(&card->lock); if (card->tx_buffer != NULL && card->tx_buffer->count + sizeof(struct lcs_header) + skb->len + sizeof(u16) > LCS_IOBUFFERSIZE) @@ -1506,7 +1506,8 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, card->tx_buffer = lcs_get_buffer(&card->write); if (card->tx_buffer == NULL) { card->stats.tx_dropped++; - return -EBUSY; + rc = -EBUSY; + goto out; } card->tx_buffer->callback = lcs_txbuffer_cb; card->tx_buffer->count = 0; @@ -1518,13 +1519,18 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, header->type = card->lan_type; header->slot = card->portno; memcpy(header + 1, skb->data, skb->len); + spin_unlock(&card->lock); card->stats.tx_bytes += skb->len; card->stats.tx_packets++; dev_kfree_skb(skb); - if (card->tx_emitted <= 0) + netif_wake_queue(card->dev); + spin_lock(&card->lock); + if (card->tx_emitted <= 0 && card->tx_buffer != NULL) /* If this is the first tx buffer emit it immediately. 
*/ __lcs_emit_txbuffer(card); - return 0; +out: + spin_unlock(&card->lock); + return rc; } static int @@ -1535,9 +1541,7 @@ lcs_start_xmit(struct sk_buff *skb, struct net_device *dev) LCS_DBF_TEXT(5, trace, "pktxmit"); card = (struct lcs_card *) dev->priv; - spin_lock(&card->lock); rc = __lcs_start_xmit(card, skb, dev); - spin_unlock(&card->lock); return rc; } @@ -2319,7 +2323,6 @@ __init lcs_init_module(void) PRINT_ERR("Initialization failed\n"); return rc; } - return 0; } diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h index 08e60ad43916..2fad5e40c2e4 100644 --- a/drivers/s390/net/lcs.h +++ b/drivers/s390/net/lcs.h @@ -95,7 +95,7 @@ do { \ */ #define LCS_ILLEGAL_OFFSET 0xffff #define LCS_IOBUFFERSIZE 0x5000 -#define LCS_NUM_BUFFS 8 /* needs to be power of 2 */ +#define LCS_NUM_BUFFS 32 /* needs to be power of 2 */ #define LCS_MAC_LENGTH 6 #define LCS_INVALID_PORT_NO -1 #define LCS_LANCMD_TIMEOUT_DEFAULT 5 diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h index 9a064d4727ad..4df0fcd7b10b 100644 --- a/drivers/s390/net/qeth.h +++ b/drivers/s390/net/qeth.h @@ -1076,16 +1076,6 @@ qeth_get_qdio_q_format(struct qeth_card *card) } static inline int -qeth_isdigit(char * buf) -{ - while (*buf) { - if (!isdigit(*buf++)) - return 0; - } - return 1; -} - -static inline int qeth_isxdigit(char * buf) { while (*buf) { @@ -1104,33 +1094,17 @@ qeth_ipaddr4_to_string(const __u8 *addr, char *buf) static inline int qeth_string_to_ipaddr4(const char *buf, __u8 *addr) { - const char *start, *end; - char abuf[4]; - char *tmp; - int len; - int i; - - start = buf; - for (i = 0; i < 4; i++) { - if (i == 3) { - end = strchr(start,0xa); - if (end) - len = end - start; - else - len = strlen(start); - } - else { - end = strchr(start, '.'); - len = end - start; - } - if ((len <= 0) || (len > 3)) - return -EINVAL; - memset(abuf, 0, 4); - strncpy(abuf, start, len); - if (!qeth_isdigit(abuf)) + int count = 0, rc = 0; + int in[4]; + + rc = sscanf(buf, "%d.%d.%d.%d%n", + &in[0], &in[1], &in[2], &in[3], &count); + if (rc != 4 || count) + return -EINVAL; + for (count = 0; count < 4; count++) { + if (in[count] > 255) return -EINVAL; - addr[i] = simple_strtoul(abuf, &tmp, 10); - start = end + 1; + addr[count] = in[count]; } return 0; } @@ -1149,36 +1123,44 @@ qeth_ipaddr6_to_string(const __u8 *addr, char *buf) static inline int qeth_string_to_ipaddr6(const char *buf, __u8 *addr) { - const char *start, *end; - u16 *tmp_addr; - char abuf[5]; - char *tmp; - int len; - int i; - - tmp_addr = (u16 *)addr; - start = buf; - for (i = 0; i < 8; i++) { - if (i == 7) { - end = strchr(start,0xa); - if (end) - len = end - start; - else - len = strlen(start); - } - else { - end = strchr(start, ':'); - len = end - start; + char *end, *start; + __u16 *in; + char num[5]; + int num2, cnt, out, found, save_cnt; + unsigned short in_tmp[8] = {0, }; + + cnt = out = found = save_cnt = num2 = 0; + end = start = (char *) buf; + in = (__u16 *) addr; + memset(in, 0, 16); + while (end) { + end = strchr(end,':'); + if (end == NULL) { + end = (char *)buf + (strlen(buf)); + out = 1; + } + if ((end - start)) { + memset(num, 0, 5); + memcpy(num, start, end - start); + if (!qeth_isxdigit(num)) + return -EINVAL; + sscanf(start, "%x", &num2); + if (found) + in_tmp[save_cnt++] = num2; + else + in[cnt++] = num2; + if (out) + break; + } else { + if (found) + return -EINVAL; + found = 1; } - if ((len <= 0) || (len > 4)) - return -EINVAL; - memset(abuf, 0, 5); - strncpy(abuf, start, len); - if (!qeth_isxdigit(abuf)) - return -EINVAL; - 
tmp_addr[i] = simple_strtoul(abuf, &tmp, 16); - start = end + 1; - } + start = ++end; + } + cnt = 7; + while (save_cnt) + in[cnt--] = in_tmp[--save_cnt]; return 0; } diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index b02313127780..82cb4af2f0e7 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c @@ -59,8 +59,7 @@ qeth_eddp_free_context(struct qeth_eddp_context *ctx) for (i = 0; i < ctx->num_pages; ++i) free_page((unsigned long)ctx->pages[i]); kfree(ctx->pages); - if (ctx->elements != NULL) - kfree(ctx->elements); + kfree(ctx->elements); kfree(ctx); } @@ -413,6 +412,13 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, QETH_DBF_TEXT(trace, 5, "eddpftcp"); eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl; + if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { + eddp->skb_offset += sizeof(struct ethhdr); +#ifdef CONFIG_QETH_VLAN + if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) + eddp->skb_offset += VLAN_HLEN; +#endif /* CONFIG_QETH_VLAN */ + } tcph = eddp->skb->h.th; while (eddp->skb_offset < eddp->skb->len) { data_len = min((int)skb_shinfo(eddp->skb)->tso_size, @@ -483,6 +489,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, return -ENOMEM; } if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { + skb->mac.raw = (skb->data) + sizeof(struct qeth_hdr); memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN); #ifdef CONFIG_QETH_VLAN if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 410abeada6c4..dba7f7f02e79 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -516,7 +516,8 @@ __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode) QETH_DBF_TEXT(setup, 3, "setoffl"); QETH_DBF_HEX(setup, 3, &card, sizeof(void *)); - netif_carrier_off(card->dev); + if (card->dev && netif_carrier_ok(card->dev)) + netif_carrier_off(card->dev); recover_flag = card->state; if (qeth_stop_card(card, recovery_mode) == -ERESTARTSYS){ PRINT_WARN("Stopping card %s interrupted by user!\n", @@ -1679,6 +1680,7 @@ qeth_cmd_timeout(unsigned long data) spin_unlock_irqrestore(&reply->card->lock, flags); } + static struct qeth_ipa_cmd * qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) { @@ -1699,7 +1701,8 @@ qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) QETH_CARD_IFNAME(card), card->info.chpid); card->lan_online = 0; - netif_carrier_off(card->dev); + if (card->dev && netif_carrier_ok(card->dev)) + netif_carrier_off(card->dev); return NULL; case IPA_CMD_STARTLAN: PRINT_INFO("Link reestablished on %s " @@ -5562,7 +5565,7 @@ qeth_set_multicast_list(struct net_device *dev) if (card->info.type == QETH_CARD_TYPE_OSN) return ; - QETH_DBF_TEXT(trace,3,"setmulti"); + QETH_DBF_TEXT(trace, 3, "setmulti"); qeth_delete_mc_addresses(card); if (card->options.layer2) { qeth_layer2_add_multicast(card); @@ -5579,7 +5582,6 @@ out: return; if (qeth_set_thread_start_bit(card, QETH_SET_PROMISC_MODE_THREAD)==0) schedule_work(&card->kernel_thread_starter); - } static int @@ -7452,6 +7454,7 @@ qeth_softsetup_card(struct qeth_card *card) card->lan_online = 1; if (card->info.type==QETH_CARD_TYPE_OSN) goto out; + qeth_set_large_send(card, card->options.large_send); if (card->options.layer2) { card->dev->features |= NETIF_F_HW_VLAN_FILTER | @@ -7468,12 +7471,6 @@ qeth_softsetup_card(struct qeth_card *card) #endif goto out; } - if ((card->options.large_send == QETH_LARGE_SEND_EDDP) || - 
(card->options.large_send == QETH_LARGE_SEND_TSO)) - card->dev->features |= NETIF_F_TSO | NETIF_F_SG; - else - card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG); - if ((rc = qeth_setadapter_parms(card))) QETH_DBF_TEXT_(setup, 2, "2err%d", rc); if ((rc = qeth_start_ipassists(card))) diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 4d7d47cf2394..a5f2ba9a8fdb 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -710,10 +710,9 @@ static inline void _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, struct zfcp_adapter *adapter, struct scsi_cmnd *scsi_cmnd, - struct zfcp_fsf_req *new_fsf_req) + struct zfcp_fsf_req *fsf_req, + struct zfcp_fsf_req *old_fsf_req) { - struct zfcp_fsf_req *fsf_req = - (struct zfcp_fsf_req *)scsi_cmnd->host_scribble; struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; unsigned long flags; @@ -727,19 +726,20 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, if (offset == 0) { strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); - if (scsi_cmnd->device) { - rec->scsi_id = scsi_cmnd->device->id; - rec->scsi_lun = scsi_cmnd->device->lun; + if (scsi_cmnd != NULL) { + if (scsi_cmnd->device) { + rec->scsi_id = scsi_cmnd->device->id; + rec->scsi_lun = scsi_cmnd->device->lun; + } + rec->scsi_result = scsi_cmnd->result; + rec->scsi_cmnd = (unsigned long)scsi_cmnd; + rec->scsi_serial = scsi_cmnd->serial_number; + memcpy(rec->scsi_opcode, &scsi_cmnd->cmnd, + min((int)scsi_cmnd->cmd_len, + ZFCP_DBF_SCSI_OPCODE)); + rec->scsi_retries = scsi_cmnd->retries; + rec->scsi_allowed = scsi_cmnd->allowed; } - rec->scsi_result = scsi_cmnd->result; - rec->scsi_cmnd = (unsigned long)scsi_cmnd; - rec->scsi_serial = scsi_cmnd->serial_number; - memcpy(rec->scsi_opcode, - &scsi_cmnd->cmnd, - min((int)scsi_cmnd->cmd_len, - ZFCP_DBF_SCSI_OPCODE)); - rec->scsi_retries = scsi_cmnd->retries; - rec->scsi_allowed = scsi_cmnd->allowed; if (fsf_req != NULL) { fcp_rsp = (struct fcp_rsp_iu *) &(fsf_req->qtcb->bottom.io.fcp_rsp); @@ -772,15 +772,8 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, rec->fsf_seqno = fsf_req->seq_no; rec->fsf_issued = fsf_req->issued; } - if (new_fsf_req != NULL) { - rec->type.new_fsf_req.fsf_reqid = - (unsigned long) - new_fsf_req; - rec->type.new_fsf_req.fsf_seqno = - new_fsf_req->seq_no; - rec->type.new_fsf_req.fsf_issued = - new_fsf_req->issued; - } + rec->type.old_fsf_reqid = + (unsigned long) old_fsf_req; } else { strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); dump->total_size = buflen; @@ -801,19 +794,21 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, inline void zfcp_scsi_dbf_event_result(const char *tag, int level, struct zfcp_adapter *adapter, - struct scsi_cmnd *scsi_cmnd) + struct scsi_cmnd *scsi_cmnd, + struct zfcp_fsf_req *fsf_req) { - _zfcp_scsi_dbf_event_common("rslt", - tag, level, adapter, scsi_cmnd, NULL); + _zfcp_scsi_dbf_event_common("rslt", tag, level, + adapter, scsi_cmnd, fsf_req, NULL); } inline void zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, struct scsi_cmnd *scsi_cmnd, - struct zfcp_fsf_req *new_fsf_req) + struct zfcp_fsf_req *new_fsf_req, + struct zfcp_fsf_req *old_fsf_req) { - _zfcp_scsi_dbf_event_common("abrt", - tag, 1, adapter, scsi_cmnd, new_fsf_req); + _zfcp_scsi_dbf_event_common("abrt", tag, 1, + adapter, scsi_cmnd, new_fsf_req, old_fsf_req); } inline void 
@@ -823,7 +818,7 @@ zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, struct zfcp_adapter *adapter = unit->port->adapter; _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst", - tag, 1, adapter, scsi_cmnd, NULL); + tag, 1, adapter, scsi_cmnd, NULL, NULL); } static int @@ -856,6 +851,10 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view, rec->scsi_retries); len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x", rec->scsi_allowed); + if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) { + len += zfcp_dbf_view(out_buf + len, "old_fsf_reqid", "0x%0Lx", + rec->type.old_fsf_reqid); + } len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx", rec->fsf_reqid); len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x", @@ -883,21 +882,6 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view, min((int)rec->type.fcp.sns_info_len, ZFCP_DBF_SCSI_FCP_SNS_INFO), 0, rec->type.fcp.sns_info_len); - } else if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) { - len += zfcp_dbf_view(out_buf + len, "fsf_reqid_abort", "0x%0Lx", - rec->type.new_fsf_req.fsf_reqid); - len += zfcp_dbf_view(out_buf + len, "fsf_seqno_abort", "0x%08x", - rec->type.new_fsf_req.fsf_seqno); - len += zfcp_dbf_stck(out_buf + len, "fsf_issued", - rec->type.new_fsf_req.fsf_issued); - } else if ((strncmp(rec->tag, "trst", ZFCP_DBF_TAG_SIZE) == 0) || - (strncmp(rec->tag, "lrst", ZFCP_DBF_TAG_SIZE) == 0)) { - len += zfcp_dbf_view(out_buf + len, "fsf_reqid_reset", "0x%0Lx", - rec->type.new_fsf_req.fsf_reqid); - len += zfcp_dbf_view(out_buf + len, "fsf_seqno_reset", "0x%08x", - rec->type.new_fsf_req.fsf_seqno); - len += zfcp_dbf_stck(out_buf + len, "fsf_issued", - rec->type.new_fsf_req.fsf_issued); } len += sprintf(out_buf + len, "\n"); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index e260d19fa717..7f551d66f47f 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -152,11 +152,6 @@ typedef u32 scsi_lun_t; #define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100 #define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7 -/* Retry 5 times every 2 second, then every minute */ -#define ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES 5 -#define ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP 200 -#define ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP 6000 - /* timeout value for "default timer" for fsf requests */ #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ); @@ -429,11 +424,7 @@ struct zfcp_scsi_dbf_record { u32 fsf_seqno; u64 fsf_issued; union { - struct { - u64 fsf_reqid; - u32 fsf_seqno; - u64 fsf_issued; - } new_fsf_req; + u64 old_fsf_reqid; struct { u8 rsp_validity; u8 rsp_scsi_status; @@ -915,8 +906,6 @@ struct zfcp_adapter { wwn_t peer_wwnn; /* P2P peer WWNN */ wwn_t peer_wwpn; /* P2P peer WWPN */ u32 peer_d_id; /* P2P peer D_ID */ - wwn_t physical_wwpn; /* WWPN of physical port */ - u32 physical_s_id; /* local FC port ID */ struct ccw_device *ccw_device; /* S/390 ccw device */ u8 fc_service_class; u32 hydra_version; /* Hydra version */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index da947e662031..e3c4bdd29a60 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -2246,15 +2246,6 @@ zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action) { int retval; - if ((atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, - &erp_action->adapter->status)) && - (erp_action->adapter->adapter_features & - FSF_FEATURE_HBAAPI_MANAGEMENT)) { - zfcp_erp_adapter_strategy_open_fsf_xport(erp_action); - 
atomic_set(&erp_action->adapter->erp_counter, 0); - return ZFCP_ERP_FAILED; - } - retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); if (retval == ZFCP_ERP_FAILED) return ZFCP_ERP_FAILED; @@ -2266,13 +2257,6 @@ zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action) return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action); } -/* - * function: - * - * purpose: - * - * returns: - */ static int zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action) { @@ -2350,48 +2334,40 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) { int ret; - int retries; - int sleep; - struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_adapter *adapter; + adapter = erp_action->adapter; atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); - retries = 0; - do { - write_lock(&adapter->erp_lock); - zfcp_erp_action_to_running(erp_action); - write_unlock(&adapter->erp_lock); - zfcp_erp_timeout_init(erp_action); - ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL); - if (ret == -EOPNOTSUPP) { - debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp"); - return ZFCP_ERP_SUCCEEDED; - } else if (ret) { - debug_text_event(adapter->erp_dbf, 3, "a_xport_failed"); - return ZFCP_ERP_FAILED; - } - debug_text_event(adapter->erp_dbf, 6, "a_xport_ok"); + write_lock(&adapter->erp_lock); + zfcp_erp_action_to_running(erp_action); + write_unlock(&adapter->erp_lock); - down(&adapter->erp_ready_sem); - if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { - ZFCP_LOG_INFO("error: exchange of port data " - "for adapter %s timed out\n", - zfcp_get_busid_by_adapter(adapter)); - break; - } - if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, - &adapter->status)) - break; + zfcp_erp_timeout_init(erp_action); + ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL); + if (ret == -EOPNOTSUPP) { + debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp"); + return ZFCP_ERP_SUCCEEDED; + } else if (ret) { + debug_text_event(adapter->erp_dbf, 3, "a_xport_failed"); + return ZFCP_ERP_FAILED; + } + debug_text_event(adapter->erp_dbf, 6, "a_xport_ok"); - if (retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES) { - sleep = ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP; - retries++; - } else - sleep = ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP; - schedule_timeout(sleep); - } while (1); + ret = ZFCP_ERP_SUCCEEDED; + down(&adapter->erp_ready_sem); + if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { + ZFCP_LOG_INFO("error: exchange port data timed out (adapter " + "%s)\n", zfcp_get_busid_by_adapter(adapter)); + ret = ZFCP_ERP_FAILED; + } + if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) { + ZFCP_LOG_INFO("error: exchange port data failed (adapter " + "%s\n", zfcp_get_busid_by_adapter(adapter)); + ret = ZFCP_ERP_FAILED; + } - return ZFCP_ERP_SUCCEEDED; + return ret; } /* @@ -3439,6 +3415,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter, "(adapter %s, wwpn=0x%016Lx)\n", zfcp_get_busid_by_port(port), port->wwpn); + else + scsi_flush_work(adapter->scsi_host); } zfcp_port_put(port); break; diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index c1ba7cf1b496..700f5402a978 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -194,9 +194,10 @@ extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); extern void zfcp_scsi_dbf_event_result(const char *, int, struct 
zfcp_adapter *, - struct scsi_cmnd *); + struct scsi_cmnd *, + struct zfcp_fsf_req *); extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, - struct scsi_cmnd *, + struct scsi_cmnd *, struct zfcp_fsf_req *, struct zfcp_fsf_req *); extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, struct scsi_cmnd *); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 9f0cb3d820c0..662ec571d73b 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -388,6 +388,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req) case FSF_PROT_LINK_DOWN: zfcp_fsf_link_down_info_eval(adapter, &prot_status_qual->link_down_info); + zfcp_erp_adapter_reopen(adapter, 0); fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; @@ -558,10 +559,8 @@ zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter, atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); - if (link_down == NULL) { - zfcp_erp_adapter_reopen(adapter, 0); - return; - } + if (link_down == NULL) + goto out; switch (link_down->error_code) { case FSF_PSQ_LINK_NO_LIGHT: @@ -643,16 +642,8 @@ zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter, link_down->explanation_code, link_down->vendor_specific_code); - switch (link_down->error_code) { - case FSF_PSQ_LINK_NO_LIGHT: - case FSF_PSQ_LINK_WRAP_PLUG: - case FSF_PSQ_LINK_NO_FCP: - case FSF_PSQ_LINK_FIRMWARE_UPDATE: - zfcp_erp_adapter_reopen(adapter, 0); - break; - default: - zfcp_erp_adapter_failed(adapter); - } + out: + zfcp_erp_adapter_failed(adapter); } /* @@ -2304,6 +2295,35 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action, return retval; } +/** + * zfcp_fsf_exchange_port_evaluate + * @fsf_req: fsf_req which belongs to xchg port data request + * @xchg_ok: specifies if xchg port data was incomplete or complete (0/1) + */ +static void +zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok) +{ + struct zfcp_adapter *adapter; + struct fsf_qtcb *qtcb; + struct fsf_qtcb_bottom_port *bottom, *data; + struct Scsi_Host *shost; + + adapter = fsf_req->adapter; + qtcb = fsf_req->qtcb; + bottom = &qtcb->bottom.port; + shost = adapter->scsi_host; + + data = (struct fsf_qtcb_bottom_port*) fsf_req->data; + if (data) + memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port)); + + if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) + fc_host_permanent_port_name(shost) = bottom->wwpn; + else + fc_host_permanent_port_name(shost) = fc_host_port_name(shost); + fc_host_maxframe_size(shost) = bottom->maximum_frame_size; + fc_host_supported_speeds(shost) = bottom->supported_speed; +} /** * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request @@ -2312,38 +2332,26 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action, static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req) { - struct zfcp_adapter *adapter = fsf_req->adapter; - struct Scsi_Host *shost = adapter->scsi_host; - struct fsf_qtcb *qtcb = fsf_req->qtcb; - struct fsf_qtcb_bottom_port *bottom, *data; + struct zfcp_adapter *adapter; + struct fsf_qtcb *qtcb; + + adapter = fsf_req->adapter; + qtcb = fsf_req->qtcb; if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) return; switch (qtcb->header.fsf_status) { case FSF_GOOD: + zfcp_fsf_exchange_port_evaluate(fsf_req, 1); atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); - - bottom = &qtcb->bottom.port; - data = (struct fsf_qtcb_bottom_port*) fsf_req->data; - if (data) - memcpy(data, bottom, sizeof(struct 
fsf_qtcb_bottom_port)); - if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) - fc_host_permanent_port_name(shost) = bottom->wwpn; - else - fc_host_permanent_port_name(shost) = - fc_host_port_name(shost); - fc_host_maxframe_size(shost) = bottom->maximum_frame_size; - fc_host_supported_speeds(shost) = bottom->supported_speed; break; - case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: + zfcp_fsf_exchange_port_evaluate(fsf_req, 0); atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); - zfcp_fsf_link_down_info_eval(adapter, &qtcb->header.fsf_status_qual.link_down_info); break; - default: debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng"); debug_event(adapter->erp_dbf, 0, @@ -4203,11 +4211,11 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req) ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result); if (scpnt->result != 0) - zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt); + zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt, fsf_req); else if (scpnt->retries > 0) - zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt); + zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt, fsf_req); else - zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt); + zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt, fsf_req); /* cleanup pointer (need this especially for abort) */ scpnt->host_scribble = NULL; diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index e0803757c0fa..9f6b4d7a46f3 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -242,7 +242,7 @@ zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) zfcp_scsi_dbf_event_result("fail", 4, (struct zfcp_adapter*) scpnt->device->host->hostdata[0], - scpnt); + scpnt, NULL); /* return directly */ scpnt->scsi_done(scpnt); } @@ -446,7 +446,7 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble; if (!old_fsf_req) { write_unlock_irqrestore(&adapter->abort_lock, flags); - zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, new_fsf_req); + zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, NULL); retval = SUCCESS; goto out; } @@ -460,6 +460,8 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) adapter, unit, 0); if (!new_fsf_req) { ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n"); + zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, + old_fsf_req); retval = FAILED; goto out; } @@ -470,13 +472,16 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) /* status should be valid since signals were not permitted */ if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { - zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req); + zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req, + NULL); retval = SUCCESS; } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { - zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req); + zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req, + NULL); retval = SUCCESS; } else { - zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req); + zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req, + NULL); retval = FAILED; } zfcp_fsf_req_free(new_fsf_req); diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c index dfc07370f412..b29ac25e07f3 100644 --- a/drivers/s390/scsi/zfcp_sysfs_adapter.c +++ b/drivers/s390/scsi/zfcp_sysfs_adapter.c 
@@ -55,8 +55,6 @@ ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status)); ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn); ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn); ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id); -ZFCP_DEFINE_ADAPTER_ATTR(physical_wwpn, "0x%016llx\n", adapter->physical_wwpn); -ZFCP_DEFINE_ADAPTER_ATTR(physical_s_id, "0x%06x\n", adapter->physical_s_id); ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version); ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version); ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n", @@ -241,8 +239,6 @@ static struct attribute *zfcp_adapter_attrs[] = { &dev_attr_peer_wwnn.attr, &dev_attr_peer_wwpn.attr, &dev_attr_peer_d_id.attr, - &dev_attr_physical_wwpn.attr, - &dev_attr_physical_s_id.attr, &dev_attr_card_version.attr, &dev_attr_lic_version.attr, &dev_attr_status.attr, diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 31c497542272..d9152d02088c 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -61,6 +61,7 @@ Add support for embedded firmware error strings. 2.26.02.003 - Correctly handle single sgl's with use_sg=1. 2.26.02.004 - Add support for 9550SX controllers. + 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher. */ #include <linux/module.h> @@ -84,7 +85,7 @@ #include "3w-9xxx.h" /* Globals */ -#define TW_DRIVER_VERSION "2.26.02.004" +#define TW_DRIVER_VERSION "2.26.02.005" static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; static unsigned int twa_device_extension_count; static int twa_major = -1; @@ -1408,7 +1409,7 @@ static dma_addr_t twa_map_scsi_single_data(TW_Device_Extension *tw_dev, int requ dma_addr_t mapping; struct scsi_cmnd *cmd = tw_dev->srb[request_id]; struct pci_dev *pdev = tw_dev->tw_pci_dev; - int retval = 0; + dma_addr_t retval = 0; if (cmd->request_bufflen == 0) { retval = 0; @@ -1798,7 +1799,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, int i, sg_count; struct scsi_cmnd *srb = NULL; struct scatterlist *sglist = NULL; - u32 buffaddr = 0x0; + dma_addr_t buffaddr = 0x0; int retval = 1; if (tw_dev->srb[request_id]) { diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 7139659dd952..a16f8ded8f1d 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -173,10 +173,10 @@ int aac_get_config_status(struct aac_dev *dev) int status = 0; struct fib * fibptr; - if (!(fibptr = fib_alloc(dev))) + if (!(fibptr = aac_fib_alloc(dev))) return -ENOMEM; - fib_init(fibptr); + aac_fib_init(fibptr); { struct aac_get_config_status *dinfo; dinfo = (struct aac_get_config_status *) fib_data(fibptr); @@ -186,7 +186,7 @@ int aac_get_config_status(struct aac_dev *dev) dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data)); } - status = fib_send(ContainerCommand, + status = aac_fib_send(ContainerCommand, fibptr, sizeof (struct aac_get_config_status), FsaNormal, @@ -209,30 +209,30 @@ int aac_get_config_status(struct aac_dev *dev) status = -EINVAL; } } - fib_complete(fibptr); + aac_fib_complete(fibptr); /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ if (status >= 0) { if (commit == 1) { struct aac_commit_config * dinfo; - fib_init(fibptr); + aac_fib_init(fibptr); dinfo = (struct aac_commit_config *) fib_data(fibptr); dinfo->command = cpu_to_le32(VM_ContainerConfig); dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG); - 
status = fib_send(ContainerCommand, + status = aac_fib_send(ContainerCommand, fibptr, sizeof (struct aac_commit_config), FsaNormal, 1, 1, NULL, NULL); - fib_complete(fibptr); + aac_fib_complete(fibptr); } else if (commit == 0) { printk(KERN_WARNING "aac_get_config_status: Foreign device configurations are being ignored\n"); } } - fib_free(fibptr); + aac_fib_free(fibptr); return status; } @@ -255,15 +255,15 @@ int aac_get_containers(struct aac_dev *dev) instance = dev->scsi_host_ptr->unique_id; - if (!(fibptr = fib_alloc(dev))) + if (!(fibptr = aac_fib_alloc(dev))) return -ENOMEM; - fib_init(fibptr); + aac_fib_init(fibptr); dinfo = (struct aac_get_container_count *) fib_data(fibptr); dinfo->command = cpu_to_le32(VM_ContainerConfig); dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT); - status = fib_send(ContainerCommand, + status = aac_fib_send(ContainerCommand, fibptr, sizeof (struct aac_get_container_count), FsaNormal, @@ -272,7 +272,7 @@ int aac_get_containers(struct aac_dev *dev) if (status >= 0) { dresp = (struct aac_get_container_count_resp *)fib_data(fibptr); maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); - fib_complete(fibptr); + aac_fib_complete(fibptr); } if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) @@ -280,7 +280,7 @@ int aac_get_containers(struct aac_dev *dev) fsa_dev_ptr = (struct fsa_dev_info *) kmalloc( sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL); if (!fsa_dev_ptr) { - fib_free(fibptr); + aac_fib_free(fibptr); return -ENOMEM; } memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers); @@ -294,14 +294,14 @@ int aac_get_containers(struct aac_dev *dev) fsa_dev_ptr[index].devname[0] = '\0'; - fib_init(fibptr); + aac_fib_init(fibptr); dinfo = (struct aac_query_mount *) fib_data(fibptr); dinfo->command = cpu_to_le32(VM_NameServe); dinfo->count = cpu_to_le32(index); dinfo->type = cpu_to_le32(FT_FILESYS); - status = fib_send(ContainerCommand, + status = aac_fib_send(ContainerCommand, fibptr, sizeof (struct aac_query_mount), FsaNormal, @@ -319,7 +319,7 @@ int aac_get_containers(struct aac_dev *dev) dinfo->count = cpu_to_le32(index); dinfo->type = cpu_to_le32(FT_FILESYS); - if (fib_send(ContainerCommand, + if (aac_fib_send(ContainerCommand, fibptr, sizeof(struct aac_query_mount), FsaNormal, @@ -347,7 +347,7 @@ int aac_get_containers(struct aac_dev *dev) if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) fsa_dev_ptr[index].ro = 1; } - fib_complete(fibptr); + aac_fib_complete(fibptr); /* * If there are no more containers, then stop asking. 
*/ @@ -355,7 +355,7 @@ int aac_get_containers(struct aac_dev *dev) break; } } - fib_free(fibptr); + aac_fib_free(fibptr); return status; } @@ -413,8 +413,8 @@ static void get_container_name_callback(void *context, struct fib * fibptr) scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; - fib_complete(fibptr); - fib_free(fibptr); + aac_fib_complete(fibptr); + aac_fib_free(fibptr); scsicmd->scsi_done(scsicmd); } @@ -430,10 +430,10 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid) dev = (struct aac_dev *)scsicmd->device->host->hostdata; - if (!(cmd_fibcontext = fib_alloc(dev))) + if (!(cmd_fibcontext = aac_fib_alloc(dev))) return -ENOMEM; - fib_init(cmd_fibcontext); + aac_fib_init(cmd_fibcontext); dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); dinfo->command = cpu_to_le32(VM_ContainerConfig); @@ -441,7 +441,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid) dinfo->cid = cpu_to_le32(cid); dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data)); - status = fib_send(ContainerCommand, + status = aac_fib_send(ContainerCommand, cmd_fibcontext, sizeof (struct aac_get_name), FsaNormal, @@ -455,14 +455,14 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid) if (status == -EINPROGRESS) return 0; - printk(KERN_WARNING "aac_get_container_name: fib_send failed with status: %d.\n", status); - fib_complete(cmd_fibcontext); - fib_free(cmd_fibcontext); + printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status); + aac_fib_complete(cmd_fibcontext); + aac_fib_free(cmd_fibcontext); return -1; } /** - * probe_container - query a logical volume + * aac_probe_container - query a logical volume * @dev: device to query * @cid: container identifier * @@ -470,7 +470,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid) * is updated in the struct fsa_dev_info structure rather than returned. 
*/ -int probe_container(struct aac_dev *dev, int cid) +int aac_probe_container(struct aac_dev *dev, int cid) { struct fsa_dev_info *fsa_dev_ptr; int status; @@ -482,10 +482,10 @@ int probe_container(struct aac_dev *dev, int cid) fsa_dev_ptr = dev->fsa_dev; instance = dev->scsi_host_ptr->unique_id; - if (!(fibptr = fib_alloc(dev))) + if (!(fibptr = aac_fib_alloc(dev))) return -ENOMEM; - fib_init(fibptr); + aac_fib_init(fibptr); dinfo = (struct aac_query_mount *)fib_data(fibptr); @@ -493,14 +493,14 @@ int probe_container(struct aac_dev *dev, int cid) dinfo->count = cpu_to_le32(cid); dinfo->type = cpu_to_le32(FT_FILESYS); - status = fib_send(ContainerCommand, + status = aac_fib_send(ContainerCommand, fibptr, sizeof(struct aac_query_mount), FsaNormal, 1, 1, NULL, NULL); if (status < 0) { - printk(KERN_WARNING "aacraid: probe_container query failed.\n"); + printk(KERN_WARNING "aacraid: aac_probe_container query failed.\n"); goto error; } @@ -512,7 +512,7 @@ int probe_container(struct aac_dev *dev, int cid) dinfo->count = cpu_to_le32(cid); dinfo->type = cpu_to_le32(FT_FILESYS); - if (fib_send(ContainerCommand, + if (aac_fib_send(ContainerCommand, fibptr, sizeof(struct aac_query_mount), FsaNormal, @@ -535,8 +535,8 @@ int probe_container(struct aac_dev *dev, int cid) } error: - fib_complete(fibptr); - fib_free(fibptr); + aac_fib_complete(fibptr); + aac_fib_free(fibptr); return status; } @@ -700,14 +700,14 @@ int aac_get_adapter_info(struct aac_dev* dev) struct aac_bus_info *command; struct aac_bus_info_response *bus_info; - if (!(fibptr = fib_alloc(dev))) + if (!(fibptr = aac_fib_alloc(dev))) return -ENOMEM; - fib_init(fibptr); + aac_fib_init(fibptr); info = (struct aac_adapter_info *) fib_data(fibptr); memset(info,0,sizeof(*info)); - rcode = fib_send(RequestAdapterInfo, + rcode = aac_fib_send(RequestAdapterInfo, fibptr, sizeof(*info), FsaNormal, @@ -716,8 +716,8 @@ int aac_get_adapter_info(struct aac_dev* dev) NULL); if (rcode < 0) { - fib_complete(fibptr); - fib_free(fibptr); + aac_fib_complete(fibptr); + aac_fib_free(fibptr); return rcode; } memcpy(&dev->adapter_info, info, sizeof(*info)); @@ -725,13 +725,13 @@ int aac_get_adapter_info(struct aac_dev* dev) if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) { struct aac_supplement_adapter_info * info; - fib_init(fibptr); + aac_fib_init(fibptr); info = (struct aac_supplement_adapter_info *) fib_data(fibptr); memset(info,0,sizeof(*info)); - rcode = fib_send(RequestSupplementAdapterInfo, + rcode = aac_fib_send(RequestSupplementAdapterInfo, fibptr, sizeof(*info), FsaNormal, @@ -748,7 +748,7 @@ int aac_get_adapter_info(struct aac_dev* dev) * GetBusInfo */ - fib_init(fibptr); + aac_fib_init(fibptr); bus_info = (struct aac_bus_info_response *) fib_data(fibptr); @@ -761,7 +761,7 @@ int aac_get_adapter_info(struct aac_dev* dev) command->MethodId = cpu_to_le32(1); command->CtlCmd = cpu_to_le32(GetBusInfo); - rcode = fib_send(ContainerCommand, + rcode = aac_fib_send(ContainerCommand, fibptr, sizeof (*bus_info), FsaNormal, @@ -891,8 +891,8 @@ int aac_get_adapter_info(struct aac_dev* dev) } } - fib_complete(fibptr); - fib_free(fibptr); + aac_fib_complete(fibptr); + aac_fib_free(fibptr); return rcode; } @@ -976,8 +976,8 @@ static void io_callback(void *context, struct fib * fibptr) ? 
sizeof(scsicmd->sense_buffer) : sizeof(dev->fsa_dev[cid].sense_data)); } - fib_complete(fibptr); - fib_free(fibptr); + aac_fib_complete(fibptr); + aac_fib_free(fibptr); scsicmd->scsi_done(scsicmd); } @@ -1062,11 +1062,11 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid) /* * Alocate and initialize a Fib */ - if (!(cmd_fibcontext = fib_alloc(dev))) { + if (!(cmd_fibcontext = aac_fib_alloc(dev))) { return -1; } - fib_init(cmd_fibcontext); + aac_fib_init(cmd_fibcontext); if (dev->raw_io_interface) { struct aac_raw_io *readcmd; @@ -1086,7 +1086,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid) /* * Now send the Fib to the adapter */ - status = fib_send(ContainerRawIo, + status = aac_fib_send(ContainerRawIo, cmd_fibcontext, fibsize, FsaNormal, @@ -1112,7 +1112,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid) /* * Now send the Fib to the adapter */ - status = fib_send(ContainerCommand64, + status = aac_fib_send(ContainerCommand64, cmd_fibcontext, fibsize, FsaNormal, @@ -1136,7 +1136,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid) /* * Now send the Fib to the adapter */ - status = fib_send(ContainerCommand, + status = aac_fib_send(ContainerCommand, cmd_fibcontext, fibsize, FsaNormal, @@ -1153,14 +1153,14 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid) if (status == -EINPROGRESS) return 0; - printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status); + printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status); /* * For some reason, the Fib didn't queue, return QUEUE_FULL */ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; scsicmd->scsi_done(scsicmd); - fib_complete(cmd_fibcontext); - fib_free(cmd_fibcontext); + aac_fib_complete(cmd_fibcontext); + aac_fib_free(cmd_fibcontext); return 0; } @@ -1228,12 +1228,12 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid) /* * Allocate and initialize a Fib then setup a BlockWrite command */ - if (!(cmd_fibcontext = fib_alloc(dev))) { + if (!(cmd_fibcontext = aac_fib_alloc(dev))) { scsicmd->result = DID_ERROR << 16; scsicmd->scsi_done(scsicmd); return 0; } - fib_init(cmd_fibcontext); + aac_fib_init(cmd_fibcontext); if (dev->raw_io_interface) { struct aac_raw_io *writecmd; @@ -1253,7 +1253,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid) /* * Now send the Fib to the adapter */ - status = fib_send(ContainerRawIo, + status = aac_fib_send(ContainerRawIo, cmd_fibcontext, fibsize, FsaNormal, @@ -1279,7 +1279,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid) /* * Now send the Fib to the adapter */ - status = fib_send(ContainerCommand64, + status = aac_fib_send(ContainerCommand64, cmd_fibcontext, fibsize, FsaNormal, @@ -1305,7 +1305,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid) /* * Now send the Fib to the adapter */ - status = fib_send(ContainerCommand, + status = aac_fib_send(ContainerCommand, cmd_fibcontext, fibsize, FsaNormal, @@ -1322,15 +1322,15 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid) return 0; } - printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status); + printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status); /* * For some reason, the Fib didn't queue, return QUEUE_FULL */ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; scsicmd->scsi_done(scsicmd); - fib_complete(cmd_fibcontext); - fib_free(cmd_fibcontext); + aac_fib_complete(cmd_fibcontext); + aac_fib_free(cmd_fibcontext); return 
0; } @@ -1369,8 +1369,8 @@ static void synchronize_callback(void *context, struct fib *fibptr) sizeof(cmd->sense_buffer))); } - fib_complete(fibptr); - fib_free(fibptr); + aac_fib_complete(fibptr); + aac_fib_free(fibptr); cmd->scsi_done(cmd); } @@ -1407,10 +1407,10 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid) * Allocate and initialize a Fib */ if (!(cmd_fibcontext = - fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) + aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) return SCSI_MLQUEUE_HOST_BUSY; - fib_init(cmd_fibcontext); + aac_fib_init(cmd_fibcontext); synchronizecmd = fib_data(cmd_fibcontext); synchronizecmd->command = cpu_to_le32(VM_ContainerConfig); @@ -1422,7 +1422,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid) /* * Now send the Fib to the adapter */ - status = fib_send(ContainerCommand, + status = aac_fib_send(ContainerCommand, cmd_fibcontext, sizeof(struct aac_synchronize), FsaNormal, @@ -1437,9 +1437,9 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid) return 0; printk(KERN_WARNING - "aac_synchronize: fib_send failed with status: %d.\n", status); - fib_complete(cmd_fibcontext); - fib_free(cmd_fibcontext); + "aac_synchronize: aac_fib_send failed with status: %d.\n", status); + aac_fib_complete(cmd_fibcontext); + aac_fib_free(cmd_fibcontext); return SCSI_MLQUEUE_HOST_BUSY; } @@ -1465,7 +1465,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) * itself. */ if (scmd_id(scsicmd) != host->this_id) { - if ((scsicmd->device->channel == 0) ){ + if ((scsicmd->device->channel == CONTAINER_CHANNEL)) { if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){ scsicmd->result = DID_NO_CONNECT << 16; scsicmd->scsi_done(scsicmd); @@ -1488,7 +1488,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) case READ_CAPACITY: case TEST_UNIT_READY: spin_unlock_irq(host->host_lock); - probe_container(dev, cid); + aac_probe_container(dev, cid); if ((fsa_dev_ptr[cid].valid & 1) == 0) fsa_dev_ptr[cid].valid = 0; spin_lock_irq(host->host_lock); @@ -1935,33 +1935,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr) case SRB_STATUS_ERROR_RECOVERY: case SRB_STATUS_PENDING: case SRB_STATUS_SUCCESS: - if(scsicmd->cmnd[0] == INQUIRY ){ - u8 b; - u8 b1; - /* We can't expose disk devices because we can't tell whether they - * are the raw container drives or stand alone drives. If they have - * the removable bit set then we should expose them though. 
- */ - b = (*(u8*)scsicmd->buffer)&0x1f; - b1 = ((u8*)scsicmd->buffer)[1]; - if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER - || (b==TYPE_DISK && (b1&0x80)) ){ - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; - /* - * We will allow disk devices if in RAID/SCSI mode and - * the channel is 2 - */ - } else if ((dev->raid_scsi_mode) && - (scmd_channel(scsicmd) == 2)) { - scsicmd->result = DID_OK << 16 | - COMMAND_COMPLETE << 8; - } else { - scsicmd->result = DID_NO_CONNECT << 16 | - COMMAND_COMPLETE << 8; - } - } else { - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; - } + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; break; case SRB_STATUS_DATA_OVERRUN: switch(scsicmd->cmnd[0]){ @@ -1981,28 +1955,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr) scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; break; case INQUIRY: { - u8 b; - u8 b1; - /* We can't expose disk devices because we can't tell whether they - * are the raw container drives or stand alone drives - */ - b = (*(u8*)scsicmd->buffer)&0x0f; - b1 = ((u8*)scsicmd->buffer)[1]; - if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER - || (b==TYPE_DISK && (b1&0x80)) ){ - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; - /* - * We will allow disk devices if in RAID/SCSI mode and - * the channel is 2 - */ - } else if ((dev->raid_scsi_mode) && - (scmd_channel(scsicmd) == 2)) { - scsicmd->result = DID_OK << 16 | - COMMAND_COMPLETE << 8; - } else { - scsicmd->result = DID_NO_CONNECT << 16 | - COMMAND_COMPLETE << 8; - } + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; break; } default: @@ -2089,8 +2042,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr) */ scsicmd->result |= le32_to_cpu(srbreply->scsi_status); - fib_complete(fibptr); - fib_free(fibptr); + aac_fib_complete(fibptr); + aac_fib_free(fibptr); scsicmd->scsi_done(scsicmd); } @@ -2142,10 +2095,10 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) /* * Allocate and initialize a Fib then setup a BlockWrite command */ - if (!(cmd_fibcontext = fib_alloc(dev))) { + if (!(cmd_fibcontext = aac_fib_alloc(dev))) { return -1; } - fib_init(cmd_fibcontext); + aac_fib_init(cmd_fibcontext); srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext); srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); @@ -2179,7 +2132,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) /* * Now send the Fib to the adapter */ - status = fib_send(ScsiPortCommand64, cmd_fibcontext, + status = aac_fib_send(ScsiPortCommand64, cmd_fibcontext, fibsize, FsaNormal, 0, 1, (fib_callback) aac_srb_callback, (void *) scsicmd); @@ -2201,7 +2154,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) /* * Now send the Fib to the adapter */ - status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1, + status = aac_fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1, (fib_callback) aac_srb_callback, (void *) scsicmd); } /* @@ -2211,9 +2164,9 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) return 0; } - printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status); - fib_complete(cmd_fibcontext); - fib_free(cmd_fibcontext); + printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status); + aac_fib_complete(cmd_fibcontext); + aac_fib_free(cmd_fibcontext); return -1; } diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 66dbb6d2c506..2d430b7e8cf4 100644 --- 
a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1774,16 +1774,16 @@ static inline u32 cap_to_cyls(sector_t capacity, u32 divisor) struct scsi_cmnd; const char *aac_driverinfo(struct Scsi_Host *); -struct fib *fib_alloc(struct aac_dev *dev); -int fib_setup(struct aac_dev *dev); -void fib_map_free(struct aac_dev *dev); -void fib_free(struct fib * context); -void fib_init(struct fib * context); +struct fib *aac_fib_alloc(struct aac_dev *dev); +int aac_fib_setup(struct aac_dev *dev); +void aac_fib_map_free(struct aac_dev *dev); +void aac_fib_free(struct fib * context); +void aac_fib_init(struct fib * context); void aac_printf(struct aac_dev *dev, u32 val); -int fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt); +int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt); int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry); void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); -int fib_complete(struct fib * context); +int aac_fib_complete(struct fib * context); #define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data) struct aac_dev *aac_init_adapter(struct aac_dev *dev); int aac_get_config_status(struct aac_dev *dev); @@ -1799,11 +1799,11 @@ unsigned int aac_command_normal(struct aac_queue * q); unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); int aac_command_thread(struct aac_dev * dev); int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); -int fib_adapter_complete(struct fib * fibptr, unsigned short size); +int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size); struct aac_driver_ident* aac_get_driver_ident(int devtype); int aac_get_adapter_info(struct aac_dev* dev); int aac_send_shutdown(struct aac_dev *dev); -int probe_container(struct aac_dev *dev, int cid); +int aac_probe_container(struct aac_dev *dev, int cid); extern int numacb; extern int acbsize; extern char aac_driver_version[]; diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 4fe79cd7c957..47fefca72695 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) unsigned size; int retval; - fibptr = fib_alloc(dev); + fibptr = aac_fib_alloc(dev); if(fibptr == NULL) { return -ENOMEM; } @@ -73,7 +73,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) * First copy in the header so that we can check the size field. 
*/ if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) { - fib_free(fibptr); + aac_fib_free(fibptr); return -EFAULT; } /* @@ -110,13 +110,13 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) */ kfib->header.XferState = 0; } else { - retval = fib_send(le16_to_cpu(kfib->header.Command), fibptr, + retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr, le16_to_cpu(kfib->header.Size) , FsaNormal, 1, 1, NULL, NULL); if (retval) { goto cleanup; } - if (fib_complete(fibptr) != 0) { + if (aac_fib_complete(fibptr) != 0) { retval = -EINVAL; goto cleanup; } @@ -138,7 +138,7 @@ cleanup: fibptr->hw_fib_pa = hw_fib_pa; fibptr->hw_fib = hw_fib; } - fib_free(fibptr); + aac_fib_free(fibptr); return retval; } @@ -464,10 +464,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) /* * Allocate and initialize a Fib then setup a BlockWrite command */ - if (!(srbfib = fib_alloc(dev))) { + if (!(srbfib = aac_fib_alloc(dev))) { return -ENOMEM; } - fib_init(srbfib); + aac_fib_init(srbfib); srbcmd = (struct aac_srb*) fib_data(srbfib); @@ -601,7 +601,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) srbcmd->count = cpu_to_le32(byte_count); psg->count = cpu_to_le32(sg_indx+1); - status = fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); + status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); } else { struct user_sgmap* upsg = &user_srbcmd->sg; struct sgmap* psg = &srbcmd->sg; @@ -649,7 +649,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) } srbcmd->count = cpu_to_le32(byte_count); psg->count = cpu_to_le32(sg_indx+1); - status = fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); + status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); } if (status != 0){ @@ -684,8 +684,8 @@ cleanup: for(i=0; i <= sg_indx; i++){ kfree(sg_list[i]); } - fib_complete(srbfib); - fib_free(srbfib); + aac_fib_complete(srbfib); + aac_fib_free(srbfib); return rcode; } diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 82821d331c07..1628d094943d 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -185,17 +185,17 @@ int aac_send_shutdown(struct aac_dev * dev) struct aac_close *cmd; int status; - fibctx = fib_alloc(dev); + fibctx = aac_fib_alloc(dev); if (!fibctx) return -ENOMEM; - fib_init(fibctx); + aac_fib_init(fibctx); cmd = (struct aac_close *) fib_data(fibctx); cmd->command = cpu_to_le32(VM_CloseAll); cmd->cid = cpu_to_le32(0xffffffff); - status = fib_send(ContainerCommand, + status = aac_fib_send(ContainerCommand, fibctx, sizeof(struct aac_close), FsaNormal, @@ -203,8 +203,8 @@ int aac_send_shutdown(struct aac_dev * dev) NULL, NULL); if (status == 0) - fib_complete(fibctx); - fib_free(fibctx); + aac_fib_complete(fibctx); + aac_fib_free(fibctx); return status; } @@ -427,7 +427,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) /* * Initialize the list of fibs */ - if(fib_setup(dev)<0){ + if (aac_fib_setup(dev) < 0) { kfree(dev->queues); return NULL; } diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 014cc8d54a9f..609fd19b1844 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -67,27 +67,27 @@ static int fib_map_alloc(struct aac_dev *dev) } /** - * fib_map_free - free the fib objects + * aac_fib_map_free - free the fib objects * @dev: Adapter to free * * Free the 
PCI mappings and the memory allocated for FIB blocks * on this adapter. */ -void fib_map_free(struct aac_dev *dev) +void aac_fib_map_free(struct aac_dev *dev) { pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa); } /** - * fib_setup - setup the fibs + * aac_fib_setup - setup the fibs * @dev: Adapter to set up * * Allocate the PCI space for the fibs, map it and then intialise the * fib area, the unmapped fib data and also the free list */ -int fib_setup(struct aac_dev * dev) +int aac_fib_setup(struct aac_dev * dev) { struct fib *fibptr; struct hw_fib *hw_fib_va; @@ -134,14 +134,14 @@ int fib_setup(struct aac_dev * dev) } /** - * fib_alloc - allocate a fib + * aac_fib_alloc - allocate a fib * @dev: Adapter to allocate the fib for * * Allocate a fib from the adapter fib pool. If the pool is empty we * return NULL. */ -struct fib * fib_alloc(struct aac_dev *dev) +struct fib *aac_fib_alloc(struct aac_dev *dev) { struct fib * fibptr; unsigned long flags; @@ -170,14 +170,14 @@ struct fib * fib_alloc(struct aac_dev *dev) } /** - * fib_free - free a fib + * aac_fib_free - free a fib * @fibptr: fib to free up * * Frees up a fib and places it on the appropriate queue * (either free or timed out) */ -void fib_free(struct fib * fibptr) +void aac_fib_free(struct fib *fibptr) { unsigned long flags; @@ -188,7 +188,7 @@ void fib_free(struct fib * fibptr) fibptr->dev->timeout_fib = fibptr; } else { if (fibptr->hw_fib->header.XferState != 0) { - printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", + printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", (void*)fibptr, le32_to_cpu(fibptr->hw_fib->header.XferState)); } @@ -199,13 +199,13 @@ void fib_free(struct fib * fibptr) } /** - * fib_init - initialise a fib + * aac_fib_init - initialise a fib * @fibptr: The fib to initialize * * Set up the generic fib fields ready for use */ -void fib_init(struct fib *fibptr) +void aac_fib_init(struct fib *fibptr) { struct hw_fib *hw_fib = fibptr->hw_fib; @@ -362,7 +362,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f */ /** - * fib_send - send a fib to the adapter + * aac_fib_send - send a fib to the adapter * @command: Command to send * @fibptr: The fib * @size: Size of fib data area @@ -378,7 +378,9 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f * response FIB is received from the adapter. 
*/ -int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data) +int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, + int priority, int wait, int reply, fib_callback callback, + void *callback_data) { struct aac_dev * dev = fibptr->dev; struct hw_fib * hw_fib = fibptr->hw_fib; @@ -493,7 +495,7 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority q->numpending++; *(q->headers.producer) = cpu_to_le32(index + 1); spin_unlock_irqrestore(q->lock, qflags); - dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index)); + dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index)); if (!(nointr & aac_config.irq_mod)) aac_adapter_notify(dev, AdapNormCmdQueue); } @@ -520,7 +522,7 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority list_del(&fibptr->queue); spin_unlock_irqrestore(q->lock, qflags); if (wait == -1) { - printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n" + printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n" "Usually a result of a PCI interrupt routing problem;\n" "update mother board BIOS or consider utilizing one of\n" "the SAFE mode kernel options (acpi, apic etc)\n"); @@ -624,7 +626,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid) } /** - * fib_adapter_complete - complete adapter issued fib + * aac_fib_adapter_complete - complete adapter issued fib * @fibptr: fib to complete * @size: size of fib * @@ -632,7 +634,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid) * the adapter. */ -int fib_adapter_complete(struct fib * fibptr, unsigned short size) +int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) { struct hw_fib * hw_fib = fibptr->hw_fib; struct aac_dev * dev = fibptr->dev; @@ -683,20 +685,20 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size) } else { - printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n"); + printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n"); BUG(); } return 0; } /** - * fib_complete - fib completion handler + * aac_fib_complete - fib completion handler * @fib: FIB to complete * * Will do all necessary work to complete a FIB. */ -int fib_complete(struct fib * fibptr) +int aac_fib_complete(struct fib *fibptr) { struct hw_fib * hw_fib = fibptr->hw_fib; @@ -995,14 +997,14 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) if (!dev || !dev->scsi_host_ptr) return; /* - * force reload of disk info via probe_container + * force reload of disk info via aac_probe_container */ if ((device_config_needed == CHANGE) && (dev->fsa_dev[container].valid == 1)) dev->fsa_dev[container].valid = 2; if ((device_config_needed == CHANGE) || (device_config_needed == ADD)) - probe_container(dev, container); + aac_probe_container(dev, container); device = scsi_device_lookup(dev->scsi_host_ptr, CONTAINER_TO_CHANNEL(container), CONTAINER_TO_ID(container), @@ -1104,7 +1106,7 @@ int aac_command_thread(struct aac_dev * dev) /* Handle Driver Notify Events */ aac_handle_aif(dev, fib); *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); - fib_adapter_complete(fib, (u16)sizeof(u32)); + aac_fib_adapter_complete(fib, (u16)sizeof(u32)); } else { struct list_head *entry; /* The u32 here is important and intended. 
We are using @@ -1241,7 +1243,7 @@ int aac_command_thread(struct aac_dev * dev) * Set the status of this FIB */ *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); - fib_adapter_complete(fib, sizeof(u32)); + aac_fib_adapter_complete(fib, sizeof(u32)); spin_unlock_irqrestore(&dev->fib_lock, flagv); /* Free up the remaining resources */ hw_fib_p = hw_fib_pool; diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index 439948ef8251..f6bcb9486f85 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c @@ -206,7 +206,7 @@ unsigned int aac_command_normal(struct aac_queue *q) * Set the status of this FIB */ *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); - fib_adapter_complete(fib, sizeof(u32)); + aac_fib_adapter_complete(fib, sizeof(u32)); spin_lock_irqsave(q->lock, flags); } } diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 0bf5f9a943e8..271617890562 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -385,17 +385,45 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev, static int aac_slave_configure(struct scsi_device *sdev) { - struct Scsi_Host *host = sdev->host; + if (sdev_channel(sdev) == CONTAINER_CHANNEL) { + sdev->skip_ms_page_8 = 1; + sdev->skip_ms_page_3f = 1; + } + if ((sdev->type == TYPE_DISK) && + (sdev_channel(sdev) != CONTAINER_CHANNEL)) { + struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; + if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) + sdev->no_uld_attach = 1; + } + if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && + (sdev_channel(sdev) == CONTAINER_CHANNEL)) { + struct scsi_device * dev; + struct Scsi_Host *host = sdev->host; + unsigned num_lsu = 0; + unsigned num_one = 0; + unsigned depth; - if (sdev->tagged_supported) - scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 128); - else + __shost_for_each_device(dev, host) { + if (dev->tagged_supported && (dev->type == TYPE_DISK) && + (sdev_channel(dev) == CONTAINER_CHANNEL)) + ++num_lsu; + else + ++num_one; + } + if (num_lsu == 0) + ++num_lsu; + depth = (host->can_queue - num_one) / num_lsu; + if (depth > 256) + depth = 256; + else if (depth < 2) + depth = 2; + scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth); + if (!(((struct aac_dev *)host->hostdata)->adapter_info.options & + AAC_OPT_NEW_COMM)) + blk_queue_max_segment_size(sdev->request_queue, 65536); + } else scsi_adjust_queue_depth(sdev, 0, 1); - if (!(((struct aac_dev *)host->hostdata)->adapter_info.options - & AAC_OPT_NEW_COMM)) - blk_queue_max_segment_size(sdev->request_queue, 65536); - return 0; } @@ -870,7 +898,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, /* * max channel will be the physical channels plus 1 virtual channel - * all containers are on the virtual channel 0 + * all containers are on the virtual channel 0 (CONTAINER_CHANNEL) * physical channels are address by their actual physical number+1 */ if (aac->nondasd_support == 1) @@ -913,7 +941,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, aac_adapter_disable_int(aac); free_irq(pdev->irq, aac); out_unmap: - fib_map_free(aac); + aac_fib_map_free(aac); pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); kfree(aac->queues); iounmap(aac->regs.sa); @@ -947,7 +975,7 @@ static void __devexit aac_remove_one(struct pci_dev *pdev) aac_send_shutdown(aac); aac_adapter_disable_int(aac); - fib_map_free(aac); + aac_fib_map_free(aac); pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); 
kfree(aac->queues); diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index bd3ffdf6c800..62e3cda859af 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -2816,7 +2816,7 @@ static int gdth_fill_cache_cmd(int hanum,Scsi_Cmnd *scp,ushort hdrive) } #endif - } else { + } else if (scp->request_bufflen) { scp->SCp.Status = GDTH_MAP_SINGLE; scp->SCp.Message = (read_write == 1 ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 27acf78cf8d8..2bba5e55d7bc 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -4236,35 +4236,6 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) } /** - * ipr_save_ioafp_mode_select - Save adapters mode select data - * @ioa_cfg: ioa config struct - * @scsi_cmd: scsi command struct - * - * This function saves mode select data for the adapter to - * use following an adapter reset. - * - * Return value: - * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure - **/ -static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg, - struct scsi_cmnd *scsi_cmd) -{ - if (!ioa_cfg->saved_mode_pages) { - ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages), - GFP_ATOMIC); - if (!ioa_cfg->saved_mode_pages) { - dev_err(&ioa_cfg->pdev->dev, - "IOA mode select buffer allocation failed\n"); - return SCSI_MLQUEUE_HOST_BUSY; - } - } - - memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]); - ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4]; - return 0; -} - -/** * ipr_queuecommand - Queue a mid-layer request * @scsi_cmd: scsi command struct * @done: done function @@ -4338,9 +4309,6 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; - if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT) - rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd); - if (likely(rc == 0)) rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); @@ -4829,17 +4797,11 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd) int length; ENTER; - if (ioa_cfg->saved_mode_pages) { - memcpy(mode_pages, ioa_cfg->saved_mode_pages, - ioa_cfg->saved_mode_page_len); - length = ioa_cfg->saved_mode_page_len; - } else { - ipr_scsi_bus_speed_limit(ioa_cfg); - ipr_check_term_power(ioa_cfg, mode_pages); - ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); - length = mode_pages->hdr.length + 1; - mode_pages->hdr.length = 0; - } + ipr_scsi_bus_speed_limit(ioa_cfg); + ipr_check_term_power(ioa_cfg, mode_pages); + ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); + length = mode_pages->hdr.length + 1; + mode_pages->hdr.length = 0; ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), @@ -5969,7 +5931,6 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) } ipr_free_dump(ioa_cfg); - kfree(ioa_cfg->saved_mode_pages); kfree(ioa_cfg->trace); } diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index b639332131f1..fd360bfe56dd 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -36,8 +36,8 @@ /* * Literals */ -#define IPR_DRIVER_VERSION "2.1.1" -#define IPR_DRIVER_DATE "(November 15, 2005)" +#define IPR_DRIVER_VERSION "2.1.2" +#define IPR_DRIVER_DATE "(February 8, 2006)" /* * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding @@ -1000,7 +1000,6 @@ struct ipr_ioa_cfg { struct Scsi_Host *host; struct pci_dev *pdev; struct ipr_sglist *ucode_sglist; - struct ipr_mode_pages *saved_mode_pages; u8 
saved_mode_page_len; struct work_struct work_q; diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 780bfcc67096..ff79e68b347c 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -146,7 +146,7 @@ iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) spin_unlock_irqrestore(&session->lock, flags); set_bit(SUSPEND_BIT, &conn->suspend_tx); set_bit(SUSPEND_BIT, &conn->suspend_rx); - iscsi_conn_error(iscsi_handle(conn), err); + iscsi_conn_error(conn->cls_conn, err); } static inline int @@ -244,12 +244,10 @@ iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) if (sc->sc_data_direction == DMA_TO_DEVICE) { struct iscsi_data_task *dtask, *n; /* WRITE: cleanup Data-Out's if any */ - spin_lock(&conn->lock); list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) { list_del(&dtask->item); mempool_free(dtask, ctask->datapool); } - spin_unlock(&conn->lock); } ctask->xmstate = XMSTATE_IDLE; ctask->r2t = NULL; @@ -689,7 +687,7 @@ iscsi_hdr_recv(struct iscsi_conn *conn) break; if (!conn->in.datalen) { - rc = iscsi_recv_pdu(iscsi_handle(conn), hdr, + rc = iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0); if (conn->login_mtask != mtask) { spin_lock(&session->lock); @@ -737,7 +735,7 @@ iscsi_hdr_recv(struct iscsi_conn *conn) if (!conn->in.datalen) { struct iscsi_mgmt_task *mtask; - rc = iscsi_recv_pdu(iscsi_handle(conn), hdr, + rc = iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0); mtask = (struct iscsi_mgmt_task *) session->mgmt_cmds[conn->in.itt - @@ -761,7 +759,7 @@ iscsi_hdr_recv(struct iscsi_conn *conn) rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)hdr); if (!rc && hdr->ttt != ISCSI_RESERVED_TAG) - rc = iscsi_recv_pdu(iscsi_handle(conn), + rc = iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0); } else rc = ISCSI_ERR_PROTO; @@ -1044,7 +1042,7 @@ iscsi_data_recv(struct iscsi_conn *conn) goto exit; } - rc = iscsi_recv_pdu(iscsi_handle(conn), conn->in.hdr, + rc = iscsi_recv_pdu(conn->cls_conn, conn->in.hdr, conn->data, conn->in.datalen); if (!rc && conn->datadgst_en && @@ -2428,19 +2426,20 @@ iscsi_pool_free(struct iscsi_queue *q, void **items) } static struct iscsi_cls_conn * -iscsi_conn_create(struct Scsi_Host *shost, uint32_t conn_idx) +iscsi_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) { + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct iscsi_session *session = iscsi_hostdata(shost->hostdata); struct iscsi_conn *conn; struct iscsi_cls_conn *cls_conn; - cls_conn = iscsi_create_conn(hostdata_session(shost->hostdata), - conn_idx); + cls_conn = iscsi_create_conn(cls_session, conn_idx); if (!cls_conn) return NULL; conn = cls_conn->dd_data; + memset(conn, 0, sizeof(*conn)); - memset(conn, 0, sizeof(struct iscsi_conn)); + conn->cls_conn = cls_conn; conn->c_stage = ISCSI_CONN_INITIAL_STAGE; conn->in_progress = IN_PROGRESS_WAIT_HEADER; conn->id = conn_idx; @@ -2452,8 +2451,6 @@ iscsi_conn_create(struct Scsi_Host *shost, uint32_t conn_idx) conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; - spin_lock_init(&conn->lock); - /* initialize general xmit PDU commands queue */ conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*), GFP_KERNEL, NULL); @@ -2625,11 +2622,13 @@ iscsi_conn_destroy(struct iscsi_cls_conn *cls_conn) } static int -iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh, - uint32_t transport_fd, int is_leading) +iscsi_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn 
*cls_conn, uint32_t transport_fd, + int is_leading) { - struct iscsi_session *session = iscsi_ptr(sessionh); - struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = iscsi_ptr(connh); + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct iscsi_session *session = iscsi_hostdata(shost->hostdata); + struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = cls_conn->dd_data; struct sock *sk; struct socket *sock; int err; @@ -2703,9 +2702,9 @@ iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh, } static int -iscsi_conn_start(iscsi_connh_t connh) +iscsi_conn_start(struct iscsi_cls_conn *cls_conn) { - struct iscsi_conn *conn = iscsi_ptr(connh); + struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; struct sock *sk; @@ -2754,9 +2753,9 @@ iscsi_conn_start(iscsi_connh_t connh) } static void -iscsi_conn_stop(iscsi_connh_t connh, int flag) +iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) { - struct iscsi_conn *conn = iscsi_ptr(connh); + struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; struct sock *sk; unsigned long flags; @@ -3253,9 +3252,9 @@ static struct scsi_host_template iscsi_sht = { static struct iscsi_transport iscsi_tcp_transport; -static struct Scsi_Host * +static struct iscsi_cls_session * iscsi_session_create(struct scsi_transport_template *scsit, - uint32_t initial_cmdsn) + uint32_t initial_cmdsn, uint32_t *sid) { struct Scsi_Host *shost; struct iscsi_session *session; @@ -3268,13 +3267,14 @@ iscsi_session_create(struct scsi_transport_template *scsit, session = iscsi_hostdata(shost->hostdata); memset(session, 0, sizeof(struct iscsi_session)); session->host = shost; - session->state = ISCSI_STATE_LOGGED_IN; + session->state = ISCSI_STATE_FREE; session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX; session->cmds_max = ISCSI_XMIT_CMDS_MAX; session->cmdsn = initial_cmdsn; session->exp_cmdsn = initial_cmdsn + 1; session->max_cmdsn = initial_cmdsn + 1; session->max_r2t = 1; + *sid = shost->host_no; /* initialize SCSI PDU commands pool */ if (iscsi_pool_init(&session->cmdpool, session->cmds_max, @@ -3311,22 +3311,24 @@ iscsi_session_create(struct scsi_transport_template *scsit, if (iscsi_r2tpool_alloc(session)) goto r2tpool_alloc_fail; - return shost; + return hostdata_session(shost->hostdata); r2tpool_alloc_fail: for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) kfree(session->mgmt_cmds[cmd_i]->data); - iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); immdata_alloc_fail: + iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); mgmtpool_alloc_fail: iscsi_pool_free(&session->cmdpool, (void**)session->cmds); cmdpool_alloc_fail: + iscsi_transport_destroy_session(shost); return NULL; } static void -iscsi_session_destroy(struct Scsi_Host *shost) +iscsi_session_destroy(struct iscsi_cls_session *cls_session) { + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct iscsi_session *session = iscsi_hostdata(shost->hostdata); int cmd_i; struct iscsi_data_task *dtask, *n; @@ -3350,10 +3352,10 @@ iscsi_session_destroy(struct Scsi_Host *shost) } static int -iscsi_conn_set_param(iscsi_connh_t connh, enum iscsi_param param, +iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, uint32_t value) { - struct iscsi_conn *conn = iscsi_ptr(connh); + struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; spin_lock_bh(&session->lock); @@ -3495,9 +3497,10 @@ iscsi_conn_set_param(iscsi_connh_t connh, enum 
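The iscsi_tcp conversion above replaces the opaque iscsi_connh_t/iscsi_sessionh_t handles with struct iscsi_cls_conn / iscsi_cls_session pointers whose dd_data area carries the LLD-private state. A small user-space sketch of that dd_data layout; all names here are illustrative stand-ins, not the transport class API:

#include <stdlib.h>
#include <string.h>

/* Illustrative sketch of the dd_data layout: the class object is
 * allocated with extra room for the LLD's private state, and dd_data
 * points at that trailing area, so no opaque handle is needed. */
struct cls_conn {                       /* stands in for struct iscsi_cls_conn */
        int conn_idx;
        void *dd_data;                  /* LLD-private area, allocated below */
};

static struct cls_conn *cls_conn_alloc(size_t dd_size, int idx)
{
        struct cls_conn *c = calloc(1, sizeof(*c) + dd_size);

        if (!c)
                return NULL;
        c->conn_idx = idx;
        c->dd_data = c + 1;             /* private data sits right after the object */
        return c;
}

struct lld_conn {                       /* stands in for struct iscsi_conn */
        struct cls_conn *cls_conn;      /* back-pointer, as the patch adds */
        int id;
};

int main(void)
{
        struct cls_conn *cls = cls_conn_alloc(sizeof(struct lld_conn), 0);
        struct lld_conn *conn;

        if (!cls)
                return 1;
        conn = cls->dd_data;
        memset(conn, 0, sizeof(*conn)); /* mirrors memset(conn, 0, sizeof(*conn)) */
        conn->cls_conn = cls;           /* mirrors conn->cls_conn = cls_conn */
        conn->id = cls->conn_idx;
        free(cls);
        return 0;
}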
iscsi_param param, } static int -iscsi_session_get_param(struct Scsi_Host *shost, +iscsi_session_get_param(struct iscsi_cls_session *cls_session, enum iscsi_param param, uint32_t *value) { + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct iscsi_session *session = iscsi_hostdata(shost->hostdata); switch(param) { @@ -3539,9 +3542,10 @@ iscsi_session_get_param(struct Scsi_Host *shost, } static int -iscsi_conn_get_param(void *data, enum iscsi_param param, uint32_t *value) +iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, uint32_t *value) { - struct iscsi_conn *conn = data; + struct iscsi_conn *conn = cls_conn->dd_data; switch(param) { case ISCSI_PARAM_MAX_RECV_DLENGTH: @@ -3564,9 +3568,9 @@ iscsi_conn_get_param(void *data, enum iscsi_param param, uint32_t *value) } static void -iscsi_conn_get_stats(iscsi_connh_t connh, struct iscsi_stats *stats) +iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) { - struct iscsi_conn *conn = iscsi_ptr(connh); + struct iscsi_conn *conn = cls_conn->dd_data; stats->txdata_octets = conn->txdata_octets; stats->rxdata_octets = conn->rxdata_octets; @@ -3587,10 +3591,10 @@ iscsi_conn_get_stats(iscsi_connh_t connh, struct iscsi_stats *stats) } static int -iscsi_conn_send_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, char *data, - uint32_t data_size) +iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, + char *data, uint32_t data_size) { - struct iscsi_conn *conn = iscsi_ptr(connh); + struct iscsi_conn *conn = cls_conn->dd_data; int rc; mutex_lock(&conn->xmitmutex); diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index f95e61b76f70..ba26741ac154 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -113,7 +113,10 @@ struct iscsi_tcp_recv { int datadgst; }; +struct iscsi_cls_conn; + struct iscsi_conn { + struct iscsi_cls_conn *cls_conn; /* ptr to class connection */ struct iscsi_hdr hdr; /* header placeholder */ char hdrext[4*sizeof(__u16) + sizeof(__u32)]; @@ -143,7 +146,6 @@ struct iscsi_conn { struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */ struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */ struct iscsi_cmd_task *ctask; /* xmit ctask in progress */ - spinlock_t lock; /* FIXME: to be removed */ /* old values for socket callbacks */ void (*old_data_ready)(struct sock *, int); diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 61cba39a6834..adc5b440c9bc 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c @@ -213,7 +213,7 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc) } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) { /* Unable to use DMA due to host limitation */ tf->protocol = ATA_PROT_PIO; - index = dev->multi_count ? 0 : 4; + index = dev->multi_count ? 
0 : 8; } else { tf->protocol = ATA_PROT_DMA; index = 16; @@ -3426,11 +3426,12 @@ static void ata_pio_error(struct ata_port *ap) { struct ata_queued_cmd *qc; - printk(KERN_WARNING "ata%u: PIO error\n", ap->id); - qc = ata_qc_from_tag(ap, ap->active_tag); WARN_ON(qc == NULL); + if (qc->tf.command != ATA_CMD_PACKET) + printk(KERN_WARNING "ata%u: PIO error\n", ap->id); + /* make sure qc->err_mask is available to * know what's wrong and recover */ diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index d101a8a6f4e8..7144674bc8e6 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -5049,7 +5049,7 @@ static struct pci_device_id megaraid_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl); static struct pci_driver megaraid_pci_driver = { - .name = "megaraid", + .name = "megaraid_legacy", .id_table = megaraid_pci_tbl, .probe = megaraid_probe_one, .remove = __devexit_p(megaraid_remove_one), diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h index 4b3e0d6e5afa..4b75fe619d9c 100644 --- a/drivers/scsi/megaraid.h +++ b/drivers/scsi/megaraid.h @@ -5,7 +5,7 @@ #include <linux/mutex.h> #define MEGARAID_VERSION \ - "v2.00.3 (Release Date: Wed Feb 19 08:51:30 EST 2003)\n" + "v2.00.4 (Release Date: Thu Feb 9 08:51:30 EST 2006)\n" /* * Driver features - change the values to enable or disable features in the diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index a487f414960e..7de267e14458 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. * * FILE : megaraid_sas.c - * Version : v00.00.02.02 + * Version : v00.00.02.04 * * Authors: * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com> @@ -60,6 +60,12 @@ static struct pci_device_id megasas_pci_table[] = { PCI_ANY_ID, }, { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_LSI_SAS1078R, // ppc IOP + PCI_ANY_ID, + PCI_ANY_ID, + }, + { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5, // xscale IOP PCI_ANY_ID, @@ -199,6 +205,86 @@ static struct megasas_instance_template megasas_instance_template_xscale = { */ /** +* The following functions are defined for ppc (deviceid : 0x60) +* controllers +*/ + +/** + * megasas_enable_intr_ppc - Enables interrupts + * @regs: MFI register set + */ +static inline void +megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs) +{ + writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); + + writel(~0x80000004, &(regs)->outbound_intr_mask); + + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_mask); +} + +/** + * megasas_read_fw_status_reg_ppc - returns the current FW status value + * @regs: MFI register set + */ +static u32 +megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs) +{ + return readl(&(regs)->outbound_scratch_pad); +} + +/** + * megasas_clear_interrupt_ppc - Check & clear interrupt + * @regs: MFI register set + */ +static int +megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) +{ + u32 status; + /* + * Check if it is our interrupt + */ + status = readl(®s->outbound_intr_status); + + if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) { + return 1; + } + + /* + * Clear the interrupt by writing back the same value + */ + writel(status, ®s->outbound_doorbell_clear); + + return 0; +} +/** + * megasas_fire_cmd_ppc - Sends command to the FW + * @frame_phys_addr : Physical address of cmd + * @frame_count : Number of frames for the command + * @regs : MFI register set + */ +static 
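The megasas_clear_intr_ppc() hunk above follows the usual claim-and-ack pattern for the 1078 (ppc IOP) parts: read the outbound interrupt status, return non-zero if the 1078 message bit is absent, otherwise write the value back to the doorbell-clear register. A stand-alone model with simulated registers:

#include <stdint.h>
#include <stdio.h>

#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000u

/* Simulated registers; the driver uses readl()/writel() on the mapped
 * MFI register set instead. */
static uint32_t outbound_intr_status;
static uint32_t outbound_doorbell_clear;

/* Returns 0 if the interrupt was ours and has been acknowledged. */
static int clear_intr_ppc_model(void)
{
        uint32_t status = outbound_intr_status;

        if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT))
                return 1;                       /* not our interrupt */

        outbound_doorbell_clear = status;       /* write back to ack */
        return 0;
}

int main(void)
{
        outbound_intr_status = MFI_REPLY_1078_MESSAGE_INTERRUPT;
        printf("claimed: %s\n", clear_intr_ppc_model() == 0 ? "yes" : "no");
        return 0;
}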
inline void +megasas_fire_cmd_ppc(dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) +{ + writel((frame_phys_addr | (frame_count<<1))|1, + &(regs)->inbound_queue_port); +} + +static struct megasas_instance_template megasas_instance_template_ppc = { + + .fire_cmd = megasas_fire_cmd_ppc, + .enable_intr = megasas_enable_intr_ppc, + .clear_intr = megasas_clear_intr_ppc, + .read_fw_status_reg = megasas_read_fw_status_reg_ppc, +}; + +/** +* This is the end of set of functions & definitions +* specific to ppc (deviceid : 0x60) controllers +*/ + +/** * megasas_disable_intr - Disables interrupts * @regs: MFI register set */ @@ -1607,7 +1693,17 @@ static int megasas_init_mfi(struct megasas_instance *instance) reg_set = instance->reg_set; - instance->instancet = &megasas_instance_template_xscale; + switch(instance->pdev->device) + { + case PCI_DEVICE_ID_LSI_SAS1078R: + instance->instancet = &megasas_instance_template_ppc; + break; + case PCI_DEVICE_ID_LSI_SAS1064R: + case PCI_DEVICE_ID_DELL_PERC5: + default: + instance->instancet = &megasas_instance_template_xscale; + break; + } /* * We expect the FW state to be READY @@ -1983,6 +2079,7 @@ static int megasas_io_attach(struct megasas_instance *instance) host->max_channel = MEGASAS_MAX_CHANNELS - 1; host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; host->max_lun = MEGASAS_MAX_LUN; + host->max_cmd_len = 16; /* * Notify the mid-layer about the new controller diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index d6d166c0663f..89639f0c38ef 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -18,9 +18,9 @@ /** * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "00.00.02.02" -#define MEGASAS_RELDATE "Jan 23, 2006" -#define MEGASAS_EXT_VERSION "Mon Jan 23 14:09:01 PST 2006" +#define MEGASAS_VERSION "00.00.02.04" +#define MEGASAS_RELDATE "Feb 03, 2006" +#define MEGASAS_EXT_VERSION "Fri Feb 03 14:31:44 PST 2006" /* * ===================================== * MegaRAID SAS MFI firmware definitions @@ -553,31 +553,46 @@ struct megasas_ctrl_info { #define MFI_OB_INTR_STATUS_MASK 0x00000002 #define MFI_POLL_TIMEOUT_SECS 10 +#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 +#define PCI_DEVICE_ID_LSI_SAS1078R 0x00000060 + struct megasas_register_set { + u32 reserved_0[4]; /*0000h*/ - u32 reserved_0[4]; /*0000h */ + u32 inbound_msg_0; /*0010h*/ + u32 inbound_msg_1; /*0014h*/ + u32 outbound_msg_0; /*0018h*/ + u32 outbound_msg_1; /*001Ch*/ - u32 inbound_msg_0; /*0010h */ - u32 inbound_msg_1; /*0014h */ - u32 outbound_msg_0; /*0018h */ - u32 outbound_msg_1; /*001Ch */ + u32 inbound_doorbell; /*0020h*/ + u32 inbound_intr_status; /*0024h*/ + u32 inbound_intr_mask; /*0028h*/ - u32 inbound_doorbell; /*0020h */ - u32 inbound_intr_status; /*0024h */ - u32 inbound_intr_mask; /*0028h */ + u32 outbound_doorbell; /*002Ch*/ + u32 outbound_intr_status; /*0030h*/ + u32 outbound_intr_mask; /*0034h*/ - u32 outbound_doorbell; /*002Ch */ - u32 outbound_intr_status; /*0030h */ - u32 outbound_intr_mask; /*0034h */ + u32 reserved_1[2]; /*0038h*/ - u32 reserved_1[2]; /*0038h */ + u32 inbound_queue_port; /*0040h*/ + u32 outbound_queue_port; /*0044h*/ - u32 inbound_queue_port; /*0040h */ - u32 outbound_queue_port; /*0044h */ + u32 reserved_2[22]; /*0048h*/ - u32 reserved_2; /*004Ch */ + u32 outbound_doorbell_clear; /*00A0h*/ - u32 index_registers[1004]; /*0050h */ + u32 reserved_3[3]; /*00A4h*/ + + u32 outbound_scratch_pad ; /*00B0h*/ + + u32 reserved_4[3]; /*00B4h*/ + + 
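megasas_init_mfi() now selects one of the instance templates by PCI device id instead of hard-coding the xscale ops. A sketch of that dispatch; only the 0x60 (SAS1078R) id is taken from the hunk, the other id values are assumptions for illustration and the ops entries are empty stubs:

#include <stdio.h>

#define DEV_SAS1078R    0x0060          /* ppc IOP, from the hunk */
#define DEV_SAS1064R    0x0411          /* illustrative */
#define DEV_PERC5       0x0015          /* illustrative */

struct instance_template {
        const char *name;
        void (*fire_cmd)(void);
};

static void fire_xscale(void) { }
static void fire_ppc(void) { }

static const struct instance_template tmpl_xscale = { "xscale", fire_xscale };
static const struct instance_template tmpl_ppc = { "ppc", fire_ppc };

static const struct instance_template *pick_template(unsigned int device)
{
        switch (device) {
        case DEV_SAS1078R:
                return &tmpl_ppc;               /* ppc IOP register set */
        case DEV_SAS1064R:
        case DEV_PERC5:
        default:
                return &tmpl_xscale;            /* everything else stays xscale */
        }
}

int main(void)
{
        printf("%s\n", pick_template(DEV_SAS1078R)->name);     /* "ppc" */
        return 0;
}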
u32 inbound_low_queue_port ; /*00C0h*/ + + u32 inbound_high_queue_port ; /*00C4h*/ + + u32 reserved_5; /*00C8h*/ + u32 index_registers[820]; /*00CCh*/ } __attribute__ ((packed)); diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index b17ee62dd1a9..92b3e13e9061 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -7,7 +7,6 @@ #include "qla_def.h" #include <linux/vmalloc.h> -#include <scsi/scsi_transport_fc.h> /* SYSFS attributes --------------------------------------------------------- */ @@ -114,7 +113,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf, loff_t off, struct device, kobj))); unsigned long flags; - if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size) + if (!capable(CAP_SYS_ADMIN) || off != 0) return 0; /* Read NVRAM. */ @@ -123,7 +122,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf, loff_t off, ha->nvram_size); spin_unlock_irqrestore(&ha->hardware_lock, flags); - return (count); + return ha->nvram_size; } static ssize_t @@ -175,19 +174,150 @@ static struct bin_attribute sysfs_nvram_attr = { .mode = S_IRUSR | S_IWUSR, .owner = THIS_MODULE, }, - .size = 0, + .size = 512, .read = qla2x00_sysfs_read_nvram, .write = qla2x00_sysfs_write_nvram, }; +static ssize_t +qla2x00_sysfs_read_optrom(struct kobject *kobj, char *buf, loff_t off, + size_t count) +{ + struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, + struct device, kobj))); + + if (ha->optrom_state != QLA_SREADING) + return 0; + if (off > ha->optrom_size) + return 0; + if (off + count > ha->optrom_size) + count = ha->optrom_size - off; + + memcpy(buf, &ha->optrom_buffer[off], count); + + return count; +} + +static ssize_t +qla2x00_sysfs_write_optrom(struct kobject *kobj, char *buf, loff_t off, + size_t count) +{ + struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, + struct device, kobj))); + + if (ha->optrom_state != QLA_SWRITING) + return -EINVAL; + if (off > ha->optrom_size) + return -ERANGE; + if (off + count > ha->optrom_size) + count = ha->optrom_size - off; + + memcpy(&ha->optrom_buffer[off], buf, count); + + return count; +} + +static struct bin_attribute sysfs_optrom_attr = { + .attr = { + .name = "optrom", + .mode = S_IRUSR | S_IWUSR, + .owner = THIS_MODULE, + }, + .size = OPTROM_SIZE_24XX, + .read = qla2x00_sysfs_read_optrom, + .write = qla2x00_sysfs_write_optrom, +}; + +static ssize_t +qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, char *buf, loff_t off, + size_t count) +{ + struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, + struct device, kobj))); + int val; + + if (off) + return 0; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + switch (val) { + case 0: + if (ha->optrom_state != QLA_SREADING && + ha->optrom_state != QLA_SWRITING) + break; + + ha->optrom_state = QLA_SWAITING; + vfree(ha->optrom_buffer); + ha->optrom_buffer = NULL; + break; + case 1: + if (ha->optrom_state != QLA_SWAITING) + break; + + ha->optrom_state = QLA_SREADING; + ha->optrom_buffer = (uint8_t *)vmalloc(ha->optrom_size); + if (ha->optrom_buffer == NULL) { + qla_printk(KERN_WARNING, ha, + "Unable to allocate memory for optrom retrieval " + "(%x).\n", ha->optrom_size); + + ha->optrom_state = QLA_SWAITING; + return count; + } + + memset(ha->optrom_buffer, 0, ha->optrom_size); + ha->isp_ops.read_optrom(ha, ha->optrom_buffer, 0, + ha->optrom_size); + break; + case 2: + if (ha->optrom_state != QLA_SWAITING) + break; + + ha->optrom_state = QLA_SWRITING; + ha->optrom_buffer 
= (uint8_t *)vmalloc(ha->optrom_size); + if (ha->optrom_buffer == NULL) { + qla_printk(KERN_WARNING, ha, + "Unable to allocate memory for optrom update " + "(%x).\n", ha->optrom_size); + + ha->optrom_state = QLA_SWAITING; + return count; + } + memset(ha->optrom_buffer, 0, ha->optrom_size); + break; + case 3: + if (ha->optrom_state != QLA_SWRITING) + break; + + ha->isp_ops.write_optrom(ha, ha->optrom_buffer, 0, + ha->optrom_size); + break; + } + return count; +} + +static struct bin_attribute sysfs_optrom_ctl_attr = { + .attr = { + .name = "optrom_ctl", + .mode = S_IWUSR, + .owner = THIS_MODULE, + }, + .size = 0, + .write = qla2x00_sysfs_write_optrom_ctl, +}; + void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha) { struct Scsi_Host *host = ha->host; sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr); - sysfs_nvram_attr.size = ha->nvram_size; sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); + sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr); + sysfs_create_bin_file(&host->shost_gendev.kobj, + &sysfs_optrom_ctl_attr); } void @@ -197,6 +327,12 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha) sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr); sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); + sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr); + sysfs_remove_bin_file(&host->shost_gendev.kobj, + &sysfs_optrom_ctl_attr); + + if (ha->beacon_blink_led == 1) + ha->isp_ops.beacon_off(ha); } /* Scsi_Host attributes. */ @@ -384,6 +520,50 @@ qla2x00_zio_timer_store(struct class_device *cdev, const char *buf, return strlen(buf); } +static ssize_t +qla2x00_beacon_show(struct class_device *cdev, char *buf) +{ + scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev)); + int len = 0; + + if (ha->beacon_blink_led) + len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n"); + else + len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); + return len; +} + +static ssize_t +qla2x00_beacon_store(struct class_device *cdev, const char *buf, + size_t count) +{ + scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev)); + int val = 0; + int rval; + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) + return -EPERM; + + if (test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) { + qla_printk(KERN_WARNING, ha, + "Abort ISP active -- ignoring beacon request.\n"); + return -EBUSY; + } + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + if (val) + rval = ha->isp_ops.beacon_on(ha); + else + rval = ha->isp_ops.beacon_off(ha); + + if (rval != QLA_SUCCESS) + count = 0; + + return count; +} + static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); @@ -398,6 +578,8 @@ static CLASS_DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store); static CLASS_DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show, qla2x00_zio_timer_store); +static CLASS_DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show, + qla2x00_beacon_store); struct class_device_attribute *qla2x00_host_attrs[] = { &class_device_attr_driver_version, @@ -411,6 +593,7 @@ struct class_device_attribute *qla2x00_host_attrs[] = { &class_device_attr_state, &class_device_attr_zio, &class_device_attr_zio_timer, + &class_device_attr_beacon, NULL, }; @@ -426,6 +609,49 @@ qla2x00_get_host_port_id(struct Scsi_Host *shost) } static void +qla2x00_get_host_speed(struct Scsi_Host *shost) +{ + scsi_qla_host_t *ha = to_qla_host(shost); + uint32_t speed = 0; 
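The optrom_ctl attribute added above implements a small state machine: writing 1 snapshots the option ROM into a vmalloc'd buffer for reading, 2 stages an empty buffer for writing, 3 burns the staged buffer, and 0 releases the buffer. A compact model of those transitions; the 0/1/2 values mirror the QLA_SWAITING/QLA_SREADING/QLA_SWRITING defines in qla_def.h:

#include <stdio.h>

enum optrom_state { SWAITING, SREADING, SWRITING };

static enum optrom_state state = SWAITING;
static int burned;

static void optrom_ctl_write(int val)
{
        switch (val) {
        case 0:                         /* release the staging buffer */
                if (state == SREADING || state == SWRITING)
                        state = SWAITING;
                break;
        case 1:                         /* snapshot the option ROM for reading */
                if (state == SWAITING)
                        state = SREADING;
                break;
        case 2:                         /* stage an empty buffer for writing */
                if (state == SWAITING)
                        state = SWRITING;
                break;
        case 3:                         /* commit, only valid while writing */
                if (state == SWRITING)
                        burned = 1;     /* isp_ops.write_optrom() in the driver */
                break;
        }
}

int main(void)
{
        optrom_ctl_write(1);            /* like: echo 1 > .../optrom_ctl */
        printf("state after '1': %d\n", state);         /* 1 == SREADING */
        optrom_ctl_write(0);            /* release the buffer again */
        printf("burned: %d\n", burned);                 /* 0 */
        return 0;
}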
+ + switch (ha->link_data_rate) { + case LDR_1GB: + speed = 1; + break; + case LDR_2GB: + speed = 2; + break; + case LDR_4GB: + speed = 4; + break; + } + fc_host_speed(shost) = speed; +} + +static void +qla2x00_get_host_port_type(struct Scsi_Host *shost) +{ + scsi_qla_host_t *ha = to_qla_host(shost); + uint32_t port_type = FC_PORTTYPE_UNKNOWN; + + switch (ha->current_topology) { + case ISP_CFG_NL: + port_type = FC_PORTTYPE_LPORT; + break; + case ISP_CFG_FL: + port_type = FC_PORTTYPE_NLPORT; + break; + case ISP_CFG_N: + port_type = FC_PORTTYPE_PTP; + break; + case ISP_CFG_F: + port_type = FC_PORTTYPE_NPORT; + break; + } + fc_host_port_type(shost) = port_type; +} + +static void qla2x00_get_starget_node_name(struct scsi_target *starget) { struct Scsi_Host *host = dev_to_shost(starget->dev.parent); @@ -512,6 +738,41 @@ qla2x00_issue_lip(struct Scsi_Host *shost) return 0; } +static struct fc_host_statistics * +qla2x00_get_fc_host_stats(struct Scsi_Host *shost) +{ + scsi_qla_host_t *ha = to_qla_host(shost); + int rval; + uint16_t mb_stat[1]; + link_stat_t stat_buf; + struct fc_host_statistics *pfc_host_stat; + + pfc_host_stat = &ha->fc_host_stat; + memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); + + if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) { + rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf, + sizeof(stat_buf) / 4, mb_stat); + } else { + rval = qla2x00_get_link_status(ha, ha->loop_id, &stat_buf, + mb_stat); + } + if (rval != 0) { + qla_printk(KERN_WARNING, ha, + "Unable to retrieve host statistics (%d).\n", mb_stat[0]); + return pfc_host_stat; + } + + pfc_host_stat->link_failure_count = stat_buf.link_fail_cnt; + pfc_host_stat->loss_of_sync_count = stat_buf.loss_sync_cnt; + pfc_host_stat->loss_of_signal_count = stat_buf.loss_sig_cnt; + pfc_host_stat->prim_seq_protocol_err_count = stat_buf.prim_seq_err_cnt; + pfc_host_stat->invalid_tx_word_count = stat_buf.inval_xmit_word_cnt; + pfc_host_stat->invalid_crc_count = stat_buf.inval_crc_cnt; + + return pfc_host_stat; +} + struct fc_function_template qla2xxx_transport_functions = { .show_host_node_name = 1, @@ -520,6 +781,10 @@ struct fc_function_template qla2xxx_transport_functions = { .get_host_port_id = qla2x00_get_host_port_id, .show_host_port_id = 1, + .get_host_speed = qla2x00_get_host_speed, + .show_host_speed = 1, + .get_host_port_type = qla2x00_get_host_port_type, + .show_host_port_type = 1, .dd_fcrport_size = sizeof(struct fc_port *), .show_rport_supported_classes = 1, @@ -536,6 +801,7 @@ struct fc_function_template qla2xxx_transport_functions = { .show_rport_dev_loss_tmo = 1, .issue_fc_host_lip = qla2x00_issue_lip, + .get_fc_host_stats = qla2x00_get_fc_host_stats, }; void diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index bad066e5772a..b31a03bbd14f 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -29,6 +29,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> +#include <scsi/scsi_transport_fc.h> #if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE) #if defined(CONFIG_SCSI_QLA21XX) || defined(CONFIG_SCSI_QLA21XX_MODULE) @@ -181,6 +182,13 @@ #define WRT_REG_DWORD(addr, data) writel(data,addr) /* + * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an + * 133Mhz slot. + */ +#define RD_REG_WORD_PIO(addr) (inw((unsigned long)addr)) +#define WRT_REG_WORD_PIO(addr, data) (outw(data,(unsigned long)addr)) + +/* * Fibre Channel device definitions. 
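qla2x00_get_host_speed() above maps the firmware's link_data_rate encoding onto the Gb/s value exported through the FC transport class. The encoding (LDR_1GB=0, LDR_2GB=1, LDR_4GB=3) comes from the qla_def.h hunk further down; a trivial sketch of the mapping:

#include <stdio.h>

/* Encoding taken from the qla_def.h hunk below. */
enum { LDR_1GB = 0, LDR_2GB = 1, LDR_4GB = 3, LDR_UNKNOWN = 0xFFFF };

static unsigned int ldr_to_gbps(unsigned int ldr)
{
        switch (ldr) {
        case LDR_1GB: return 1;
        case LDR_2GB: return 2;
        case LDR_4GB: return 4;
        default:      return 0;         /* unknown or link down */
        }
}

int main(void)
{
        printf("%u Gb/s\n", ldr_to_gbps(LDR_2GB));      /* 2 Gb/s */
        return 0;
}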
*/ #define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */ @@ -432,6 +440,9 @@ struct device_reg_2xxx { #define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040 #define GPIO_LED_GREEN_OFF_AMBER_ON 0x0080 #define GPIO_LED_GREEN_ON_AMBER_ON 0x00C0 +#define GPIO_LED_ALL_OFF 0x0000 +#define GPIO_LED_RED_ON_OTHER_OFF 0x0001 /* isp2322 */ +#define GPIO_LED_RGA_ON 0x00C1 /* isp2322: red green amber */ union { struct { @@ -2199,6 +2210,15 @@ struct isp_operations { void (*fw_dump) (struct scsi_qla_host *, int); void (*ascii_fw_dump) (struct scsi_qla_host *); + + int (*beacon_on) (struct scsi_qla_host *); + int (*beacon_off) (struct scsi_qla_host *); + void (*beacon_blink) (struct scsi_qla_host *); + + uint8_t * (*read_optrom) (struct scsi_qla_host *, uint8_t *, + uint32_t, uint32_t); + int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t, + uint32_t); }; /* @@ -2331,6 +2351,10 @@ typedef struct scsi_qla_host { uint16_t min_external_loopid; /* First external loop Id */ uint16_t link_data_rate; /* F/W operating speed */ +#define LDR_1GB 0 +#define LDR_2GB 1 +#define LDR_4GB 3 +#define LDR_UNKNOWN 0xFFFF uint8_t current_topology; uint8_t prev_topology; @@ -2486,12 +2510,26 @@ typedef struct scsi_qla_host { uint8_t *port_name; uint32_t isp_abort_cnt; + /* Option ROM information. */ + char *optrom_buffer; + uint32_t optrom_size; + int optrom_state; +#define QLA_SWAITING 0 +#define QLA_SREADING 1 +#define QLA_SWRITING 2 + /* Needed for BEACON */ uint16_t beacon_blink_led; - uint16_t beacon_green_on; + uint8_t beacon_color_state; +#define QLA_LED_GRN_ON 0x01 +#define QLA_LED_YLW_ON 0x02 +#define QLA_LED_ABR_ON 0x04 +#define QLA_LED_ALL_ON 0x07 /* yellow, green, amber. */ + /* ISP2322: red, green, amber. */ uint16_t zio_mode; uint16_t zio_timer; + struct fc_host_statistics fc_host_stat; } scsi_qla_host_t; @@ -2557,7 +2595,9 @@ struct _qla2x00stats { /* * Flash support definitions */ -#define FLASH_IMAGE_SIZE 131072 +#define OPTROM_SIZE_2300 0x20000 +#define OPTROM_SIZE_2322 0x100000 +#define OPTROM_SIZE_24XX 0x100000 #include "qla_gbl.h" #include "qla_dbg.h" diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 35266bd5d538..ffdc2680f049 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -75,12 +75,12 @@ extern void qla2x00_cmd_timeout(srb_t *); extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int); -extern void qla2x00_blink_led(scsi_qla_host_t *); - extern int qla2x00_down_timeout(struct semaphore *, unsigned long); extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *); +extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *); + /* * Global Function Prototypes in qla_iocb.c source file. 
*/ @@ -185,6 +185,13 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *, extern int qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map); +extern int +qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, link_stat_t *, + uint16_t *); + +extern int +qla24xx_get_isp_stats(scsi_qla_host_t *, uint32_t *, uint32_t, uint16_t *); + extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *); extern int qla24xx_abort_target(fc_port_t *); @@ -228,6 +235,22 @@ extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, uint32_t); +extern int qla2x00_beacon_on(struct scsi_qla_host *); +extern int qla2x00_beacon_off(struct scsi_qla_host *); +extern void qla2x00_beacon_blink(struct scsi_qla_host *); +extern int qla24xx_beacon_on(struct scsi_qla_host *); +extern int qla24xx_beacon_off(struct scsi_qla_host *); +extern void qla24xx_beacon_blink(struct scsi_qla_host *); + +extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *, + uint32_t, uint32_t); +extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *, + uint32_t, uint32_t); +extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, + uint32_t, uint32_t); +extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *, + uint32_t, uint32_t); + /* * Global Function Prototypes in qla_dbg.c source file. */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index e67bb0997818..634ee174bff2 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -8,7 +8,6 @@ #include <linux/delay.h> #include <linux/vmalloc.h> -#include <scsi/scsi_transport_fc.h> #include "qla_devtbl.h" diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 7ec0b8d6f07b..6544b6d0891d 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -814,6 +814,7 @@ qla24xx_start_scsi(srb_t *sp) cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); /* Load SCSI command packet. 
*/ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 71a46fcee8cc..42aa7a7c1a73 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -402,9 +402,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) break; case MBA_LOOP_UP: /* Loop Up Event */ - ha->link_data_rate = 0; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { link_speed = link_speeds[0]; + ha->link_data_rate = LDR_1GB; } else { link_speed = link_speeds[LS_UNKNOWN]; if (mb[1] < 5) @@ -436,7 +436,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) } ha->flags.management_server_logged_in = 0; - ha->link_data_rate = 0; + ha->link_data_rate = LDR_UNKNOWN; if (ql2xfdmienable) set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 3099b379de9d..363dfdd042b0 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -7,7 +7,6 @@ #include "qla_def.h" #include <linux/delay.h> -#include <scsi/scsi_transport_fc.h> static void qla2x00_mbx_sem_timeout(unsigned long data) @@ -1874,7 +1873,8 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma, mcp->mb[3] = LSW(id_list_dma); mcp->mb[6] = MSW(MSD(id_list_dma)); mcp->mb[7] = LSW(MSD(id_list_dma)); - mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2; + mcp->mb[8] = 0; + mcp->out_mb |= MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; } else { mcp->mb[1] = MSW(id_list_dma); mcp->mb[2] = LSW(id_list_dma); @@ -2017,8 +2017,109 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map) return rval; } +#endif + +/* + * qla2x00_get_link_status + * + * Input: + * ha = adapter block pointer. + * loop_id = device loop ID. + * ret_buf = pointer to link status return buffer. + * + * Returns: + * 0 = success. + * BIT_0 = mem alloc error. + * BIT_1 = mailbox error. + */ +int +qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id, + link_stat_t *ret_buf, uint16_t *status) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + link_stat_t *stat_buf; + dma_addr_t stat_buf_dma; + + DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) + + stat_buf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &stat_buf_dma); + if (stat_buf == NULL) { + DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", + __func__, ha->host_no)); + return BIT_0; + } + memset(stat_buf, 0, sizeof(link_stat_t)); + + mcp->mb[0] = MBC_GET_LINK_STATUS; + mcp->mb[2] = MSW(stat_buf_dma); + mcp->mb[3] = LSW(stat_buf_dma); + mcp->mb[6] = MSW(MSD(stat_buf_dma)); + mcp->mb[7] = LSW(MSD(stat_buf_dma)); + mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; + mcp->in_mb = MBX_0; + if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) { + mcp->mb[1] = loop_id; + mcp->mb[4] = 0; + mcp->mb[10] = 0; + mcp->out_mb |= MBX_10|MBX_4|MBX_1; + mcp->in_mb |= MBX_1; + } else if (HAS_EXTENDED_IDS(ha)) { + mcp->mb[1] = loop_id; + mcp->mb[10] = 0; + mcp->out_mb |= MBX_10|MBX_1; + } else { + mcp->mb[1] = loop_id << 8; + mcp->out_mb |= MBX_1; + } + mcp->tov = 30; + mcp->flags = IOCTL_CMD; + rval = qla2x00_mailbox_command(ha, mcp); + + if (rval == QLA_SUCCESS) { + if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { + DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", + __func__, ha->host_no, mcp->mb[0]);) + status[0] = mcp->mb[0]; + rval = BIT_1; + } else { + /* copy over data -- firmware data is LE. 
*/ + ret_buf->link_fail_cnt = + le32_to_cpu(stat_buf->link_fail_cnt); + ret_buf->loss_sync_cnt = + le32_to_cpu(stat_buf->loss_sync_cnt); + ret_buf->loss_sig_cnt = + le32_to_cpu(stat_buf->loss_sig_cnt); + ret_buf->prim_seq_err_cnt = + le32_to_cpu(stat_buf->prim_seq_err_cnt); + ret_buf->inval_xmit_word_cnt = + le32_to_cpu(stat_buf->inval_xmit_word_cnt); + ret_buf->inval_crc_cnt = + le32_to_cpu(stat_buf->inval_crc_cnt); + + DEBUG11(printk("%s(%ld): stat dump: fail_cnt=%d " + "loss_sync=%d loss_sig=%d seq_err=%d " + "inval_xmt_word=%d inval_crc=%d.\n", __func__, + ha->host_no, stat_buf->link_fail_cnt, + stat_buf->loss_sync_cnt, stat_buf->loss_sig_cnt, + stat_buf->prim_seq_err_cnt, + stat_buf->inval_xmit_word_cnt, + stat_buf->inval_crc_cnt);) + } + } else { + /* Failed. */ + DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, + ha->host_no, rval);) + rval = BIT_1; + } + + dma_pool_free(ha->s_dma_pool, stat_buf, stat_buf_dma); -uint8_t + return rval; +} + +int qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords, uint16_t *status) { @@ -2080,7 +2181,6 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords, return rval; } -#endif int qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 5866a7c706a8..9f91f1a20542 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -366,6 +366,12 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) goto qc_fail_command; } + /* Close window on fcport/rport state-transitioning. */ + if (!*(fc_port_t **)rport->dd_data) { + cmd->result = DID_IMM_RETRY << 16; + goto qc_fail_command; + } + if (atomic_read(&fcport->state) != FCS_ONLINE) { if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || atomic_read(&ha->loop_state) == LOOP_DEAD) { @@ -421,6 +427,12 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) goto qc24_fail_command; } + /* Close window on fcport/rport state-transitioning. */ + if (!*(fc_port_t **)rport->dd_data) { + cmd->result = DID_IMM_RETRY << 16; + goto qc24_fail_command; + } + if (atomic_read(&fcport->state) != FCS_ONLINE) { if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || atomic_read(&ha->loop_state) == LOOP_DEAD) { @@ -513,7 +525,7 @@ qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd) * Success (Adapter is online) : 0 * Failed (Adapter is offline/disabled) : 1 */ -static int +int qla2x00_wait_for_hba_online(scsi_qla_host_t *ha) { int return_status; @@ -1312,6 +1324,8 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info) ha->ports = MAX_BUSES; ha->init_cb_size = sizeof(init_cb_t); ha->mgmt_svr_loop_id = MANAGEMENT_SERVER; + ha->link_data_rate = LDR_UNKNOWN; + ha->optrom_size = OPTROM_SIZE_2300; /* Assign ISP specific operations. 
*/ ha->isp_ops.pci_config = qla2100_pci_config; @@ -1339,6 +1353,8 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info) ha->isp_ops.write_nvram = qla2x00_write_nvram_data; ha->isp_ops.fw_dump = qla2100_fw_dump; ha->isp_ops.ascii_fw_dump = qla2100_ascii_fw_dump; + ha->isp_ops.read_optrom = qla2x00_read_optrom_data; + ha->isp_ops.write_optrom = qla2x00_write_optrom_data; if (IS_QLA2100(ha)) { host->max_id = MAX_TARGETS_2100; ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; @@ -1364,7 +1380,12 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info) ha->isp_ops.intr_handler = qla2300_intr_handler; ha->isp_ops.fw_dump = qla2300_fw_dump; ha->isp_ops.ascii_fw_dump = qla2300_ascii_fw_dump; + ha->isp_ops.beacon_on = qla2x00_beacon_on; + ha->isp_ops.beacon_off = qla2x00_beacon_off; + ha->isp_ops.beacon_blink = qla2x00_beacon_blink; ha->gid_list_info_size = 6; + if (IS_QLA2322(ha) || IS_QLA6322(ha)) + ha->optrom_size = OPTROM_SIZE_2322; } else if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) { host->max_id = MAX_TARGETS_2200; ha->mbx_count = MAILBOX_REGISTER_COUNT; @@ -1400,7 +1421,13 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info) ha->isp_ops.write_nvram = qla24xx_write_nvram_data; ha->isp_ops.fw_dump = qla24xx_fw_dump; ha->isp_ops.ascii_fw_dump = qla24xx_ascii_fw_dump; + ha->isp_ops.read_optrom = qla24xx_read_optrom_data; + ha->isp_ops.write_optrom = qla24xx_write_optrom_data; + ha->isp_ops.beacon_on = qla24xx_beacon_on; + ha->isp_ops.beacon_off = qla24xx_beacon_off; + ha->isp_ops.beacon_blink = qla24xx_beacon_blink; ha->gid_list_info_size = 8; + ha->optrom_size = OPTROM_SIZE_24XX; } host->can_queue = ha->request_q_length + 128; @@ -1657,11 +1684,13 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport, spin_lock_irqsave(&fcport->rport_lock, flags); fcport->drport = rport; fcport->rport = NULL; + *(fc_port_t **)rport->dd_data = NULL; spin_unlock_irqrestore(&fcport->rport_lock, flags); set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); } else { spin_lock_irqsave(&fcport->rport_lock, flags); fcport->rport = NULL; + *(fc_port_t **)rport->dd_data = NULL; spin_unlock_irqrestore(&fcport->rport_lock, flags); fc_remote_port_delete(rport); } @@ -2066,6 +2095,8 @@ qla2x00_mem_free(scsi_qla_host_t *ha) ha->fw_dumped = 0; ha->fw_dump_reading = 0; ha->fw_dump_buffer = NULL; + + vfree(ha->optrom_buffer); } /* @@ -2314,6 +2345,9 @@ qla2x00_do_dpc(void *data) if (!ha->interrupts_on) ha->isp_ops.enable_intrs(ha); + if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags)) + ha->isp_ops.beacon_blink(ha); + ha->dpc_active = 0; } /* End of while(1) */ @@ -2491,6 +2525,12 @@ qla2x00_timer(scsi_qla_host_t *ha) atomic_read(&ha->loop_down_timer))); } + /* Check if beacon LED needs to be blinked */ + if (ha->beacon_blink_led == 1) { + set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags); + start_dpc++; + } + /* Schedule the DPC routine if needed */ if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || @@ -2499,6 +2539,7 @@ qla2x00_timer(scsi_qla_host_t *ha) start_dpc || test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) || test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || + test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) || test_bit(RELOGIN_NEEDED, &ha->dpc_flags)) && ha->dpc_wait && !ha->dpc_active) { diff --git a/drivers/scsi/qla2xxx/qla_rscn.c b/drivers/scsi/qla2xxx/qla_rscn.c index 2c3342108dd8..b70bebe18c01 100644 --- a/drivers/scsi/qla2xxx/qla_rscn.c +++ b/drivers/scsi/qla2xxx/qla_rscn.c @@ -6,8 +6,6 @@ 
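The beacon plumbing added to qla_os.c keeps register access out of timer context: the per-HBA timer only sets BEACON_BLINK_NEEDED while beacon_blink_led is on, and the DPC thread later test-and-clears the bit and calls isp_ops.beacon_blink(). A stand-alone model of that hand-off, with the flag word and counters as simple globals:

#include <stdio.h>

#define BEACON_BLINK_NEEDED     (1u << 0)

static unsigned long dpc_flags;
static int beacon_blink_led = 1;        /* set by beacon_on, cleared by beacon_off */
static int blinks;

static void timer_tick(void)
{
        if (beacon_blink_led)
                dpc_flags |= BEACON_BLINK_NEEDED;       /* set_bit() in the driver */
}

static void dpc_pass(void)
{
        if (dpc_flags & BEACON_BLINK_NEEDED) {          /* test_and_clear_bit() */
                dpc_flags &= ~BEACON_BLINK_NEEDED;
                blinks++;                               /* isp_ops.beacon_blink(ha) */
        }
}

int main(void)
{
        int i;

        for (i = 0; i < 3; i++) {
                timer_tick();
                dpc_pass();
        }
        printf("blinked %d times\n", blinks);           /* 3 */
        return 0;
}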
*/ #include "qla_def.h" -#include <scsi/scsi_transport_fc.h> - /** * IO descriptor handle definitions. * diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index f4d755a643e4..3866a5760f15 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -695,3 +695,966 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, return ret; } + + +static inline void +qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags) +{ + if (IS_QLA2322(ha)) { + /* Flip all colors. */ + if (ha->beacon_color_state == QLA_LED_ALL_ON) { + /* Turn off. */ + ha->beacon_color_state = 0; + *pflags = GPIO_LED_ALL_OFF; + } else { + /* Turn on. */ + ha->beacon_color_state = QLA_LED_ALL_ON; + *pflags = GPIO_LED_RGA_ON; + } + } else { + /* Flip green led only. */ + if (ha->beacon_color_state == QLA_LED_GRN_ON) { + /* Turn off. */ + ha->beacon_color_state = 0; + *pflags = GPIO_LED_GREEN_OFF_AMBER_OFF; + } else { + /* Turn on. */ + ha->beacon_color_state = QLA_LED_GRN_ON; + *pflags = GPIO_LED_GREEN_ON_AMBER_OFF; + } + } +} + +void +qla2x00_beacon_blink(struct scsi_qla_host *ha) +{ + uint16_t gpio_enable; + uint16_t gpio_data; + uint16_t led_color = 0; + unsigned long flags; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + if (ha->pio_address) + reg = (struct device_reg_2xxx __iomem *)ha->pio_address; + + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Save the Original GPIOE. */ + if (ha->pio_address) { + gpio_enable = RD_REG_WORD_PIO(®->gpioe); + gpio_data = RD_REG_WORD_PIO(®->gpiod); + } else { + gpio_enable = RD_REG_WORD(®->gpioe); + gpio_data = RD_REG_WORD(®->gpiod); + } + + /* Set the modified gpio_enable values */ + gpio_enable |= GPIO_LED_MASK; + + if (ha->pio_address) { + WRT_REG_WORD_PIO(®->gpioe, gpio_enable); + } else { + WRT_REG_WORD(®->gpioe, gpio_enable); + RD_REG_WORD(®->gpioe); + } + + qla2x00_flip_colors(ha, &led_color); + + /* Clear out any previously set LED color. */ + gpio_data &= ~GPIO_LED_MASK; + + /* Set the new input LED color to GPIOD. */ + gpio_data |= led_color; + + /* Set the modified gpio_data values */ + if (ha->pio_address) { + WRT_REG_WORD_PIO(®->gpiod, gpio_data); + } else { + WRT_REG_WORD(®->gpiod, gpio_data); + RD_REG_WORD(®->gpiod); + } + + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +int +qla2x00_beacon_on(struct scsi_qla_host *ha) +{ + uint16_t gpio_enable; + uint16_t gpio_data; + unsigned long flags; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; + ha->fw_options[1] |= FO1_DISABLE_GPIO6_7; + + if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { + qla_printk(KERN_WARNING, ha, + "Unable to update fw options (beacon on).\n"); + return QLA_FUNCTION_FAILED; + } + + if (ha->pio_address) + reg = (struct device_reg_2xxx __iomem *)ha->pio_address; + + /* Turn off LEDs. */ + spin_lock_irqsave(&ha->hardware_lock, flags); + if (ha->pio_address) { + gpio_enable = RD_REG_WORD_PIO(®->gpioe); + gpio_data = RD_REG_WORD_PIO(®->gpiod); + } else { + gpio_enable = RD_REG_WORD(®->gpioe); + gpio_data = RD_REG_WORD(®->gpiod); + } + gpio_enable |= GPIO_LED_MASK; + + /* Set the modified gpio_enable values. */ + if (ha->pio_address) { + WRT_REG_WORD_PIO(®->gpioe, gpio_enable); + } else { + WRT_REG_WORD(®->gpioe, gpio_enable); + RD_REG_WORD(®->gpioe); + } + + /* Clear out previously set LED colour. 
*/ + gpio_data &= ~GPIO_LED_MASK; + if (ha->pio_address) { + WRT_REG_WORD_PIO(®->gpiod, gpio_data); + } else { + WRT_REG_WORD(®->gpiod, gpio_data); + RD_REG_WORD(®->gpiod); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* + * Let the per HBA timer kick off the blinking process based on + * the following flags. No need to do anything else now. + */ + ha->beacon_blink_led = 1; + ha->beacon_color_state = 0; + + return QLA_SUCCESS; +} + +int +qla2x00_beacon_off(struct scsi_qla_host *ha) +{ + int rval = QLA_SUCCESS; + + ha->beacon_blink_led = 0; + + /* Set the on flag so when it gets flipped it will be off. */ + if (IS_QLA2322(ha)) + ha->beacon_color_state = QLA_LED_ALL_ON; + else + ha->beacon_color_state = QLA_LED_GRN_ON; + + ha->isp_ops.beacon_blink(ha); /* This turns green LED off */ + + ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; + ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7; + + rval = qla2x00_set_fw_options(ha, ha->fw_options); + if (rval != QLA_SUCCESS) + qla_printk(KERN_WARNING, ha, + "Unable to update fw options (beacon off).\n"); + return rval; +} + + +static inline void +qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags) +{ + /* Flip all colors. */ + if (ha->beacon_color_state == QLA_LED_ALL_ON) { + /* Turn off. */ + ha->beacon_color_state = 0; + *pflags = 0; + } else { + /* Turn on. */ + ha->beacon_color_state = QLA_LED_ALL_ON; + *pflags = GPDX_LED_YELLOW_ON | GPDX_LED_AMBER_ON; + } +} + +void +qla24xx_beacon_blink(struct scsi_qla_host *ha) +{ + uint16_t led_color = 0; + uint32_t gpio_data; + unsigned long flags; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + /* Save the Original GPIOD. */ + spin_lock_irqsave(&ha->hardware_lock, flags); + gpio_data = RD_REG_DWORD(®->gpiod); + + /* Enable the gpio_data reg for update. */ + gpio_data |= GPDX_LED_UPDATE_MASK; + + WRT_REG_DWORD(®->gpiod, gpio_data); + gpio_data = RD_REG_DWORD(®->gpiod); + + /* Set the color bits. */ + qla24xx_flip_colors(ha, &led_color); + + /* Clear out any previously set LED color. */ + gpio_data &= ~GPDX_LED_COLOR_MASK; + + /* Set the new input LED color to GPIOD. */ + gpio_data |= led_color; + + /* Set the modified gpio_data values. */ + WRT_REG_DWORD(®->gpiod, gpio_data); + gpio_data = RD_REG_DWORD(®->gpiod); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +int +qla24xx_beacon_on(struct scsi_qla_host *ha) +{ + uint32_t gpio_data; + unsigned long flags; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + if (ha->beacon_blink_led == 0) { + /* Enable firmware for update */ + ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL; + + if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) + return QLA_FUNCTION_FAILED; + + if (qla2x00_get_fw_options(ha, ha->fw_options) != + QLA_SUCCESS) { + qla_printk(KERN_WARNING, ha, + "Unable to update fw options (beacon on).\n"); + return QLA_FUNCTION_FAILED; + } + + spin_lock_irqsave(&ha->hardware_lock, flags); + gpio_data = RD_REG_DWORD(®->gpiod); + + /* Enable the gpio_data reg for update. */ + gpio_data |= GPDX_LED_UPDATE_MASK; + WRT_REG_DWORD(®->gpiod, gpio_data); + RD_REG_DWORD(®->gpiod); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } + + /* So all colors blink together. */ + ha->beacon_color_state = 0; + + /* Let the per HBA timer kick off the blinking process. 
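qla2x00_flip_colors() above simply toggles the LED pattern on every blink and remembers which half of the cycle it is in via beacon_color_state. A sketch of the non-ISP2322 (green-only) case; the "off" GPIO pattern below is an illustrative value, the driver uses its own GPIO_LED_* define for it:

#include <stdint.h>
#include <stdio.h>

#define GPIO_LED_GREEN_ON_AMBER_OFF     0x0040  /* from the qla_def.h hunk above */
#define GPIO_LED_OFF                    0x0000  /* illustrative "all off" pattern */
#define QLA_LED_GRN_ON                  0x01    /* from the qla_def.h hunk above */

static uint8_t beacon_color_state;

static uint16_t flip_green(void)
{
        if (beacon_color_state == QLA_LED_GRN_ON) {
                beacon_color_state = 0;                 /* was on -> turn off */
                return GPIO_LED_OFF;
        }
        beacon_color_state = QLA_LED_GRN_ON;            /* was off -> turn on */
        return GPIO_LED_GREEN_ON_AMBER_OFF;
}

int main(void)
{
        int i;

        for (i = 0; i < 4; i++)                         /* four ticks = two blinks */
                printf("gpio data bits: 0x%04x\n", flip_green());
        return 0;
}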
*/ + ha->beacon_blink_led = 1; + + return QLA_SUCCESS; +} + +int +qla24xx_beacon_off(struct scsi_qla_host *ha) +{ + uint32_t gpio_data; + unsigned long flags; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + ha->beacon_blink_led = 0; + ha->beacon_color_state = QLA_LED_ALL_ON; + + ha->isp_ops.beacon_blink(ha); /* Will flip to all off. */ + + /* Give control back to firmware. */ + spin_lock_irqsave(&ha->hardware_lock, flags); + gpio_data = RD_REG_DWORD(®->gpiod); + + /* Disable the gpio_data reg for update. */ + gpio_data &= ~GPDX_LED_UPDATE_MASK; + WRT_REG_DWORD(®->gpiod, gpio_data); + RD_REG_DWORD(®->gpiod); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL; + + if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { + qla_printk(KERN_WARNING, ha, + "Unable to update fw options (beacon off).\n"); + return QLA_FUNCTION_FAILED; + } + + if (qla2x00_get_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { + qla_printk(KERN_WARNING, ha, + "Unable to get fw options (beacon off).\n"); + return QLA_FUNCTION_FAILED; + } + + return QLA_SUCCESS; +} + + +/* + * Flash support routines + */ + +/** + * qla2x00_flash_enable() - Setup flash for reading and writing. + * @ha: HA context + */ +static void +qla2x00_flash_enable(scsi_qla_host_t *ha) +{ + uint16_t data; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + data = RD_REG_WORD(®->ctrl_status); + data |= CSR_FLASH_ENABLE; + WRT_REG_WORD(®->ctrl_status, data); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ +} + +/** + * qla2x00_flash_disable() - Disable flash and allow RISC to run. + * @ha: HA context + */ +static void +qla2x00_flash_disable(scsi_qla_host_t *ha) +{ + uint16_t data; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + data = RD_REG_WORD(®->ctrl_status); + data &= ~(CSR_FLASH_ENABLE); + WRT_REG_WORD(®->ctrl_status, data); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ +} + +/** + * qla2x00_read_flash_byte() - Reads a byte from flash + * @ha: HA context + * @addr: Address in flash to read + * + * A word is read from the chip, but, only the lower byte is valid. + * + * Returns the byte read from flash @addr. + */ +static uint8_t +qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr) +{ + uint16_t data; + uint16_t bank_select; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + bank_select = RD_REG_WORD(®->ctrl_status); + + if (IS_QLA2322(ha) || IS_QLA6322(ha)) { + /* Specify 64K address range: */ + /* clear out Module Select and Flash Address bits [19:16]. */ + bank_select &= ~0xf8; + bank_select |= addr >> 12 & 0xf0; + bank_select |= CSR_FLASH_64K_BANK; + WRT_REG_WORD(®->ctrl_status, bank_select); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ + + WRT_REG_WORD(®->flash_address, (uint16_t)addr); + data = RD_REG_WORD(®->flash_data); + + return (uint8_t)data; + } + + /* Setup bit 16 of flash address. */ + if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) { + bank_select |= CSR_FLASH_64K_BANK; + WRT_REG_WORD(®->ctrl_status, bank_select); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ + } else if (((addr & BIT_16) == 0) && + (bank_select & CSR_FLASH_64K_BANK)) { + bank_select &= ~(CSR_FLASH_64K_BANK); + WRT_REG_WORD(®->ctrl_status, bank_select); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ + } + + /* Always perform IO mapped accesses to the FLASH registers. 
*/ + if (ha->pio_address) { + uint16_t data2; + + reg = (struct device_reg_2xxx __iomem *)ha->pio_address; + WRT_REG_WORD_PIO(®->flash_address, (uint16_t)addr); + do { + data = RD_REG_WORD_PIO(®->flash_data); + barrier(); + cpu_relax(); + data2 = RD_REG_WORD_PIO(®->flash_data); + } while (data != data2); + } else { + WRT_REG_WORD(®->flash_address, (uint16_t)addr); + data = qla2x00_debounce_register(®->flash_data); + } + + return (uint8_t)data; +} + +/** + * qla2x00_write_flash_byte() - Write a byte to flash + * @ha: HA context + * @addr: Address in flash to write + * @data: Data to write + */ +static void +qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data) +{ + uint16_t bank_select; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + bank_select = RD_REG_WORD(®->ctrl_status); + if (IS_QLA2322(ha) || IS_QLA6322(ha)) { + /* Specify 64K address range: */ + /* clear out Module Select and Flash Address bits [19:16]. */ + bank_select &= ~0xf8; + bank_select |= addr >> 12 & 0xf0; + bank_select |= CSR_FLASH_64K_BANK; + WRT_REG_WORD(®->ctrl_status, bank_select); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ + + WRT_REG_WORD(®->flash_address, (uint16_t)addr); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->flash_data, (uint16_t)data); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ + + return; + } + + /* Setup bit 16 of flash address. */ + if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) { + bank_select |= CSR_FLASH_64K_BANK; + WRT_REG_WORD(®->ctrl_status, bank_select); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ + } else if (((addr & BIT_16) == 0) && + (bank_select & CSR_FLASH_64K_BANK)) { + bank_select &= ~(CSR_FLASH_64K_BANK); + WRT_REG_WORD(®->ctrl_status, bank_select); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ + } + + /* Always perform IO mapped accesses to the FLASH registers. */ + if (ha->pio_address) { + reg = (struct device_reg_2xxx __iomem *)ha->pio_address; + WRT_REG_WORD_PIO(®->flash_address, (uint16_t)addr); + WRT_REG_WORD_PIO(®->flash_data, (uint16_t)data); + } else { + WRT_REG_WORD(®->flash_address, (uint16_t)addr); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->flash_data, (uint16_t)data); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ + } +} + +/** + * qla2x00_poll_flash() - Polls flash for completion. + * @ha: HA context + * @addr: Address in flash to poll + * @poll_data: Data to be polled + * @man_id: Flash manufacturer ID + * @flash_id: Flash ID + * + * This function polls the device until bit 7 of what is read matches data + * bit 7 or until data bit 5 becomes a 1. If that hapens, the flash ROM timed + * out (a fatal error). The flash book recommeds reading bit 7 again after + * reading bit 5 as a 1. + * + * Returns 0 on success, else non-zero. + */ +static int +qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data, + uint8_t man_id, uint8_t flash_id) +{ + int status; + uint8_t flash_data; + uint32_t cnt; + + status = 1; + + /* Wait for 30 seconds for command to finish. 
*/ + poll_data &= BIT_7; + for (cnt = 3000000; cnt; cnt--) { + flash_data = qla2x00_read_flash_byte(ha, addr); + if ((flash_data & BIT_7) == poll_data) { + status = 0; + break; + } + + if (man_id != 0x40 && man_id != 0xda) { + if ((flash_data & BIT_5) && cnt > 2) + cnt = 2; + } + udelay(10); + barrier(); + } + return status; +} + +#define IS_OEM_001(ha) \ + ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2322 && \ + (ha)->pdev->subsystem_vendor == 0x1028 && \ + (ha)->pdev->subsystem_device == 0x0170) + +/** + * qla2x00_program_flash_address() - Programs a flash address + * @ha: HA context + * @addr: Address in flash to program + * @data: Data to be written in flash + * @man_id: Flash manufacturer ID + * @flash_id: Flash ID + * + * Returns 0 on success, else non-zero. + */ +static int +qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data, + uint8_t man_id, uint8_t flash_id) +{ + /* Write Program Command Sequence. */ + if (IS_OEM_001(ha)) { + qla2x00_write_flash_byte(ha, 0xaaa, 0xaa); + qla2x00_write_flash_byte(ha, 0x555, 0x55); + qla2x00_write_flash_byte(ha, 0xaaa, 0xa0); + qla2x00_write_flash_byte(ha, addr, data); + } else { + if (man_id == 0xda && flash_id == 0xc1) { + qla2x00_write_flash_byte(ha, addr, data); + if (addr & 0x7e) + return 0; + } else { + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); + qla2x00_write_flash_byte(ha, 0x5555, 0xa0); + qla2x00_write_flash_byte(ha, addr, data); + } + } + + udelay(150); + + /* Wait for write to complete. */ + return qla2x00_poll_flash(ha, addr, data, man_id, flash_id); +} + +/** + * qla2x00_erase_flash() - Erase the flash. + * @ha: HA context + * @man_id: Flash manufacturer ID + * @flash_id: Flash ID + * + * Returns 0 on success, else non-zero. + */ +static int +qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id) +{ + /* Individual Sector Erase Command Sequence */ + if (IS_OEM_001(ha)) { + qla2x00_write_flash_byte(ha, 0xaaa, 0xaa); + qla2x00_write_flash_byte(ha, 0x555, 0x55); + qla2x00_write_flash_byte(ha, 0xaaa, 0x80); + qla2x00_write_flash_byte(ha, 0xaaa, 0xaa); + qla2x00_write_flash_byte(ha, 0x555, 0x55); + qla2x00_write_flash_byte(ha, 0xaaa, 0x10); + } else { + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); + qla2x00_write_flash_byte(ha, 0x5555, 0x80); + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); + qla2x00_write_flash_byte(ha, 0x5555, 0x10); + } + + udelay(150); + + /* Wait for erase to complete. */ + return qla2x00_poll_flash(ha, 0x00, 0x80, man_id, flash_id); +} + +/** + * qla2x00_erase_flash_sector() - Erase a flash sector. + * @ha: HA context + * @addr: Flash sector to erase + * @sec_mask: Sector address mask + * @man_id: Flash manufacturer ID + * @flash_id: Flash ID + * + * Returns 0 on success, else non-zero. + */ +static int +qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr, + uint32_t sec_mask, uint8_t man_id, uint8_t flash_id) +{ + /* Individual Sector Erase Command Sequence */ + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); + qla2x00_write_flash_byte(ha, 0x5555, 0x80); + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); + if (man_id == 0x1f && flash_id == 0x13) + qla2x00_write_flash_byte(ha, addr & sec_mask, 0x10); + else + qla2x00_write_flash_byte(ha, addr & sec_mask, 0x30); + + udelay(150); + + /* Wait for erase to complete. 
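qla2x00_poll_flash() above uses the standard parallel-flash completion poll: DQ7 (bit 7) matches the programmed data once the operation finishes, while DQ5 (bit 5) signals a timeout. A simplified stand-alone model; the driver additionally re-reads DQ7 a couple more times after seeing DQ5, which is omitted here:

#include <stdint.h>
#include <stdio.h>

#define BIT_5   0x20
#define BIT_7   0x80

/* Pretend the flash part finishes after five polls. */
static uint8_t read_status(int tick)
{
        return tick < 5 ? 0x00 : BIT_7;
}

/* Returns 0 once DQ7 matches the programmed data, 1 on timeout. */
static int poll_flash(uint8_t poll_data)
{
        int cnt;

        poll_data &= BIT_7;
        for (cnt = 0; cnt < 3000000; cnt++) {
                uint8_t flash_data = read_status(cnt);

                if ((flash_data & BIT_7) == poll_data)
                        return 0;               /* operation complete */
                if (flash_data & BIT_5)
                        return 1;               /* DQ5: the part timed out */
        }
        return 1;
}

int main(void)
{
        printf("poll result: %d\n", poll_flash(0x80));  /* 0 = success */
        return 0;
}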
*/ + return qla2x00_poll_flash(ha, addr, 0x80, man_id, flash_id); +} + +/** + * qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip. + * @man_id: Flash manufacturer ID + * @flash_id: Flash ID + */ +static void +qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, + uint8_t *flash_id) +{ + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); + qla2x00_write_flash_byte(ha, 0x5555, 0x90); + *man_id = qla2x00_read_flash_byte(ha, 0x0000); + *flash_id = qla2x00_read_flash_byte(ha, 0x0001); + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); + qla2x00_write_flash_byte(ha, 0x5555, 0xf0); +} + + +static inline void +qla2x00_suspend_hba(struct scsi_qla_host *ha) +{ + int cnt; + unsigned long flags; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + /* Suspend HBA. */ + scsi_block_requests(ha->host); + ha->isp_ops.disable_intrs(ha); + set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); + + /* Pause RISC. */ + spin_lock_irqsave(&ha->hardware_lock, flags); + WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); + RD_REG_WORD(®->hccr); + if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { + for (cnt = 0; cnt < 30000; cnt++) { + if ((RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) != 0) + break; + udelay(100); + } + } else { + udelay(10); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +static inline void +qla2x00_resume_hba(struct scsi_qla_host *ha) +{ + /* Resume HBA. */ + clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); + set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); + up(ha->dpc_wait); + qla2x00_wait_for_hba_online(ha); + scsi_unblock_requests(ha->host); +} + +uint8_t * +qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, + uint32_t offset, uint32_t length) +{ + unsigned long flags; + uint32_t addr, midpoint; + uint8_t *data; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + /* Suspend HBA. */ + qla2x00_suspend_hba(ha); + + /* Go with read. */ + spin_lock_irqsave(&ha->hardware_lock, flags); + midpoint = ha->optrom_size / 2; + + qla2x00_flash_enable(ha); + WRT_REG_WORD(®->nvram, 0); + RD_REG_WORD(®->nvram); /* PCI Posting. */ + for (addr = offset, data = buf; addr < length; addr++, data++) { + if (addr == midpoint) { + WRT_REG_WORD(®->nvram, NVR_SELECT); + RD_REG_WORD(®->nvram); /* PCI Posting. */ + } + + *data = qla2x00_read_flash_byte(ha, addr); + } + qla2x00_flash_disable(ha); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* Resume HBA. */ + qla2x00_resume_hba(ha); + + return buf; +} + +int +qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, + uint32_t offset, uint32_t length) +{ + + int rval; + unsigned long flags; + uint8_t man_id, flash_id, sec_number, data; + uint16_t wd; + uint32_t addr, liter, sec_mask, rest_addr; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + /* Suspend HBA. */ + qla2x00_suspend_hba(ha); + + rval = QLA_SUCCESS; + sec_number = 0; + + /* Reset ISP chip. */ + spin_lock_irqsave(&ha->hardware_lock, flags); + WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); + pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); + + /* Go with write. */ + qla2x00_flash_enable(ha); + do { /* Loop once to provide quick error exit */ + /* Structure of flash memory based on manufacturer */ + if (IS_OEM_001(ha)) { + /* OEM variant with special flash part. 
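qla2x00_get_flash_manufacturer() near the top of this hunk is the JEDEC "autoselect" identification sequence. Isolated from the driver it looks like the sketch below; wr()/rd() are placeholders for the byte accessors, and the 0x90 (autoselect) and 0xF0 (reset to read-array mode) command bytes are the ones used above.

#include <stdint.h>

typedef void (*write_byte_fn)(uint32_t addr, uint8_t data);
typedef uint8_t (*read_byte_fn)(uint32_t addr);

/*
 * JEDEC autoselect identification: unlock, issue 0x90, read the
 * manufacturer code at offset 0 and the device code at offset 1, then
 * issue 0xf0 so the part returns to normal read-array mode.
 */
static void read_jedec_id(write_byte_fn wr, read_byte_fn rd,
                          uint8_t *man_id, uint8_t *dev_id)
{
        wr(0x5555, 0xaa);
        wr(0x2aaa, 0x55);
        wr(0x5555, 0x90);               /* enter autoselect mode */

        *man_id = rd(0x0000);
        *dev_id = rd(0x0001);

        wr(0x5555, 0xaa);
        wr(0x2aaa, 0x55);
        wr(0x5555, 0xf0);               /* back to read-array mode */
}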
*/ + man_id = flash_id = 0; + rest_addr = 0xffff; + sec_mask = 0x10000; + goto update_flash; + } + qla2x00_get_flash_manufacturer(ha, &man_id, &flash_id); + switch (man_id) { + case 0x20: /* ST flash. */ + if (flash_id == 0xd2 || flash_id == 0xe3) { + /* + * ST m29w008at part - 64kb sector size with + * 32kb,8kb,8kb,16kb sectors at memory address + * 0xf0000. + */ + rest_addr = 0xffff; + sec_mask = 0x10000; + break; + } + /* + * ST m29w010b part - 16kb sector size + * Default to 16kb sectors + */ + rest_addr = 0x3fff; + sec_mask = 0x1c000; + break; + case 0x40: /* Mostel flash. */ + /* Mostel v29c51001 part - 512 byte sector size. */ + rest_addr = 0x1ff; + sec_mask = 0x1fe00; + break; + case 0xbf: /* SST flash. */ + /* SST39sf10 part - 4kb sector size. */ + rest_addr = 0xfff; + sec_mask = 0x1f000; + break; + case 0xda: /* Winbond flash. */ + /* Winbond W29EE011 part - 256 byte sector size. */ + rest_addr = 0x7f; + sec_mask = 0x1ff80; + break; + case 0xc2: /* Macronix flash. */ + /* 64k sector size. */ + if (flash_id == 0x38 || flash_id == 0x4f) { + rest_addr = 0xffff; + sec_mask = 0x10000; + break; + } + /* Fall through... */ + + case 0x1f: /* Atmel flash. */ + /* 512k sector size. */ + if (flash_id == 0x13) { + rest_addr = 0x7fffffff; + sec_mask = 0x80000000; + break; + } + /* Fall through... */ + + case 0x01: /* AMD flash. */ + if (flash_id == 0x38 || flash_id == 0x40 || + flash_id == 0x4f) { + /* Am29LV081 part - 64kb sector size. */ + /* Am29LV002BT part - 64kb sector size. */ + rest_addr = 0xffff; + sec_mask = 0x10000; + break; + } else if (flash_id == 0x3e) { + /* + * Am29LV008b part - 64kb sector size with + * 32kb,8kb,8kb,16kb sector at memory address + * h0xf0000. + */ + rest_addr = 0xffff; + sec_mask = 0x10000; + break; + } else if (flash_id == 0x20 || flash_id == 0x6e) { + /* + * Am29LV010 part or AM29f010 - 16kb sector + * size. + */ + rest_addr = 0x3fff; + sec_mask = 0x1c000; + break; + } else if (flash_id == 0x6d) { + /* Am29LV001 part - 8kb sector size. */ + rest_addr = 0x1fff; + sec_mask = 0x1e000; + break; + } + default: + /* Default to 16 kb sector size. */ + rest_addr = 0x3fff; + sec_mask = 0x1c000; + break; + } + +update_flash: + if (IS_QLA2322(ha) || IS_QLA6322(ha)) { + if (qla2x00_erase_flash(ha, man_id, flash_id)) { + rval = QLA_FUNCTION_FAILED; + break; + } + } + + for (addr = offset, liter = 0; liter < length; liter++, + addr++) { + data = buf[liter]; + /* Are we at the beginning of a sector? 
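The switch above boils down to a per-part geometry table: rest_addr masks the byte offset within a sector and sec_mask selects the sector-number bits, so a sector boundary is simply (addr & rest_addr) == 0. A table-driven sketch of the same mapping follows (plain C, values copied from a few of the cases above, deliberately not exhaustive; the flash_id value 0 is this sketch's own "any device" wildcard, not a real device code).

#include <stddef.h>
#include <stdint.h>

struct flash_geometry {
        uint8_t  man_id;
        uint8_t  flash_id;      /* 0 = any device from this manufacturer */
        uint32_t rest_addr;     /* mask of offset bits within a sector */
        uint32_t sec_mask;      /* mask selecting the sector number bits */
};

static const struct flash_geometry geometries[] = {
        { 0x20, 0xd2, 0x0000ffff, 0x00010000 }, /* ST M29W008AT, 64 KiB   */
        { 0x20, 0x00, 0x00003fff, 0x0001c000 }, /* ST M29W010B,  16 KiB   */
        { 0x40, 0x00, 0x000001ff, 0x0001fe00 }, /* Mostel V29C51001, 512 B */
        { 0xbf, 0x00, 0x00000fff, 0x0001f000 }, /* SST 39SF010,   4 KiB   */
        { 0xda, 0x00, 0x0000007f, 0x0001ff80 }, /* Winbond W29EE011, 256 B */
};

static const struct flash_geometry *
lookup_geometry(uint8_t man_id, uint8_t flash_id)
{
        size_t i;

        for (i = 0; i < sizeof(geometries) / sizeof(geometries[0]); i++) {
                if (geometries[i].man_id != man_id)
                        continue;
                if (geometries[i].flash_id == 0 ||
                    geometries[i].flash_id == flash_id)
                        return &geometries[i];
        }
        return NULL;    /* caller falls back to the 16 KiB default */
}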
*/ + if ((addr & rest_addr) == 0) { + if (IS_QLA2322(ha) || IS_QLA6322(ha)) { + if (addr >= 0x10000UL) { + if (((addr >> 12) & 0xf0) && + ((man_id == 0x01 && + flash_id == 0x3e) || + (man_id == 0x20 && + flash_id == 0xd2))) { + sec_number++; + if (sec_number == 1) { + rest_addr = + 0x7fff; + sec_mask = + 0x18000; + } else if ( + sec_number == 2 || + sec_number == 3) { + rest_addr = + 0x1fff; + sec_mask = + 0x1e000; + } else if ( + sec_number == 4) { + rest_addr = + 0x3fff; + sec_mask = + 0x1c000; + } + } + } + } else if (addr == ha->optrom_size / 2) { + WRT_REG_WORD(®->nvram, NVR_SELECT); + RD_REG_WORD(®->nvram); + } + + if (flash_id == 0xda && man_id == 0xc1) { + qla2x00_write_flash_byte(ha, 0x5555, + 0xaa); + qla2x00_write_flash_byte(ha, 0x2aaa, + 0x55); + qla2x00_write_flash_byte(ha, 0x5555, + 0xa0); + } else if (!IS_QLA2322(ha) && !IS_QLA6322(ha)) { + /* Then erase it */ + if (qla2x00_erase_flash_sector(ha, + addr, sec_mask, man_id, + flash_id)) { + rval = QLA_FUNCTION_FAILED; + break; + } + if (man_id == 0x01 && flash_id == 0x6d) + sec_number++; + } + } + + if (man_id == 0x01 && flash_id == 0x6d) { + if (sec_number == 1 && + addr == (rest_addr - 1)) { + rest_addr = 0x0fff; + sec_mask = 0x1f000; + } else if (sec_number == 3 && (addr & 0x7ffe)) { + rest_addr = 0x3fff; + sec_mask = 0x1c000; + } + } + + if (qla2x00_program_flash_address(ha, addr, data, + man_id, flash_id)) { + rval = QLA_FUNCTION_FAILED; + break; + } + } + } while (0); + qla2x00_flash_disable(ha); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* Resume HBA. */ + qla2x00_resume_hba(ha); + + return rval; +} + +uint8_t * +qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, + uint32_t offset, uint32_t length) +{ + /* Suspend HBA. */ + scsi_block_requests(ha->host); + ha->isp_ops.disable_intrs(ha); + set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); + + /* Go with read. */ + qla24xx_read_flash_data(ha, (uint32_t *)buf, offset >> 2, length >> 2); + + /* Resume HBA. */ + clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); + ha->isp_ops.enable_intrs(ha); + scsi_unblock_requests(ha->host); + + return buf; +} + +int +qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, + uint32_t offset, uint32_t length) +{ + int rval; + + /* Suspend HBA. */ + scsi_block_requests(ha->host); + ha->isp_ops.disable_intrs(ha); + set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); + + /* Go with write. */ + rval = qla24xx_write_flash_data(ha, (uint32_t *)buf, offset >> 2, + length >> 2); + + /* Resume HBA -- RISC reset needed. 
*/ + clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); + set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); + up(ha->dpc_wait); + qla2x00_wait_for_hba_online(ha); + scsi_unblock_requests(ha->host); + + return rval; +} diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c index c158de2e757d..aceaf56999a5 100644 --- a/drivers/scsi/sata_mv.c +++ b/drivers/scsi/sata_mv.c @@ -1091,6 +1091,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) case ATA_CMD_READ_EXT: case ATA_CMD_WRITE: case ATA_CMD_WRITE_EXT: + case ATA_CMD_WRITE_FUA_EXT: mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); break; #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */ diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c index e124db8e8284..b574379a7a82 100644 --- a/drivers/scsi/sata_vsc.c +++ b/drivers/scsi/sata_vsc.c @@ -81,6 +81,19 @@ /* Port stride */ #define VSC_SATA_PORT_OFFSET 0x200 +/* Error interrupt status bit offsets */ +#define VSC_SATA_INT_ERROR_E_OFFSET 2 +#define VSC_SATA_INT_ERROR_P_OFFSET 4 +#define VSC_SATA_INT_ERROR_T_OFFSET 5 +#define VSC_SATA_INT_ERROR_M_OFFSET 1 +#define is_vsc_sata_int_err(port_idx, int_status) \ + (int_status & ((1 << (VSC_SATA_INT_ERROR_E_OFFSET + (8 * port_idx))) | \ + (1 << (VSC_SATA_INT_ERROR_P_OFFSET + (8 * port_idx))) | \ + (1 << (VSC_SATA_INT_ERROR_T_OFFSET + (8 * port_idx))) | \ + (1 << (VSC_SATA_INT_ERROR_M_OFFSET + (8 * port_idx))) \ + )\ + ) + static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) { @@ -201,13 +214,28 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance, struct ata_port *ap; ap = host_set->ports[i]; + + if (is_vsc_sata_int_err(i, int_status)) { + u32 err_status; + printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__); + err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0; + vsc_sata_scr_write(ap, SCR_ERROR, err_status); + handled++; + } + if (ap && !(ap->flags & (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) { struct ata_queued_cmd *qc; qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.ctl & ATA_NIEN))) + if (qc && (!(qc->tf.ctl & ATA_NIEN))) { handled += ata_host_intr(ap, qc); + } else { + printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__); + ata_chk_status(ap); + handled++; + } + } } } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 4a602853a98e..4362dcde74af 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -16,6 +16,7 @@ #include <linux/init.h> #include <linux/pci.h> #include <linux/delay.h> +#include <linux/hardirq.h> #include <scsi/scsi.h> #include <scsi/scsi_dbg.h> @@ -2248,3 +2249,61 @@ scsi_target_unblock(struct device *dev) device_for_each_child(dev, NULL, target_unblock); } EXPORT_SYMBOL_GPL(scsi_target_unblock); + + +struct work_queue_work { + struct work_struct work; + void (*fn)(void *); + void *data; +}; + +static void execute_in_process_context_work(void *data) +{ + void (*fn)(void *data); + struct work_queue_work *wqw = data; + + fn = wqw->fn; + data = wqw->data; + + kfree(wqw); + + fn(data); +} + +/** + * scsi_execute_in_process_context - reliably execute the routine with user context + * @fn: the function to execute + * @data: data to pass to the function + * + * Executes the function immediately if process context is available, + * otherwise schedules the function for delayed execution. 
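A hedged usage sketch of the helper documented above, separate from the actual conversions later in this patch: my_object and its teardown are made up for illustration, and the prototype exported by this patch is assumed to be visible to the caller, but the call pattern (a release path that may run in interrupt context handing its blocking work to scsi_execute_in_process_context()) is the one the SCSI core adopts below.

#include <linux/slab.h>

struct my_object {                      /* made-up example type */
        void *private_data;
};

/* Always runs with process context, so blocking teardown is allowed here. */
static void my_object_release_usercontext(void *data)
{
        struct my_object *obj = data;

        /* ... teardown that may sleep would go here ... */
        kfree(obj);
}

/* May be called from any context, including interrupt context. */
static void my_object_release(struct my_object *obj)
{
        scsi_execute_in_process_context(my_object_release_usercontext, obj);
}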
+ * + * Returns: 0 - function was executed + * 1 - function was scheduled for execution + * <0 - error + */ +int scsi_execute_in_process_context(void (*fn)(void *data), void *data) +{ + struct work_queue_work *wqw; + + if (!in_interrupt()) { + fn(data); + return 0; + } + + wqw = kmalloc(sizeof(struct work_queue_work), GFP_ATOMIC); + + if (unlikely(!wqw)) { + printk(KERN_ERR "Failed to allocate memory\n"); + WARN_ON(1); + return -ENOMEM; + } + + INIT_WORK(&wqw->work, execute_in_process_context_work, wqw); + wqw->fn = fn; + wqw->data = data; + schedule_work(&wqw->work); + + return 1; +} +EXPORT_SYMBOL_GPL(scsi_execute_in_process_context); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 752fb5da3de4..5acb83ca5ae5 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -387,19 +387,12 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, return found_target; } -struct work_queue_wrapper { - struct work_struct work; - struct scsi_target *starget; -}; - -static void scsi_target_reap_work(void *data) { - struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; - struct scsi_target *starget = wqw->starget; +static void scsi_target_reap_usercontext(void *data) +{ + struct scsi_target *starget = data; struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); unsigned long flags; - kfree(wqw); - spin_lock_irqsave(shost->host_lock, flags); if (--starget->reap_ref == 0 && list_empty(&starget->devices)) { @@ -428,18 +421,7 @@ static void scsi_target_reap_work(void *data) { */ void scsi_target_reap(struct scsi_target *starget) { - struct work_queue_wrapper *wqw = - kzalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC); - - if (!wqw) { - starget_printk(KERN_ERR, starget, - "Failed to allocate memory in scsi_reap_target()\n"); - return; - } - - INIT_WORK(&wqw->work, scsi_target_reap_work, wqw); - wqw->starget = starget; - schedule_work(&wqw->work); + scsi_execute_in_process_context(scsi_target_reap_usercontext, starget); } /** diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index a77b32deaf8f..902a5def8e62 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -217,8 +217,9 @@ static void scsi_device_cls_release(struct class_device *class_dev) put_device(&sdev->sdev_gendev); } -static void scsi_device_dev_release(struct device *dev) +static void scsi_device_dev_release_usercontext(void *data) { + struct device *dev = data; struct scsi_device *sdev; struct device *parent; struct scsi_target *starget; @@ -237,6 +238,7 @@ static void scsi_device_dev_release(struct device *dev) if (sdev->request_queue) { sdev->request_queue->queuedata = NULL; + /* user context needed to free queue */ scsi_free_queue(sdev->request_queue); /* temporary expedient, try to catch use of queue lock * after free of sdev */ @@ -252,6 +254,11 @@ static void scsi_device_dev_release(struct device *dev) put_device(parent); } +static void scsi_device_dev_release(struct device *dev) +{ + scsi_execute_in_process_context(scsi_device_dev_release_usercontext, dev); +} + static struct class sdev_class = { .name = "scsi_device", .release = scsi_device_cls_release, diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 723f7acbeb12..71e54a64adca 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -39,10 +39,6 @@ struct iscsi_internal { struct iscsi_transport *iscsi_transport; struct list_head list; /* - * List of sessions for this transport - */ - struct 
list_head sessions; - /* * based on transport capabilities, at register time we set these * bits to tell the transport class it wants attributes displayed * in sysfs or that it can support different iSCSI Data-Path @@ -164,9 +160,43 @@ static struct mempool_zone *z_reply; #define Z_MAX_ERROR 16 #define Z_HIWAT_ERROR 12 +static LIST_HEAD(sesslist); +static DEFINE_SPINLOCK(sesslock); static LIST_HEAD(connlist); static DEFINE_SPINLOCK(connlock); +static struct iscsi_cls_session *iscsi_session_lookup(uint64_t handle) +{ + unsigned long flags; + struct iscsi_cls_session *sess; + + spin_lock_irqsave(&sesslock, flags); + list_for_each_entry(sess, &sesslist, sess_list) { + if (sess == iscsi_ptr(handle)) { + spin_unlock_irqrestore(&sesslock, flags); + return sess; + } + } + spin_unlock_irqrestore(&sesslock, flags); + return NULL; +} + +static struct iscsi_cls_conn *iscsi_conn_lookup(uint64_t handle) +{ + unsigned long flags; + struct iscsi_cls_conn *conn; + + spin_lock_irqsave(&connlock, flags); + list_for_each_entry(conn, &connlist, conn_list) { + if (conn == iscsi_ptr(handle)) { + spin_unlock_irqrestore(&connlock, flags); + return conn; + } + } + spin_unlock_irqrestore(&connlock, flags); + return NULL; +} + /* * The following functions can be used by LLDs that allocate * their own scsi_hosts or by software iscsi LLDs @@ -365,6 +395,7 @@ iscsi_transport_create_session(struct scsi_transport_template *scsit, { struct iscsi_cls_session *session; struct Scsi_Host *shost; + unsigned long flags; shost = scsi_host_alloc(transport->host_template, hostdata_privsize(transport)); @@ -389,6 +420,9 @@ iscsi_transport_create_session(struct scsi_transport_template *scsit, goto remove_host; *(unsigned long*)shost->hostdata = (unsigned long)session; + spin_lock_irqsave(&sesslock, flags); + list_add(&session->sess_list, &sesslist); + spin_unlock_irqrestore(&sesslock, flags); return shost; remove_host: @@ -410,9 +444,13 @@ EXPORT_SYMBOL_GPL(iscsi_transport_create_session); int iscsi_transport_destroy_session(struct Scsi_Host *shost) { struct iscsi_cls_session *session; + unsigned long flags; scsi_remove_host(shost); session = hostdata_session(shost->hostdata); + spin_lock_irqsave(&sesslock, flags); + list_del(&session->sess_list); + spin_unlock_irqrestore(&sesslock, flags); iscsi_destroy_session(session); /* ref from host alloc */ scsi_host_put(shost); @@ -424,22 +462,6 @@ EXPORT_SYMBOL_GPL(iscsi_transport_destroy_session); /* * iscsi interface functions */ -static struct iscsi_cls_conn* -iscsi_if_find_conn(uint64_t key) -{ - unsigned long flags; - struct iscsi_cls_conn *conn; - - spin_lock_irqsave(&connlock, flags); - list_for_each_entry(conn, &connlist, conn_list) - if (conn->connh == key) { - spin_unlock_irqrestore(&connlock, flags); - return conn; - } - spin_unlock_irqrestore(&connlock, flags); - return NULL; -} - static struct iscsi_internal * iscsi_if_transport_lookup(struct iscsi_transport *tt) { @@ -504,6 +526,12 @@ mempool_zone_init(unsigned max, unsigned size, unsigned hiwat) if (!zp) return NULL; + zp->size = size; + zp->hiwat = hiwat; + INIT_LIST_HEAD(&zp->freequeue); + spin_lock_init(&zp->freelock); + atomic_set(&zp->allocated, 0); + zp->pool = mempool_create(max, mempool_zone_alloc_skb, mempool_zone_free_skb, zp); if (!zp->pool) { @@ -511,13 +539,6 @@ mempool_zone_init(unsigned max, unsigned size, unsigned hiwat) return NULL; } - zp->size = size; - zp->hiwat = hiwat; - - INIT_LIST_HEAD(&zp->freequeue); - spin_lock_init(&zp->freelock); - atomic_set(&zp->allocated, 0); - return zp; } @@ -559,25 +580,21 
@@ iscsi_unicast_skb(struct mempool_zone *zone, struct sk_buff *skb) return 0; } -int iscsi_recv_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, +int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; - struct iscsi_cls_conn *conn; char *pdu; int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) + data_size); - conn = iscsi_if_find_conn(connh); - BUG_ON(!conn); - mempool_zone_complete(conn->z_pdu); skb = mempool_zone_get_skb(conn->z_pdu); if (!skb) { - iscsi_conn_error(connh, ISCSI_ERR_CONN_FAILED); + iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED); dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver " "control PDU: OOM\n"); return -ENOMEM; @@ -590,7 +607,7 @@ int iscsi_recv_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, ev->type = ISCSI_KEVENT_RECV_PDU; if (atomic_read(&conn->z_pdu->allocated) >= conn->z_pdu->hiwat) ev->iferror = -ENOMEM; - ev->r.recv_req.conn_handle = connh; + ev->r.recv_req.conn_handle = iscsi_handle(conn); pdu = (char*)ev + sizeof(*ev); memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); @@ -599,17 +616,13 @@ int iscsi_recv_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, } EXPORT_SYMBOL_GPL(iscsi_recv_pdu); -void iscsi_conn_error(iscsi_connh_t connh, enum iscsi_err error) +void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; - struct iscsi_cls_conn *conn; int len = NLMSG_SPACE(sizeof(*ev)); - conn = iscsi_if_find_conn(connh); - BUG_ON(!conn); - mempool_zone_complete(conn->z_error); skb = mempool_zone_get_skb(conn->z_error); @@ -626,7 +639,7 @@ void iscsi_conn_error(iscsi_connh_t connh, enum iscsi_err error) if (atomic_read(&conn->z_error->allocated) >= conn->z_error->hiwat) ev->iferror = -ENOMEM; ev->r.connerror.error = error; - ev->r.connerror.conn_handle = connh; + ev->r.connerror.conn_handle = iscsi_handle(conn); iscsi_unicast_skb(conn->z_error, skb); @@ -662,8 +675,7 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi, } static int -iscsi_if_get_stats(struct iscsi_transport *transport, struct sk_buff *skb, - struct nlmsghdr *nlh) +iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) { struct iscsi_uevent *ev = NLMSG_DATA(nlh); struct iscsi_stats *stats; @@ -677,7 +689,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct sk_buff *skb, ISCSI_STATS_CUSTOM_MAX); int err = 0; - conn = iscsi_if_find_conn(ev->u.get_stats.conn_handle); + conn = iscsi_conn_lookup(ev->u.get_stats.conn_handle); if (!conn) return -EEXIST; @@ -707,14 +719,14 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct sk_buff *skb, ((char*)evstat + sizeof(*evstat)); memset(stats, 0, sizeof(*stats)); - transport->get_stats(ev->u.get_stats.conn_handle, stats); + transport->get_stats(conn, stats); actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) + sizeof(struct iscsi_stats) + sizeof(struct iscsi_stats_custom) * stats->custom_length); actual_size -= sizeof(*nlhstat); actual_size = NLMSG_LENGTH(actual_size); - skb_trim(skb, NLMSG_ALIGN(actual_size)); + skb_trim(skbstat, NLMSG_ALIGN(actual_size)); nlhstat->nlmsg_len = actual_size; err = iscsi_unicast_skb(conn->z_pdu, skbstat); @@ -727,58 +739,34 @@ static int iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) { struct iscsi_transport *transport = priv->iscsi_transport; - struct Scsi_Host *shost; - - 
if (!transport->create_session) - return -EINVAL; + struct iscsi_cls_session *session; + uint32_t sid; - shost = transport->create_session(&priv->t, - ev->u.c_session.initial_cmdsn); - if (!shost) + session = transport->create_session(&priv->t, + ev->u.c_session.initial_cmdsn, + &sid); + if (!session) return -ENOMEM; - ev->r.c_session_ret.session_handle = iscsi_handle(iscsi_hostdata(shost->hostdata)); - ev->r.c_session_ret.sid = shost->host_no; + ev->r.c_session_ret.session_handle = iscsi_handle(session); + ev->r.c_session_ret.sid = sid; return 0; } static int -iscsi_if_destroy_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) +iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) { - struct iscsi_transport *transport = priv->iscsi_transport; - - struct Scsi_Host *shost; - - if (!transport->destroy_session) - return -EINVAL; - - shost = scsi_host_lookup(ev->u.d_session.sid); - if (shost == ERR_PTR(-ENXIO)) - return -EEXIST; - - if (transport->destroy_session) - transport->destroy_session(shost); - /* ref from host lookup */ - scsi_host_put(shost); - return 0; -} - -static int -iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev){ - struct Scsi_Host *shost; struct iscsi_cls_conn *conn; + struct iscsi_cls_session *session; unsigned long flags; - if (!transport->create_conn) + session = iscsi_session_lookup(ev->u.c_conn.session_handle); + if (!session) return -EINVAL; - shost = scsi_host_lookup(ev->u.c_conn.sid); - if (shost == ERR_PTR(-ENXIO)) - return -EEXIST; - - conn = transport->create_conn(shost, ev->u.c_conn.cid); + conn = transport->create_conn(session, ev->u.c_conn.cid); if (!conn) - goto release_ref; + return -ENOMEM; conn->z_pdu = mempool_zone_init(Z_MAX_PDU, NLMSG_SPACE(sizeof(struct iscsi_uevent) + @@ -800,14 +788,13 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) goto free_pdu_pool; } - ev->r.handle = conn->connh = iscsi_handle(conn->dd_data); + ev->r.handle = iscsi_handle(conn); spin_lock_irqsave(&connlock, flags); list_add(&conn->conn_list, &connlist); conn->active = 1; spin_unlock_irqrestore(&connlock, flags); - scsi_host_put(shost); return 0; free_pdu_pool: @@ -815,8 +802,6 @@ free_pdu_pool: destroy_conn: if (transport->destroy_conn) transport->destroy_conn(conn->dd_data); -release_ref: - scsi_host_put(shost); return -ENOMEM; } @@ -827,13 +812,9 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev struct iscsi_cls_conn *conn; struct mempool_zone *z_error, *z_pdu; - conn = iscsi_if_find_conn(ev->u.d_conn.conn_handle); + conn = iscsi_conn_lookup(ev->u.d_conn.conn_handle); if (!conn) - return -EEXIST; - - if (!transport->destroy_conn) return -EINVAL; - spin_lock_irqsave(&connlock, flags); conn->active = 0; list_del(&conn->conn_list); @@ -858,23 +839,27 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) struct iscsi_uevent *ev = NLMSG_DATA(nlh); struct iscsi_transport *transport = NULL; struct iscsi_internal *priv; - - if (NETLINK_CREDS(skb)->uid) - return -EPERM; + struct iscsi_cls_session *session; + struct iscsi_cls_conn *conn; priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); if (!priv) return -EINVAL; transport = priv->iscsi_transport; - daemon_pid = NETLINK_CREDS(skb)->pid; + if (!try_module_get(transport->owner)) + return -EINVAL; switch (nlh->nlmsg_type) { case ISCSI_UEVENT_CREATE_SESSION: err = iscsi_if_create_session(priv, ev); break; case ISCSI_UEVENT_DESTROY_SESSION: - err = iscsi_if_destroy_session(priv, 
ev); + session = iscsi_session_lookup(ev->u.d_session.session_handle); + if (session) + transport->destroy_session(session); + else + err = -EINVAL; break; case ISCSI_UEVENT_CREATE_CONN: err = iscsi_if_create_conn(transport, ev); @@ -883,56 +868,64 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) err = iscsi_if_destroy_conn(transport, ev); break; case ISCSI_UEVENT_BIND_CONN: - if (!iscsi_if_find_conn(ev->u.b_conn.conn_handle)) - return -EEXIST; - ev->r.retcode = transport->bind_conn( - ev->u.b_conn.session_handle, - ev->u.b_conn.conn_handle, - ev->u.b_conn.transport_fd, - ev->u.b_conn.is_leading); + session = iscsi_session_lookup(ev->u.b_conn.session_handle); + conn = iscsi_conn_lookup(ev->u.b_conn.conn_handle); + + if (session && conn) + ev->r.retcode = transport->bind_conn(session, conn, + ev->u.b_conn.transport_fd, + ev->u.b_conn.is_leading); + else + err = -EINVAL; break; case ISCSI_UEVENT_SET_PARAM: - if (!iscsi_if_find_conn(ev->u.set_param.conn_handle)) - return -EEXIST; - ev->r.retcode = transport->set_param( - ev->u.set_param.conn_handle, - ev->u.set_param.param, ev->u.set_param.value); + conn = iscsi_conn_lookup(ev->u.set_param.conn_handle); + if (conn) + ev->r.retcode = transport->set_param(conn, + ev->u.set_param.param, ev->u.set_param.value); + else + err = -EINVAL; break; case ISCSI_UEVENT_START_CONN: - if (!iscsi_if_find_conn(ev->u.start_conn.conn_handle)) - return -EEXIST; - ev->r.retcode = transport->start_conn( - ev->u.start_conn.conn_handle); + conn = iscsi_conn_lookup(ev->u.start_conn.conn_handle); + if (conn) + ev->r.retcode = transport->start_conn(conn); + else + err = -EINVAL; + break; case ISCSI_UEVENT_STOP_CONN: - if (!iscsi_if_find_conn(ev->u.stop_conn.conn_handle)) - return -EEXIST; - transport->stop_conn(ev->u.stop_conn.conn_handle, - ev->u.stop_conn.flag); + conn = iscsi_conn_lookup(ev->u.stop_conn.conn_handle); + if (conn) + transport->stop_conn(conn, ev->u.stop_conn.flag); + else + err = -EINVAL; break; case ISCSI_UEVENT_SEND_PDU: - if (!iscsi_if_find_conn(ev->u.send_pdu.conn_handle)) - return -EEXIST; - ev->r.retcode = transport->send_pdu( - ev->u.send_pdu.conn_handle, - (struct iscsi_hdr*)((char*)ev + sizeof(*ev)), - (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, - ev->u.send_pdu.data_size); + conn = iscsi_conn_lookup(ev->u.send_pdu.conn_handle); + if (conn) + ev->r.retcode = transport->send_pdu(conn, + (struct iscsi_hdr*)((char*)ev + sizeof(*ev)), + (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, + ev->u.send_pdu.data_size); + else + err = -EINVAL; break; case ISCSI_UEVENT_GET_STATS: - err = iscsi_if_get_stats(transport, skb, nlh); + err = iscsi_if_get_stats(transport, nlh); break; default: err = -EINVAL; break; } + module_put(transport->owner); return err; } /* Get message from skb (based on rtnetlink_rcv_skb). Each message is * processed by iscsi_if_recv_msg. Malformed skbs with wrong length are - * discarded silently. */ + * or invalid creds discarded silently. 
*/ static void iscsi_if_rx(struct sock *sk, int len) { @@ -940,6 +933,12 @@ iscsi_if_rx(struct sock *sk, int len) mutex_lock(&rx_queue_mutex); while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { + if (NETLINK_CREDS(skb)->uid) { + skb_pull(skb, skb->len); + goto free_skb; + } + daemon_pid = NETLINK_CREDS(skb)->pid; + while (skb->len >= NLMSG_SPACE(0)) { int err; uint32_t rlen; @@ -951,10 +950,12 @@ iscsi_if_rx(struct sock *sk, int len) skb->len < nlh->nlmsg_len) { break; } + ev = NLMSG_DATA(nlh); rlen = NLMSG_ALIGN(nlh->nlmsg_len); if (rlen > skb->len) rlen = skb->len; + err = iscsi_if_recv_msg(skb, nlh); if (err) { ev->type = ISCSI_KEVENT_IF_ERROR; @@ -978,6 +979,7 @@ iscsi_if_rx(struct sock *sk, int len) } while (err < 0 && err != -ECONNREFUSED); skb_pull(skb, rlen); } +free_skb: kfree_skb(skb); } mutex_unlock(&rx_queue_mutex); @@ -997,7 +999,7 @@ show_conn_int_param_##param(struct class_device *cdev, char *buf) \ struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \ struct iscsi_transport *t = conn->transport; \ \ - t->get_conn_param(conn->dd_data, param, &value); \ + t->get_conn_param(conn, param, &value); \ return snprintf(buf, 20, format"\n", value); \ } @@ -1024,10 +1026,9 @@ show_session_int_param_##param(struct class_device *cdev, char *buf) \ { \ uint32_t value = 0; \ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \ - struct Scsi_Host *shost = iscsi_session_to_shost(session); \ struct iscsi_transport *t = session->transport; \ \ - t->get_session_param(shost, param, &value); \ + t->get_session_param(session, param, &value); \ return snprintf(buf, 20, format"\n", value); \ } @@ -1121,7 +1122,6 @@ iscsi_register_transport(struct iscsi_transport *tt) return NULL; memset(priv, 0, sizeof(*priv)); INIT_LIST_HEAD(&priv->list); - INIT_LIST_HEAD(&priv->sessions); priv->iscsi_transport = tt; priv->cdev.class = &iscsi_transport_class; diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index 8260f040d39c..f4854c33f48d 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -3588,7 +3588,7 @@ static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int if (pm) { dp_scr = scr_to_cpu(pm->ret); - dp_ofs -= scr_to_cpu(pm->sg.size); + dp_ofs -= scr_to_cpu(pm->sg.size) & 0x00ffffff; } /* diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index b1fc97d5f643..244e8ff11977 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -2198,7 +2198,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count) touch_nmi_watchdog(); /* - * First save the UER then disable the interrupts + * First save the IER then disable the interrupts */ ier = serial_in(up, UART_IER); diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 0f4361c8466b..b3c561abe3f6 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -902,8 +902,8 @@ config SERIAL_JSM something like this to connect more than two modems to your Linux box, for instance in order to become a dial-in server. This driver supports PCI boards only. - If you have a card like this, say Y here and read the file - <file:Documentation/jsm.txt>. + + If you have a card like this, say Y here, otherwise say N. To compile this driver as a module, choose M here: the module will be called jsm. 
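The iscsi_if_rx() loop earlier in this hunk is a textbook netlink receive loop: validate the advertised message length, align it, process the message, then pull the consumed bytes. The user-space sketch below walks a plain buffer the same way; buf/len stand in for skb->data/skb->len, and message handling is reduced to a printf().

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>

static void walk_netlink_messages(void *buf, size_t len)
{
        struct nlmsghdr *nlh;
        size_t rlen;

        while (len >= NLMSG_SPACE(0)) {
                nlh = buf;
                /* Same sanity check as iscsi_if_rx(): a malformed or
                 * truncated header ends the walk. */
                if (nlh->nlmsg_len < sizeof(*nlh) || nlh->nlmsg_len > len)
                        break;

                rlen = NLMSG_ALIGN(nlh->nlmsg_len);
                if (rlen > len)
                        rlen = len;

                printf("type %u, payload %u bytes\n",
                       (unsigned int)nlh->nlmsg_type,
                       (unsigned int)(nlh->nlmsg_len - NLMSG_HDRLEN));

                buf = (char *)buf + rlen;       /* skb_pull() equivalent */
                len -= rlen;
        }
}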
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index e9e5bc178cef..118288d94423 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -191,8 +191,9 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev) } if (wait_time <= 0) printk(KERN_WARNING "%s %s: BIOS handoff " - "failed (BIOS bug ?)\n", - pdev->dev.bus_id, "OHCI"); + "failed (BIOS bug ?) %08x\n", + pdev->dev.bus_id, "OHCI", + readl(base + OHCI_CONTROL)); /* reset controller, preserving RWC */ writel(control & OHCI_CTRL_RWC, base + OHCI_CONTROL); @@ -243,6 +244,12 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev) pr_debug("%s %s: BIOS handoff\n", pdev->dev.bus_id, "EHCI"); +#if 0 +/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on, + * but that seems dubious in general (the BIOS left it off intentionally) + * and is known to prevent some systems from booting. so we won't do this + * unless maybe we can determine when we're on a system that needs SMI forced. + */ /* BIOS workaround (?): be sure the * pre-Linux code receives the SMI */ @@ -252,6 +259,7 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev) pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, val | EHCI_USBLEGCTLSTS_SOOE); +#endif } /* always say Linux will own the hardware @@ -274,8 +282,8 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev) * it down, and hope nothing goes too wrong */ printk(KERN_WARNING "%s %s: BIOS handoff " - "failed (BIOS bug ?)\n", - pdev->dev.bus_id, "EHCI"); + "failed (BIOS bug ?) %08x\n", + pdev->dev.bus_id, "EHCI", cap); pci_write_config_byte(pdev, offset + 2, 0); } diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c index 466384d7c79f..134d2000128a 100644 --- a/drivers/usb/host/sl811_cs.c +++ b/drivers/usb/host/sl811_cs.c @@ -101,7 +101,7 @@ static struct resource resources[] = { }, }; -extern struct device_driver sl811h_driver; +extern struct platform_driver sl811h_driver; static struct platform_device platform_dev = { .id = -1, @@ -132,7 +132,7 @@ static int sl811_hc_init(struct device *parent, ioaddr_t base_addr, int irq) * initialized already because of the link order dependency created * by referencing "sl811h_driver". 
*/ - platform_dev.name = sl811h_driver.name; + platform_dev.name = sl811h_driver.driver.name; return platform_device_register(&platform_dev); } diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c index 6f7a684c3e07..772478086bd3 100644 --- a/drivers/usb/input/hid-core.c +++ b/drivers/usb/input/hid-core.c @@ -1435,17 +1435,20 @@ void hid_init_reports(struct hid_device *hid) #define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004 #define USB_VENDOR_ID_LD 0x0f11 -#define USB_DEVICE_ID_CASSY 0x1000 -#define USB_DEVICE_ID_POCKETCASSY 0x1010 -#define USB_DEVICE_ID_MOBILECASSY 0x1020 -#define USB_DEVICE_ID_JWM 0x1080 -#define USB_DEVICE_ID_DMMP 0x1081 -#define USB_DEVICE_ID_UMIP 0x1090 -#define USB_DEVICE_ID_VIDEOCOM 0x1200 -#define USB_DEVICE_ID_COM3LAB 0x2000 -#define USB_DEVICE_ID_TELEPORT 0x2010 -#define USB_DEVICE_ID_NETWORKANALYSER 0x2020 -#define USB_DEVICE_ID_POWERCONTROL 0x2030 +#define USB_DEVICE_ID_LD_CASSY 0x1000 +#define USB_DEVICE_ID_LD_POCKETCASSY 0x1010 +#define USB_DEVICE_ID_LD_MOBILECASSY 0x1020 +#define USB_DEVICE_ID_LD_JWM 0x1080 +#define USB_DEVICE_ID_LD_DMMP 0x1081 +#define USB_DEVICE_ID_LD_UMIP 0x1090 +#define USB_DEVICE_ID_LD_XRAY1 0x1100 +#define USB_DEVICE_ID_LD_XRAY2 0x1101 +#define USB_DEVICE_ID_LD_VIDEOCOM 0x1200 +#define USB_DEVICE_ID_LD_COM3LAB 0x2000 +#define USB_DEVICE_ID_LD_TELEPORT 0x2010 +#define USB_DEVICE_ID_LD_NETWORKANALYSER 0x2020 +#define USB_DEVICE_ID_LD_POWERCONTROL 0x2030 +#define USB_DEVICE_ID_LD_MACHINETEST 0x2040 #define USB_VENDOR_ID_APPLE 0x05ac #define USB_DEVICE_ID_APPLE_POWERMOUSE 0x0304 @@ -1491,17 +1494,20 @@ static const struct hid_blacklist { { USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE, HID_QUIRK_IGNORE }, { USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB, HID_QUIRK_IGNORE }, { USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_LD, USB_DEVICE_ID_CASSY, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_LD, USB_DEVICE_ID_POCKETCASSY, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_LD, USB_DEVICE_ID_MOBILECASSY, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_LD, USB_DEVICE_ID_JWM, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_LD, USB_DEVICE_ID_DMMP, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_LD, USB_DEVICE_ID_UMIP, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_LD, USB_DEVICE_ID_VIDEOCOM, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_LD, USB_DEVICE_ID_COM3LAB, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_LD, USB_DEVICE_ID_TELEPORT, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_LD, USB_DEVICE_ID_NETWORKANALYSER, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_LD, USB_DEVICE_ID_POWERCONTROL, HID_QUIRK_IGNORE }, + { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY, HID_QUIRK_IGNORE }, + { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY, HID_QUIRK_IGNORE }, + { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY, HID_QUIRK_IGNORE }, + { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM, HID_QUIRK_IGNORE }, + { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP, HID_QUIRK_IGNORE }, + { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP, HID_QUIRK_IGNORE }, + { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY1, HID_QUIRK_IGNORE }, + { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY2, HID_QUIRK_IGNORE }, + { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_VIDEOCOM, HID_QUIRK_IGNORE }, + { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_COM3LAB, HID_QUIRK_IGNORE }, + { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_TELEPORT, HID_QUIRK_IGNORE }, + { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER, HID_QUIRK_IGNORE }, + { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL, HID_QUIRK_IGNORE }, + { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST, HID_QUIRK_IGNORE }, { USB_VENDOR_ID_MCC, 
USB_DEVICE_ID_MCC_PMD1024LS, HID_QUIRK_IGNORE }, { USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS, HID_QUIRK_IGNORE }, { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_IGNORE }, diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index 6649531fa824..8ba6a701e9c1 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -141,7 +141,7 @@ source "drivers/usb/misc/sisusbvga/Kconfig" config USB_LD tristate "USB LD driver" - depends on USB && EXPERIMENTAL + depends on USB help This driver is for generic USB devices that use interrupt transfers, like LD Didactic's USB devices. diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 331d4ae949ed..e2d1198623eb 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -24,6 +24,7 @@ * V0.1 (mh) Initial version * V0.11 (mh) Added raw support for HID 1.0 devices (no interrupt out endpoint) * V0.12 (mh) Added kmalloc check for string buffer + * V0.13 (mh) Added support for LD X-Ray and Machine Test System */ #include <linux/config.h> @@ -40,17 +41,20 @@ /* Define these values to match your devices */ #define USB_VENDOR_ID_LD 0x0f11 /* USB Vendor ID of LD Didactic GmbH */ -#define USB_DEVICE_ID_CASSY 0x1000 /* USB Product ID for all CASSY-S modules */ -#define USB_DEVICE_ID_POCKETCASSY 0x1010 /* USB Product ID for Pocket-CASSY */ -#define USB_DEVICE_ID_MOBILECASSY 0x1020 /* USB Product ID for Mobile-CASSY */ -#define USB_DEVICE_ID_JWM 0x1080 /* USB Product ID for Joule and Wattmeter */ -#define USB_DEVICE_ID_DMMP 0x1081 /* USB Product ID for Digital Multimeter P (reserved) */ -#define USB_DEVICE_ID_UMIP 0x1090 /* USB Product ID for UMI P */ -#define USB_DEVICE_ID_VIDEOCOM 0x1200 /* USB Product ID for VideoCom */ -#define USB_DEVICE_ID_COM3LAB 0x2000 /* USB Product ID for COM3LAB */ -#define USB_DEVICE_ID_TELEPORT 0x2010 /* USB Product ID for Terminal Adapter */ -#define USB_DEVICE_ID_NETWORKANALYSER 0x2020 /* USB Product ID for Network Analyser */ -#define USB_DEVICE_ID_POWERCONTROL 0x2030 /* USB Product ID for Controlling device for Power Electronics */ +#define USB_DEVICE_ID_LD_CASSY 0x1000 /* USB Product ID of CASSY-S */ +#define USB_DEVICE_ID_LD_POCKETCASSY 0x1010 /* USB Product ID of Pocket-CASSY */ +#define USB_DEVICE_ID_LD_MOBILECASSY 0x1020 /* USB Product ID of Mobile-CASSY */ +#define USB_DEVICE_ID_LD_JWM 0x1080 /* USB Product ID of Joule and Wattmeter */ +#define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */ +#define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */ +#define USB_DEVICE_ID_LD_XRAY1 0x1100 /* USB Product ID of X-Ray Apparatus */ +#define USB_DEVICE_ID_LD_XRAY2 0x1101 /* USB Product ID of X-Ray Apparatus */ +#define USB_DEVICE_ID_LD_VIDEOCOM 0x1200 /* USB Product ID of VideoCom */ +#define USB_DEVICE_ID_LD_COM3LAB 0x2000 /* USB Product ID of COM3LAB */ +#define USB_DEVICE_ID_LD_TELEPORT 0x2010 /* USB Product ID of Terminal Adapter */ +#define USB_DEVICE_ID_LD_NETWORKANALYSER 0x2020 /* USB Product ID of Network Analyser */ +#define USB_DEVICE_ID_LD_POWERCONTROL 0x2030 /* USB Product ID of Converter Control Unit */ +#define USB_DEVICE_ID_LD_MACHINETEST 0x2040 /* USB Product ID of Machine Test System */ #define USB_VENDOR_ID_VERNIER 0x08f7 #define USB_DEVICE_ID_VERNIER_LABPRO 0x0001 @@ -67,17 +71,20 @@ /* table of devices that work with this driver */ static struct usb_device_id ld_usb_table [] = { - { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_CASSY) }, - { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_POCKETCASSY) }, - { 
USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_MOBILECASSY) }, - { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_JWM) }, - { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_DMMP) }, - { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_UMIP) }, - { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_VIDEOCOM) }, - { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_COM3LAB) }, - { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_TELEPORT) }, - { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_NETWORKANALYSER) }, - { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_POWERCONTROL) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY1) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY2) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_VIDEOCOM) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_COM3LAB) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_TELEPORT) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) }, { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) }, { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) }, { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) }, @@ -85,7 +92,7 @@ static struct usb_device_id ld_usb_table [] = { { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ld_usb_table); -MODULE_VERSION("V0.12"); +MODULE_VERSION("V0.13"); MODULE_AUTHOR("Michael Hund <mhund@ld-didactic.de>"); MODULE_DESCRIPTION("LD USB Driver"); MODULE_LICENSE("GPL"); @@ -632,8 +639,8 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id * /* workaround for early firmware versions on fast computers */ if ((le16_to_cpu(udev->descriptor.idVendor) == USB_VENDOR_ID_LD) && - ((le16_to_cpu(udev->descriptor.idProduct) == USB_DEVICE_ID_CASSY) || - (le16_to_cpu(udev->descriptor.idProduct) == USB_DEVICE_ID_COM3LAB)) && + ((le16_to_cpu(udev->descriptor.idProduct) == USB_DEVICE_ID_LD_CASSY) || + (le16_to_cpu(udev->descriptor.idProduct) == USB_DEVICE_ID_LD_COM3LAB)) && (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x103)) { buffer = kmalloc(256, GFP_KERNEL); if (buffer == NULL) { diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index e8e575e037c1..37c81c08faad 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -73,9 +73,10 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65) }, { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X75) }, { USB_DEVICE(SYNTECH_VENDOR_ID, SYNTECH_PRODUCT_ID) }, - { USB_DEVICE(NOKIA_CA42_VENDOR_ID, NOKIA_CA42_PRODUCT_ID ) }, - { USB_DEVICE(CA_42_CA42_VENDOR_ID, CA_42_CA42_PRODUCT_ID ) }, + { USB_DEVICE(NOKIA_CA42_VENDOR_ID, NOKIA_CA42_PRODUCT_ID) }, + { USB_DEVICE(CA_42_CA42_VENDOR_ID, CA_42_CA42_PRODUCT_ID) }, { USB_DEVICE(SAGEM_VENDOR_ID, SAGEM_PRODUCT_ID) }, + { USB_DEVICE(LEADTEK_VENDOR_ID, LEADTEK_9531_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 1807087a76e3..9bc4755162ad 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ 
-71,3 +71,7 @@ #define SAGEM_VENDOR_ID 0x079b #define SAGEM_PRODUCT_ID 0x0027 + +/* Leadtek GPS 9531 (ID 0413:2101) */ +#define LEADTEK_VENDOR_ID 0x0413 +#define LEADTEK_9531_PRODUCT_ID 0x2101 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index ee958f986eb8..e71c5ca1a07b 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -106,6 +106,13 @@ UNUSUAL_DEV( 0x0411, 0x001c, 0x0113, 0x0113, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY ), +/* Reported by Christian Leber <christian@leber.de> */ +UNUSUAL_DEV( 0x0419, 0xaaf5, 0x0100, 0x0100, + "TrekStor", + "i.Beat 115 2.0", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_IGNORE_RESIDUE | US_FL_NOT_LOCKABLE ), + /* Reported by Stefan Werner <dustbln@gmx.de> */ UNUSUAL_DEV( 0x0419, 0xaaf6, 0x0100, 0x0100, "TrekStor", @@ -127,6 +134,14 @@ UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100, US_SC_SCSI, US_PR_DPCM_USB, NULL, 0 ), #endif +/* Patch submitted by Daniel Drake <dsd@gentoo.org> + * Device reports nonsense bInterfaceProtocol 6 when connected over USB2 */ +UNUSUAL_DEV( 0x0451, 0x5416, 0x0100, 0x0100, + "Neuros Audio", + "USB 2.0 HD 2.5", + US_SC_DEVICE, US_PR_BULK, NULL, + US_FL_NEED_OVERRIDE ), + /* * Pete Zaitcev <zaitcev@yahoo.com>, from Patrick C. F. Ernzer, bz#162559. * The key does not actually break, but it returns zero sense which @@ -137,13 +152,16 @@ UNUSUAL_DEV( 0x0457, 0x0150, 0x0100, 0x0100, "USB Mass Storage Device", US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ), -/* Patch submitted by Daniel Drake <dsd@gentoo.org> - * Device reports nonsense bInterfaceProtocol 6 when connected over USB2 */ -UNUSUAL_DEV( 0x0451, 0x5416, 0x0100, 0x0100, - "Neuros Audio", - "USB 2.0 HD 2.5", - US_SC_DEVICE, US_PR_BULK, NULL, - US_FL_NEED_OVERRIDE ), +/* +* Bohdan Linda <bohdan.linda@gmail.com> +* 1GB USB sticks MyFlash High Speed. I have restricted +* the revision to my model only +*/ +UNUSUAL_DEV( 0x0457, 0x0151, 0x0100, 0x0100, + "USB 2.0", + "Flash Disk", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_NOT_LOCKABLE ), UNUSUAL_DEV( 0x045a, 0x5210, 0x0101, 0x0101, "Rio", @@ -946,6 +964,12 @@ UNUSUAL_DEV( 0x084d, 0x0011, 0x0110, 0x0110, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_BULK32), +/* Submitted by Jan De Luyck <lkml@kcore.org> */ +UNUSUAL_DEV( 0x08bd, 0x1100, 0x0000, 0x0000, + "CITIZEN", + "X1DE-USB", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_SINGLE_LUN), /* Entry needed for flags. Moreover, all devices with this ID use * bulk-only transport, but _some_ falsely report Control/Bulk instead. 
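For context on entries like the ones above: device matching in USB drivers is table-driven, so supporting a newly reported product is normally a one-line addition. A minimal fragment (not a complete driver) using the Leadtek IDs introduced by the pl2303 hunk above:

#include <linux/module.h>
#include <linux/usb.h>

static struct usb_device_id example_id_table[] = {
        { USB_DEVICE(0x0413, 0x2101) }, /* Leadtek GPS 9531, per the hunk above */
        { }                             /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);

The unusual_devs.h entries work the same way, except that each UNUSUAL_DEV() line also carries quirk flags (US_FL_FIX_CAPACITY, US_FL_IGNORE_RESIDUE, and so on) that usb-storage applies once the device is matched.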
@@ -1085,6 +1109,13 @@ UNUSUAL_DEV( 0x0dda, 0x0301, 0x0012, 0x0012, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE ), +/* Reported by Jim McCloskey <mcclosk@ucsc.edu> */ +UNUSUAL_DEV( 0x0e21, 0x0520, 0x0100, 0x0100, + "Cowon Systems", + "iAUDIO M5", + US_SC_DEVICE, US_PR_BULK, NULL, + 0 ), + /* Submitted by Antoine Mairesse <antoine.mairesse@free.fr> */ UNUSUAL_DEV( 0x0ed1, 0x6660, 0x0100, 0x0300, "USB", @@ -1162,6 +1193,13 @@ UNUSUAL_DEV( 0x55aa, 0xa103, 0x0000, 0x9999, US_FL_SINGLE_LUN), #endif +/* Reported by Andrew Simmons <andrew.simmons@gmail.com> */ +UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001, + "DataStor", + "USB4500 FW1.04", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_FIX_CAPACITY), + /* Control/Bulk transport for all SubClass values */ USUAL_DEV(US_SC_RBC, US_PR_CB, USB_US_TYPE_STOR), USUAL_DEV(US_SC_8020, US_PR_CB, USB_US_TYPE_STOR), diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c index 38d22729b129..c9a7cdf6d543 100644 --- a/drivers/video/gbefb.c +++ b/drivers/video/gbefb.c @@ -1243,7 +1243,7 @@ static int __devexit gbefb_remove(struct platform_device* p_dev) (void *)gbe_tiles.cpu, gbe_tiles.dma); release_mem_region(GBE_BASE, sizeof(struct sgi_gbe)); iounmap(gbe); - gbefb_remove_sysfs(dev); + gbefb_remove_sysfs(&p_dev->dev); framebuffer_release(info); return 0; diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c index 747602aa5615..a2e201dc40f7 100644 --- a/drivers/video/neofb.c +++ b/drivers/video/neofb.c @@ -843,6 +843,9 @@ static int neofb_set_par(struct fb_info *info) par->SysIfaceCntl2 = 0xc0; /* VESA Bios sets this to 0x80! */ + /* Initialize: by default, we want display config register to be read */ + par->PanelDispCntlRegRead = 1; + /* Enable any user specified display devices. */ par->PanelDispCntlReg1 = 0x00; if (par->internal_display) @@ -1334,6 +1337,18 @@ static int neofb_blank(int blank_mode, struct fb_info *info) struct neofb_par *par = info->par; int seqflags, lcdflags, dpmsflags, reg; + + /* + * Reload the value stored in the register, if sensible. It might have + * been changed via FN keystroke. 
+ */ + if (par->PanelDispCntlRegRead) { + neoUnlock(); + par->PanelDispCntlReg1 = vga_rgfx(NULL, 0x20) & 0x03; + neoLock(&par->state); + } + par->PanelDispCntlRegRead = !blank_mode; + switch (blank_mode) { case FB_BLANK_POWERDOWN: /* powerdown - both sync lines down */ seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */ @@ -1366,7 +1381,7 @@ static int neofb_blank(int blank_mode, struct fb_info *info) case FB_BLANK_NORMAL: /* just blank screen (backlight stays on) */ seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */ lcdflags = par->PanelDispCntlReg1 & 0x02; /* LCD normal */ - dpmsflags = 0; /* no hsync/vsync suppression */ + dpmsflags = 0x00; /* no hsync/vsync suppression */ break; case FB_BLANK_UNBLANK: /* unblank */ seqflags = 0; /* Enable sequencer */ diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c index d574dd3c9c8a..9451932fbaf2 100644 --- a/drivers/video/s3c2410fb.c +++ b/drivers/video/s3c2410fb.c @@ -82,7 +82,6 @@ #include <linux/fb.h> #include <linux/init.h> #include <linux/dma-mapping.h> -#include <linux/string.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/wait.h> diff --git a/fs/cifs/file.c b/fs/cifs/file.c index d17c97d07c80..675bd2568297 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1442,13 +1442,15 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data, &bytes_read, &smb_read_data, &buf_type); pSMBr = (struct smb_com_read_rsp *)smb_read_data; - if (copy_to_user(current_offset, - smb_read_data + 4 /* RFC1001 hdr */ - + le16_to_cpu(pSMBr->DataOffset), - bytes_read)) { - rc = -EFAULT; - } if (smb_read_data) { + if (copy_to_user(current_offset, + smb_read_data + + 4 /* RFC1001 length field */ + + le16_to_cpu(pSMBr->DataOffset), + bytes_read)) { + rc = -EFAULT; + } + if(buf_type == CIFS_SMALL_BUFFER) cifs_small_buf_release(smb_read_data); else if(buf_type == CIFS_LARGE_BUFFER) diff --git a/fs/compat.c b/fs/compat.c index a2ba78bdf7f7..5333c7d7427f 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -1757,7 +1757,7 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, goto sticky; rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)); rtv.tv_sec = timeout; - if (compat_timeval_compare(&rtv, &tv) < 0) + if (compat_timeval_compare(&rtv, &tv) >= 0) rtv = tv; if (copy_to_user(tvp, &rtv, sizeof(rtv))) { sticky: @@ -1834,7 +1834,7 @@ asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp, rts.tv_sec++; rts.tv_nsec -= NSEC_PER_SEC; } - if (compat_timespec_compare(&rts, &ts) < 0) + if (compat_timespec_compare(&rts, &ts) >= 0) rts = ts; copy_to_user(tsp, &rts, sizeof(rts)); } @@ -1934,7 +1934,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * 1000; rts.tv_sec = timeout; - if (compat_timespec_compare(&rts, &ts) < 0) + if (compat_timespec_compare(&rts, &ts) >= 0) rts = ts; if (copy_to_user(tsp, &rts, sizeof(rts))) { sticky: diff --git a/fs/exec.c b/fs/exec.c index 055378d2513e..0e1c95074d42 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1403,7 +1403,7 @@ static void zap_threads (struct mm_struct *mm) do_each_thread(g,p) { if (mm == p->mm && p != tsk && p->ptrace && p->parent->mm == mm) { - __ptrace_unlink(p); + __ptrace_detach(p, 0); } } while_each_thread(g,p); write_unlock_irq(&tasklist_lock); diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c index a2ca3107d475..86ae8e93adb9 100644 --- a/fs/ext2/xattr.c +++ b/fs/ext2/xattr.c @@ -792,18 +792,20 @@ ext2_xattr_delete_inode(struct inode *inode) 
ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1); get_bh(bh); bforget(bh); + unlock_buffer(bh); } else { HDR(bh)->h_refcount = cpu_to_le32( le32_to_cpu(HDR(bh)->h_refcount) - 1); if (ce) mb_cache_entry_release(ce); + ea_bdebug(bh, "refcount now=%d", + le32_to_cpu(HDR(bh)->h_refcount)); + unlock_buffer(bh); mark_buffer_dirty(bh); if (IS_SYNC(inode)) sync_dirty_buffer(bh); DQUOT_FREE_BLOCK(inode, 1); } - ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount) - 1); - unlock_buffer(bh); EXT2_I(inode)->i_file_acl = 0; cleanup: diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index f556a0d5c0d3..0c9a2ee54c91 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -66,6 +66,12 @@ static void restore_sigs(sigset_t *oldset) sigprocmask(SIG_SETMASK, oldset, NULL); } +/* + * Reset request, so that it can be reused + * + * The caller must be _very_ careful to make sure, that it is holding + * the only reference to req + */ void fuse_reset_request(struct fuse_req *req) { int preallocated = req->preallocated; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 296351615b00..6f05379b0a0d 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -116,9 +116,14 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir) /* Special case for failed iget in CREATE */ static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) { - u64 nodeid = req->in.h.nodeid; - fuse_reset_request(req); - fuse_send_forget(fc, req, nodeid, 1); + /* If called from end_io_requests(), req has more than one + reference and fuse_reset_request() cannot work */ + if (fc->connected) { + u64 nodeid = req->in.h.nodeid; + fuse_reset_request(req); + fuse_send_forget(fc, req, nodeid, 1); + } else + fuse_put_request(fc, req); } void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff, diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index e6265a0b56b8..543ed543d1e5 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c @@ -24,75 +24,29 @@ #include <linux/slab.h> /* - * Unlink a buffer from a transaction checkpoint list. + * Unlink a buffer from a transaction. * * Called with j_list_lock held. */ -static void __buffer_unlink_first(struct journal_head *jh) +static inline void __buffer_unlink(struct journal_head *jh) { transaction_t *transaction; transaction = jh->b_cp_transaction; + jh->b_cp_transaction = NULL; jh->b_cpnext->b_cpprev = jh->b_cpprev; jh->b_cpprev->b_cpnext = jh->b_cpnext; - if (transaction->t_checkpoint_list == jh) { + if (transaction->t_checkpoint_list == jh) transaction->t_checkpoint_list = jh->b_cpnext; - if (transaction->t_checkpoint_list == jh) - transaction->t_checkpoint_list = NULL; - } -} - -/* - * Unlink a buffer from a transaction checkpoint(io) list. - * - * Called with j_list_lock held. 
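The list surgery in __buffer_unlink() above is ordinary circular doubly-linked list removal with an external head pointer. Stripped of journal types it is just the following standalone model (not kernel code; node corresponds to journal_head and *head to t_checkpoint_list):

#include <stddef.h>

struct node {
        struct node *next, *prev;
};

/*
 * Unlink @n from the circular list whose head is *@head: fix up the
 * neighbours, move the head if it pointed at @n, and clear it entirely
 * when @n was the only element (mirrors __buffer_unlink() above).
 */
static void circular_unlink(struct node **head, struct node *n)
{
        n->next->prev = n->prev;
        n->prev->next = n->next;

        if (*head == n)
                *head = n->next;
        if (*head == n)         /* n linked to itself: list is now empty */
                *head = NULL;
}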
- */ - -static inline void __buffer_unlink(struct journal_head *jh) -{ - transaction_t *transaction; - - transaction = jh->b_cp_transaction; - - __buffer_unlink_first(jh); - if (transaction->t_checkpoint_io_list == jh) { - transaction->t_checkpoint_io_list = jh->b_cpnext; - if (transaction->t_checkpoint_io_list == jh) - transaction->t_checkpoint_io_list = NULL; - } -} - -/* - * Move a buffer from the checkpoint list to the checkpoint io list - * - * Called with j_list_lock held - */ - -static inline void __buffer_relink_io(struct journal_head *jh) -{ - transaction_t *transaction; - - transaction = jh->b_cp_transaction; - __buffer_unlink_first(jh); - - if (!transaction->t_checkpoint_io_list) { - jh->b_cpnext = jh->b_cpprev = jh; - } else { - jh->b_cpnext = transaction->t_checkpoint_io_list; - jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev; - jh->b_cpprev->b_cpnext = jh; - jh->b_cpnext->b_cpprev = jh; - } - transaction->t_checkpoint_io_list = jh; + if (transaction->t_checkpoint_list == jh) + transaction->t_checkpoint_list = NULL; } /* * Try to release a checkpointed buffer from its transaction. - * Returns 1 if we released it and 2 if we also released the - * whole transaction. - * + * Returns 1 if we released it. * Requires j_list_lock * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it */ @@ -103,11 +57,12 @@ static int __try_to_free_cp_buf(struct journal_head *jh) if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) { JBUFFER_TRACE(jh, "remove from checkpoint list"); - ret = __journal_remove_checkpoint(jh) + 1; + __journal_remove_checkpoint(jh); jbd_unlock_bh_state(bh); journal_remove_journal_head(bh); BUFFER_TRACE(bh, "release"); __brelse(bh); + ret = 1; } else { jbd_unlock_bh_state(bh); } @@ -162,53 +117,83 @@ static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh) } /* - * Clean up transaction's list of buffers submitted for io. - * We wait for any pending IO to complete and remove any clean - * buffers. Note that we take the buffers in the opposite ordering - * from the one in which they were submitted for IO. + * Clean up a transaction's checkpoint list. + * + * We wait for any pending IO to complete and make sure any clean + * buffers are removed from the transaction. + * + * Return 1 if we performed any actions which might have destroyed the + * checkpoint. (journal_remove_checkpoint() deletes the transaction when + * the last checkpoint buffer is cleansed) * * Called with j_list_lock held. 
*/ - -static void __wait_cp_io(journal_t *journal, transaction_t *transaction) +static int __cleanup_transaction(journal_t *journal, transaction_t *transaction) { - struct journal_head *jh; + struct journal_head *jh, *next_jh, *last_jh; struct buffer_head *bh; - tid_t this_tid; - int released = 0; - - this_tid = transaction->t_tid; -restart: - /* Didn't somebody clean up the transaction in the meanwhile */ - if (journal->j_checkpoint_transactions != transaction || - transaction->t_tid != this_tid) - return; - while (!released && transaction->t_checkpoint_io_list) { - jh = transaction->t_checkpoint_io_list; + int ret = 0; + + assert_spin_locked(&journal->j_list_lock); + jh = transaction->t_checkpoint_list; + if (!jh) + return 0; + + last_jh = jh->b_cpprev; + next_jh = jh; + do { + jh = next_jh; bh = jh2bh(jh); - if (!jbd_trylock_bh_state(bh)) { - jbd_sync_bh(journal, bh); - spin_lock(&journal->j_list_lock); - goto restart; - } if (buffer_locked(bh)) { atomic_inc(&bh->b_count); spin_unlock(&journal->j_list_lock); - jbd_unlock_bh_state(bh); wait_on_buffer(bh); /* the journal_head may have gone by now */ BUFFER_TRACE(bh, "brelse"); __brelse(bh); - spin_lock(&journal->j_list_lock); - goto restart; + goto out_return_1; } + /* - * Now in whatever state the buffer currently is, we know that - * it has been written out and so we can drop it from the list + * This is foul */ - released = __journal_remove_checkpoint(jh); - jbd_unlock_bh_state(bh); - } + if (!jbd_trylock_bh_state(bh)) { + jbd_sync_bh(journal, bh); + goto out_return_1; + } + + if (jh->b_transaction != NULL) { + transaction_t *t = jh->b_transaction; + tid_t tid = t->t_tid; + + spin_unlock(&journal->j_list_lock); + jbd_unlock_bh_state(bh); + log_start_commit(journal, tid); + log_wait_commit(journal, tid); + goto out_return_1; + } + + /* + * AKPM: I think the buffer_jbddirty test is redundant - it + * shouldn't have NULL b_transaction? + */ + next_jh = jh->b_cpnext; + if (!buffer_dirty(bh) && !buffer_jbddirty(bh)) { + BUFFER_TRACE(bh, "remove from checkpoint"); + __journal_remove_checkpoint(jh); + jbd_unlock_bh_state(bh); + journal_remove_journal_head(bh); + __brelse(bh); + ret = 1; + } else { + jbd_unlock_bh_state(bh); + } + } while (jh != last_jh); + + return ret; +out_return_1: + spin_lock(&journal->j_list_lock); + return 1; } #define NR_BATCH 64 @@ -218,7 +203,9 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count) { int i; + spin_unlock(&journal->j_list_lock); ll_rw_block(SWRITE, *batch_count, bhs); + spin_lock(&journal->j_list_lock); for (i = 0; i < *batch_count; i++) { struct buffer_head *bh = bhs[i]; clear_buffer_jwrite(bh); @@ -234,46 +221,19 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count) * Return 1 if something happened which requires us to abort the current * scan of the checkpoint list. * - * Called with j_list_lock held and drops it if 1 is returned + * Called with j_list_lock held. 
* Called under jbd_lock_bh_state(jh2bh(jh)), and drops it */ -static int __process_buffer(journal_t *journal, struct journal_head *jh, - struct buffer_head **bhs, int *batch_count) +static int __flush_buffer(journal_t *journal, struct journal_head *jh, + struct buffer_head **bhs, int *batch_count, + int *drop_count) { struct buffer_head *bh = jh2bh(jh); int ret = 0; - if (buffer_locked(bh)) { - get_bh(bh); - spin_unlock(&journal->j_list_lock); - jbd_unlock_bh_state(bh); - wait_on_buffer(bh); - /* the journal_head may have gone by now */ - BUFFER_TRACE(bh, "brelse"); - put_bh(bh); - ret = 1; - } - else if (jh->b_transaction != NULL) { - transaction_t *t = jh->b_transaction; - tid_t tid = t->t_tid; + if (buffer_dirty(bh) && !buffer_locked(bh) && jh->b_jlist == BJ_None) { + J_ASSERT_JH(jh, jh->b_transaction == NULL); - spin_unlock(&journal->j_list_lock); - jbd_unlock_bh_state(bh); - log_start_commit(journal, tid); - log_wait_commit(journal, tid); - ret = 1; - } - else if (!buffer_dirty(bh)) { - J_ASSERT_JH(jh, !buffer_jbddirty(bh)); - BUFFER_TRACE(bh, "remove from checkpoint"); - __journal_remove_checkpoint(jh); - spin_unlock(&journal->j_list_lock); - jbd_unlock_bh_state(bh); - journal_remove_journal_head(bh); - put_bh(bh); - ret = 1; - } - else { /* * Important: we are about to write the buffer, and * possibly block, while still holding the journal lock. @@ -286,30 +246,45 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh, J_ASSERT_BH(bh, !buffer_jwrite(bh)); set_buffer_jwrite(bh); bhs[*batch_count] = bh; - __buffer_relink_io(jh); jbd_unlock_bh_state(bh); (*batch_count)++; if (*batch_count == NR_BATCH) { - spin_unlock(&journal->j_list_lock); __flush_batch(journal, bhs, batch_count); ret = 1; } + } else { + int last_buffer = 0; + if (jh->b_cpnext == jh) { + /* We may be about to drop the transaction. Tell the + * caller that the lists have changed. + */ + last_buffer = 1; + } + if (__try_to_free_cp_buf(jh)) { + (*drop_count)++; + ret = last_buffer; + } } return ret; } /* - * Perform an actual checkpoint. We take the first transaction on the - * list of transactions to be checkpointed and send all its buffers - * to disk. We submit larger chunks of data at once. + * Perform an actual checkpoint. We don't write out only enough to + * satisfy the current blocked requests: rather we submit a reasonably + * sized chunk of the outstanding data to disk at once for + * efficiency. __log_wait_for_space() will retry if we didn't free enough. * + * However, we _do_ take into account the amount requested so that once + * the IO has been queued, we can return as soon as enough of it has + * completed to disk. + * * The journal should be locked before calling this function. */ int log_do_checkpoint(journal_t *journal) { - transaction_t *transaction; - tid_t this_tid; int result; + int batch_count = 0; + struct buffer_head *bhs[NR_BATCH]; jbd_debug(1, "Start checkpoint\n"); @@ -324,70 +299,79 @@ int log_do_checkpoint(journal_t *journal) return result; /* - * OK, we need to start writing disk blocks. Take one transaction - * and write it. + * OK, we need to start writing disk blocks. Try to free up a + * quarter of the log in a single checkpoint if we can. 
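The __flush_batch() change above drops j_list_lock around the block-layer submission and retakes it afterwards, so the spinlock is never held across I/O. A rough sketch of that collect-then-submit pattern with stubbed lock and I/O helpers (hypothetical names, not the jbd API):

    #define BATCH 64

    /* Stubs standing in for spin_lock(&j_list_lock) and ll_rw_block(). */
    static void lock_list(void) { }
    static void unlock_list(void) { }
    static void submit_io(void **items, int n) { (void)items; (void)n; }

    /* Gather up to BATCH items under the lock, then drop the lock while the
     * potentially blocking submission runs, then continue the scan. */
    static void flush_batch(void **items, int *count)
    {
            unlock_list();
            submit_io(items, *count);
            lock_list();
            *count = 0;
    }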
*/ - spin_lock(&journal->j_list_lock); - if (!journal->j_checkpoint_transactions) - goto out; - transaction = journal->j_checkpoint_transactions; - this_tid = transaction->t_tid; -restart: /* - * If someone cleaned up this transaction while we slept, we're - * done (maybe it's a new transaction, but it fell at the same - * address). + * AKPM: check this code. I had a feeling a while back that it + * degenerates into a busy loop at unmount time. */ - if (journal->j_checkpoint_transactions == transaction && - transaction->t_tid == this_tid) { - int batch_count = 0; - struct buffer_head *bhs[NR_BATCH]; - struct journal_head *jh; - int retry = 0; - - while (!retry && transaction->t_checkpoint_list) { + spin_lock(&journal->j_list_lock); + while (journal->j_checkpoint_transactions) { + transaction_t *transaction; + struct journal_head *jh, *last_jh, *next_jh; + int drop_count = 0; + int cleanup_ret, retry = 0; + tid_t this_tid; + + transaction = journal->j_checkpoint_transactions; + this_tid = transaction->t_tid; + jh = transaction->t_checkpoint_list; + last_jh = jh->b_cpprev; + next_jh = jh; + do { struct buffer_head *bh; - jh = transaction->t_checkpoint_list; + jh = next_jh; + next_jh = jh->b_cpnext; bh = jh2bh(jh); if (!jbd_trylock_bh_state(bh)) { jbd_sync_bh(journal, bh); + spin_lock(&journal->j_list_lock); retry = 1; break; } - retry = __process_buffer(journal, jh, bhs, - &batch_count); - if (!retry && - lock_need_resched(&journal->j_list_lock)) { - spin_unlock(&journal->j_list_lock); + retry = __flush_buffer(journal, jh, bhs, &batch_count, &drop_count); + if (cond_resched_lock(&journal->j_list_lock)) { retry = 1; break; } - } + } while (jh != last_jh && !retry); if (batch_count) { - if (!retry) { - spin_unlock(&journal->j_list_lock); - retry = 1; - } __flush_batch(journal, bhs, &batch_count); + retry = 1; } - if (retry) { - spin_lock(&journal->j_list_lock); - goto restart; - } /* - * Now we have cleaned up the first transaction's checkpoint - * list. Let's clean up the second one. + * If someone cleaned up this transaction while we slept, we're + * done + */ + if (journal->j_checkpoint_transactions != transaction) + break; + if (retry) + continue; + /* + * Maybe it's a new transaction, but it fell at the same + * address */ - __wait_cp_io(journal, transaction); + if (transaction->t_tid != this_tid) + continue; + /* + * We have walked the whole transaction list without + * finding anything to write to disk. We had better be + * able to make some progress or we are in trouble. + */ + cleanup_ret = __cleanup_transaction(journal, transaction); + J_ASSERT(drop_count != 0 || cleanup_ret != 0); + if (journal->j_checkpoint_transactions != transaction) + break; } -out: spin_unlock(&journal->j_list_lock); result = cleanup_journal_tail(journal); if (result < 0) return result; + return 0; } @@ -472,91 +456,52 @@ int cleanup_journal_tail(journal_t *journal) /* Checkpoint list management */ /* - * journal_clean_one_cp_list - * - * Find all the written-back checkpoint buffers in the given list and release them. - * - * Called with the journal locked. - * Called with j_list_lock held. 
- * Returns number of bufers reaped (for debug) - */ - -static int journal_clean_one_cp_list(struct journal_head *jh, int *released) -{ - struct journal_head *last_jh; - struct journal_head *next_jh = jh; - int ret, freed = 0; - - *released = 0; - if (!jh) - return 0; - - last_jh = jh->b_cpprev; - do { - jh = next_jh; - next_jh = jh->b_cpnext; - /* Use trylock because of the ranking */ - if (jbd_trylock_bh_state(jh2bh(jh))) { - ret = __try_to_free_cp_buf(jh); - if (ret) { - freed++; - if (ret == 2) { - *released = 1; - return freed; - } - } - } - /* - * This function only frees up some memory if possible so we - * dont have an obligation to finish processing. Bail out if - * preemption requested: - */ - if (need_resched()) - return freed; - } while (jh != last_jh); - - return freed; -} - -/* * journal_clean_checkpoint_list * * Find all the written-back checkpoint buffers in the journal and release them. * * Called with the journal locked. * Called with j_list_lock held. - * Returns number of buffers reaped (for debug) + * Returns number of bufers reaped (for debug) */ int __journal_clean_checkpoint_list(journal_t *journal) { transaction_t *transaction, *last_transaction, *next_transaction; - int ret = 0, released; + int ret = 0; transaction = journal->j_checkpoint_transactions; - if (!transaction) + if (transaction == 0) goto out; last_transaction = transaction->t_cpprev; next_transaction = transaction; do { + struct journal_head *jh; + transaction = next_transaction; next_transaction = transaction->t_cpnext; - ret += journal_clean_one_cp_list(transaction-> - t_checkpoint_list, &released); - if (need_resched()) - goto out; - if (released) - continue; - /* - * It is essential that we are as careful as in the case of - * t_checkpoint_list with removing the buffer from the list as - * we can possibly see not yet submitted buffers on io_list - */ - ret += journal_clean_one_cp_list(transaction-> - t_checkpoint_io_list, &released); - if (need_resched()) - goto out; + jh = transaction->t_checkpoint_list; + if (jh) { + struct journal_head *last_jh = jh->b_cpprev; + struct journal_head *next_jh = jh; + + do { + jh = next_jh; + next_jh = jh->b_cpnext; + /* Use trylock because of the ranknig */ + if (jbd_trylock_bh_state(jh2bh(jh))) + ret += __try_to_free_cp_buf(jh); + /* + * This function only frees up some memory + * if possible so we dont have an obligation + * to finish processing. Bail out if preemption + * requested: + */ + if (need_resched()) + goto out; + } while (jh != last_jh); + } } while (transaction != last_transaction); out: return ret; @@ -571,22 +516,18 @@ out: * buffer updates committed in that transaction have safely been stored * elsewhere on disk. To achieve this, all of the buffers in a * transaction need to be maintained on the transaction's checkpoint - * lists until they have been rewritten, at which point this function is + * list until they have been rewritten, at which point this function is * called to remove the buffer from the existing transaction's - * checkpoint lists. - * - * The function returns 1 if it frees the transaction, 0 otherwise. + * checkpoint list. * * This function is called with the journal locked. * This function is called with j_list_lock held. 
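The restored cleanup loop above takes each buffer's state lock only with a trylock (to respect the lock ranking against j_list_lock) and bails out as soon as preemption is requested, since it frees memory opportunistically rather than exhaustively. The shape of that scan, sketched with stub primitives:

    #include <stdbool.h>

    static bool trylock_item(void *item) { (void)item; return true; }
    static int  try_release(void *item)  { (void)item; return 1; } /* drops the item lock */
    static bool preempt_requested(void)  { return false; }

    /* Opportunistic scan: skip contended items, stop early when asked to. */
    static int scan_and_release(void **items, int n)
    {
            int freed = 0;

            for (int i = 0; i < n; i++) {
                    if (trylock_item(items[i]))
                            freed += try_release(items[i]);
                    if (preempt_requested())
                            break;
            }
            return freed;
    }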
- * This function is called with jbd_lock_bh_state(jh2bh(jh)) */ -int __journal_remove_checkpoint(struct journal_head *jh) +void __journal_remove_checkpoint(struct journal_head *jh) { transaction_t *transaction; journal_t *journal; - int ret = 0; JBUFFER_TRACE(jh, "entry"); @@ -597,10 +538,8 @@ int __journal_remove_checkpoint(struct journal_head *jh) journal = transaction->t_journal; __buffer_unlink(jh); - jh->b_cp_transaction = NULL; - if (transaction->t_checkpoint_list != NULL || - transaction->t_checkpoint_io_list != NULL) + if (transaction->t_checkpoint_list != NULL) goto out; JBUFFER_TRACE(jh, "transaction has no more buffers"); @@ -626,10 +565,8 @@ int __journal_remove_checkpoint(struct journal_head *jh) /* Just in case anybody was waiting for more transactions to be checkpointed... */ wake_up(&journal->j_wait_logspace); - ret = 1; out: JBUFFER_TRACE(jh, "exit"); - return ret; } /* @@ -691,7 +628,6 @@ void __journal_drop_transaction(journal_t *journal, transaction_t *transaction) J_ASSERT(transaction->t_shadow_list == NULL); J_ASSERT(transaction->t_log_list == NULL); J_ASSERT(transaction->t_checkpoint_list == NULL); - J_ASSERT(transaction->t_checkpoint_io_list == NULL); J_ASSERT(transaction->t_updates == 0); J_ASSERT(journal->j_committing_transaction != transaction); J_ASSERT(journal->j_running_transaction != transaction); diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 29e62d98bae6..002ad2bbc769 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c @@ -829,8 +829,7 @@ restart_loop: journal->j_committing_transaction = NULL; spin_unlock(&journal->j_state_lock); - if (commit_transaction->t_checkpoint_list == NULL && - commit_transaction->t_checkpoint_io_list == NULL) { + if (commit_transaction->t_checkpoint_list == NULL) { __journal_drop_transaction(journal, commit_transaction); } else { if (journal->j_checkpoint_transactions == NULL) { diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index 3eaf6e701087..da6354baa0b8 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c @@ -111,9 +111,10 @@ long nlmclnt_block(struct nlm_rqst *req, long timeout) /* * The server lockd has called us back to tell us the lock was granted */ -u32 -nlmclnt_grant(struct nlm_lock *lock) +u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock) { + const struct file_lock *fl = &lock->fl; + const struct nfs_fh *fh = &lock->fh; struct nlm_wait *block; u32 res = nlm_lck_denied; @@ -122,14 +123,20 @@ nlmclnt_grant(struct nlm_lock *lock) * Warning: must not use cookie to match it! */ list_for_each_entry(block, &nlm_blocked, b_list) { - if (nlm_compare_locks(block->b_lock, &lock->fl)) { - /* Alright, we found a lock. Set the return status - * and wake up the caller - */ - block->b_status = NLM_LCK_GRANTED; - wake_up(&block->b_wait); - res = nlm_granted; - } + struct file_lock *fl_blocked = block->b_lock; + + if (!nlm_compare_locks(fl_blocked, fl)) + continue; + if (!nlm_cmp_addr(&block->b_host->h_addr, addr)) + continue; + if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_dentry->d_inode) ,fh) != 0) + continue; + /* Alright, we found a lock. 
Set the return status + * and wake up the caller + */ + block->b_status = NLM_LCK_GRANTED; + wake_up(&block->b_wait); + res = nlm_granted; } return res; } diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c index 4063095d849e..b10f913aa06a 100644 --- a/fs/lockd/svc4proc.c +++ b/fs/lockd/svc4proc.c @@ -228,7 +228,7 @@ nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, resp->cookie = argp->cookie; dprintk("lockd: GRANTED called\n"); - resp->status = nlmclnt_grant(&argp->lock); + resp->status = nlmclnt_grant(&rqstp->rq_addr, &argp->lock); dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); return rpc_success; } diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index 3bc437e0cf5b..35681d9cf1fc 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c @@ -256,7 +256,7 @@ nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, resp->cookie = argp->cookie; dprintk("lockd: GRANTED called\n"); - resp->status = nlmclnt_grant(&argp->lock); + resp->status = nlmclnt_grant(&rqstp->rq_addr, &argp->lock); dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); return rpc_success; } diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index 42eb53b5293b..23ceaa7127b4 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h @@ -208,6 +208,9 @@ static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm, #define DLM_LOCK_RES_IN_PROGRESS 0x00000010 #define DLM_LOCK_RES_MIGRATING 0x00000020 +/* max milliseconds to wait to sync up a network failure with a node death */ +#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000) + #define DLM_PURGE_INTERVAL_MS (8 * 1000) struct dlm_lock_resource @@ -658,6 +661,7 @@ int dlm_launch_recovery_thread(struct dlm_ctxt *dlm); void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); void dlm_wait_for_recovery(struct dlm_ctxt *dlm); int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node); +int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout); void dlm_put(struct dlm_ctxt *dlm); struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c index 6001b22a997d..f66e2d818ccd 100644 --- a/fs/ocfs2/dlm/dlmconvert.c +++ b/fs/ocfs2/dlm/dlmconvert.c @@ -392,6 +392,11 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, } else { mlog_errno(tmpret); if (dlm_is_host_down(tmpret)) { + /* instead of logging the same network error over + * and over, sleep here and wait for the heartbeat + * to notice the node is dead. times out after 5s. 
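With the nlmclnt_grant() change above, a GRANTED callback wakes a blocked waiter only when the lock, the server's address and the NFS filehandle all match, not the lock alone. A reduced sketch of that three-key match (hypothetical flat fields instead of struct file_lock, sockaddr_in and nfs_fh):

    #include <stdbool.h>
    #include <string.h>

    struct waiter {
            int  lock_id;
            char server[16];
            char fh[32];
            int  granted;
    };

    /* Wake every waiter for which all three keys match. */
    static bool grant_matching(struct waiter *w, int n, int lock_id,
                               const char *server, const char *fh)
    {
            bool found = false;

            for (int i = 0; i < n; i++) {
                    if (w[i].lock_id != lock_id)
                            continue;
                    if (memcmp(w[i].server, server, sizeof(w[i].server)))
                            continue;
                    if (memcmp(w[i].fh, fh, sizeof(w[i].fh)))
                            continue;
                    w[i].granted = 1;
                    found = true;
            }
            return found;
    }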
*/ + dlm_wait_for_node_death(dlm, res->owner, + DLM_NODE_DEATH_WAIT_MAX); ret = DLM_RECOVERING; mlog(0, "node %u died so returning DLM_RECOVERING " "from convert message!\n", res->owner); @@ -421,7 +426,7 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data) struct dlm_lockstatus *lksb; enum dlm_status status = DLM_NORMAL; u32 flags; - int call_ast = 0, kick_thread = 0; + int call_ast = 0, kick_thread = 0, ast_reserved = 0; if (!dlm_grab(dlm)) { dlm_error(DLM_REJECTED); @@ -490,6 +495,7 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data) status = __dlm_lockres_state_to_status(res); if (status == DLM_NORMAL) { __dlm_lockres_reserve_ast(res); + ast_reserved = 1; res->state |= DLM_LOCK_RES_IN_PROGRESS; status = __dlmconvert_master(dlm, res, lock, flags, cnv->requested_type, @@ -512,10 +518,10 @@ leave: else dlm_lock_put(lock); - /* either queue the ast or release it */ + /* either queue the ast or release it, if reserved */ if (call_ast) dlm_queue_ast(dlm, lock); - else + else if (ast_reserved) dlm_lockres_release_ast(dlm, res); if (kick_thread) diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index d1a0038557a3..671d4ff222cc 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c @@ -220,6 +220,17 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, dlm_error(status); dlm_revert_pending_lock(res, lock); dlm_lock_put(lock); + } else if (dlm_is_recovery_lock(res->lockname.name, + res->lockname.len)) { + /* special case for the $RECOVERY lock. + * there will never be an AST delivered to put + * this lock on the proper secondary queue + * (granted), so do it manually. */ + mlog(0, "%s: $RECOVERY lock for this node (%u) is " + "mastered by %u; got lock, manually granting (no ast)\n", + dlm->name, dlm->node_num, res->owner); + list_del_init(&lock->list); + list_add_tail(&lock->list, &res->granted); } spin_unlock(&res->spinlock); @@ -646,7 +657,19 @@ retry_lock: mlog(0, "retrying lock with migration/" "recovery/in progress\n"); msleep(100); - dlm_wait_for_recovery(dlm); + /* no waiting for dlm_reco_thread */ + if (recovery) { + if (status == DLM_RECOVERING) { + mlog(0, "%s: got RECOVERING " + "for $REOCVERY lock, master " + "was %u\n", dlm->name, + res->owner); + dlm_wait_for_node_death(dlm, res->owner, + DLM_NODE_DEATH_WAIT_MAX); + } + } else { + dlm_wait_for_recovery(dlm); + } goto retry_lock; } diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index a3194fe173d9..2e2e95e69499 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -2482,7 +2482,9 @@ top: atomic_set(&mle->woken, 1); spin_unlock(&mle->spinlock); wake_up(&mle->wq); - /* final put will take care of list removal */ + /* do not need events any longer, so detach + * from heartbeat */ + __dlm_mle_detach_hb_events(dlm, mle); __dlm_put_mle(mle); } continue; @@ -2537,6 +2539,9 @@ top: spin_unlock(&res->spinlock); dlm_lockres_put(res); + /* about to get rid of mle, detach from heartbeat */ + __dlm_mle_detach_hb_events(dlm, mle); + /* dump the mle */ spin_lock(&dlm->master_lock); __dlm_put_mle(mle); diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 186e9a76aa58..ed76bda1a534 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -278,6 +278,24 @@ int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node) return dead; } +int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) +{ + if (timeout) { + mlog(ML_NOTICE, "%s: waiting %dms for notification of " + "death of node %u\n", 
dlm->name, timeout, node); + wait_event_timeout(dlm->dlm_reco_thread_wq, + dlm_is_node_dead(dlm, node), + msecs_to_jiffies(timeout)); + } else { + mlog(ML_NOTICE, "%s: waiting indefinitely for notification " + "of death of node %u\n", dlm->name, node); + wait_event(dlm->dlm_reco_thread_wq, + dlm_is_node_dead(dlm, node)); + } + /* for now, return 0 */ + return 0; +} + /* callers of the top-level api calls (dlmlock/dlmunlock) should * block on the dlm->reco.event when recovery is in progress. * the dlm recovery thread will set this state when it begins @@ -2032,6 +2050,30 @@ again: dlm->reco.new_master); status = -EEXIST; } else { + status = 0; + + /* see if recovery was already finished elsewhere */ + spin_lock(&dlm->spinlock); + if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { + status = -EINVAL; + mlog(0, "%s: got reco EX lock, but " + "node got recovered already\n", dlm->name); + if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { + mlog(ML_ERROR, "%s: new master is %u " + "but no dead node!\n", + dlm->name, dlm->reco.new_master); + BUG(); + } + } + spin_unlock(&dlm->spinlock); + } + + /* if this node has actually become the recovery master, + * set the master and send the messages to begin recovery */ + if (!status) { + mlog(0, "%s: dead=%u, this=%u, sending " + "begin_reco now\n", dlm->name, + dlm->reco.dead_node, dlm->node_num); status = dlm_send_begin_reco_message(dlm, dlm->reco.dead_node); /* this always succeeds */ diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index fa0bcac5ceae..d329c9df90ae 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1584,10 +1584,9 @@ static int ocfs2_commit_thread(void *arg) while (!(kthread_should_stop() && atomic_read(&journal->j_num_trans) == 0)) { - wait_event_interruptible_timeout(osb->checkpoint_event, - atomic_read(&journal->j_num_trans) - || kthread_should_stop(), - OCFS2_CHECKPOINT_INTERVAL); + wait_event_interruptible(osb->checkpoint_event, + atomic_read(&journal->j_num_trans) + || kthread_should_stop()); status = ocfs2_commit_cache(osb); if (status < 0) diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 7d0a816184fa..2f3a6acdac45 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -29,8 +29,6 @@ #include <linux/fs.h> #include <linux/jbd.h> -#define OCFS2_CHECKPOINT_INTERVAL (8 * HZ) - enum ocfs2_journal_state { OCFS2_JOURNAL_FREE = 0, OCFS2_JOURNAL_LOADED, diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c index 43de3ba83332..ab8894c3b9e5 100644 --- a/fs/reiserfs/xattr_acl.c +++ b/fs/reiserfs/xattr_acl.c @@ -228,7 +228,8 @@ struct posix_acl *reiserfs_get_acl(struct inode *inode, int type) acl = ERR_PTR(retval); } else { acl = posix_acl_from_disk(value, retval); - *p_acl = posix_acl_dup(acl); + if (!IS_ERR(acl)) + *p_acl = posix_acl_dup(acl); } kfree(value); diff --git a/fs/select.c b/fs/select.c index 6ce68a9c8976..1815a57d2255 100644 --- a/fs/select.c +++ b/fs/select.c @@ -404,7 +404,7 @@ asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, goto sticky; rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)); rtv.tv_sec = timeout; - if (timeval_compare(&rtv, &tv) < 0) + if (timeval_compare(&rtv, &tv) >= 0) rtv = tv; if (copy_to_user(tvp, &rtv, sizeof(rtv))) { sticky: @@ -471,7 +471,7 @@ asmlinkage long sys_pselect7(int n, fd_set __user *inp, fd_set __user *outp, rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * 1000; rts.tv_sec = timeout; - if (timespec_compare(&rts, &ts) < 0) + if (timespec_compare(&rts, &ts) >= 0) rts = ts; if (copy_to_user(tsp, &rts, 
sizeof(rts))) { sticky: @@ -775,7 +775,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * 1000; rts.tv_sec = timeout; - if (timespec_compare(&rts, &ts) < 0) + if (timespec_compare(&rts, &ts) >= 0) rts = ts; if (copy_to_user(tsp, &rts, sizeof(rts))) { sticky: diff --git a/include/asm-alpha/mman.h b/include/asm-alpha/mman.h index f6439532a262..5f24c755f577 100644 --- a/include/asm-alpha/mman.h +++ b/include/asm-alpha/mman.h @@ -42,7 +42,11 @@ #define MADV_WILLNEED 3 /* will need these pages */ #define MADV_SPACEAVAIL 5 /* ensure resources are available */ #define MADV_DONTNEED 6 /* don't need these pages */ -#define MADV_REMOVE 7 /* remove these pages & resources */ + +/* common/generic parameters */ +#define MADV_REMOVE 9 /* remove these pages & resources */ +#define MADV_DONTFORK 10 /* don't inherit across fork */ +#define MADV_DOFORK 11 /* do inherit across fork */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-arm/mman.h b/include/asm-arm/mman.h index f0bebca2ac21..54570d2e95b7 100644 --- a/include/asm-arm/mman.h +++ b/include/asm-arm/mman.h @@ -1,19 +1,7 @@ #ifndef __ARM_MMAN_H__ #define __ARM_MMAN_H__ -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_SEM 0x8 /* page may be used for atomic ops */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ - -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* don't use a file */ +#include <asm-generic/mman.h> #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ @@ -23,22 +11,7 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 - #endif /* __ARM_MMAN_H__ */ diff --git a/include/asm-arm/smp.h b/include/asm-arm/smp.h index 5a72e50ca9fc..fe45f7f61223 100644 --- a/include/asm-arm/smp.h +++ b/include/asm-arm/smp.h @@ -42,6 +42,11 @@ extern void show_ipi_list(struct seq_file *p); asmlinkage void do_IPI(struct pt_regs *regs); /* + * Setup the SMP cpu_possible_map + */ +extern void smp_init_cpus(void); + +/* * Move global data into per-processor storage. 
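The fs/select.c hunks above fix the direction of the comparison when writing the unused timeout back to userspace: the remaining time is copied out only if it does not exceed what the caller passed in, otherwise it is clamped to the original value. A small sketch of that clamp, assuming a simplified timeval:

    struct tv { long sec, usec; };

    static int tv_cmp(const struct tv *a, const struct tv *b)
    {
            if (a->sec != b->sec)
                    return a->sec < b->sec ? -1 : 1;
            if (a->usec != b->usec)
                    return a->usec < b->usec ? -1 : 1;
            return 0;
    }

    /* Never report more time remaining than was originally requested. */
    static struct tv clamp_remaining(struct tv remaining, struct tv requested)
    {
            if (tv_cmp(&remaining, &requested) >= 0)
                    remaining = requested;
            return remaining;
    }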
*/ extern void smp_store_cpu_info(unsigned int cpuid); diff --git a/include/asm-arm/unistd.h b/include/asm-arm/unistd.h index 77430d6178ae..8f331bbd39a8 100644 --- a/include/asm-arm/unistd.h +++ b/include/asm-arm/unistd.h @@ -309,7 +309,7 @@ #define __NR_mq_getsetattr (__NR_SYSCALL_BASE+279) #define __NR_waitid (__NR_SYSCALL_BASE+280) -#if 0 /* reserve these for un-muxing socketcall */ +#if defined(__ARM_EABI__) /* reserve these for un-muxing socketcall */ #define __NR_socket (__NR_SYSCALL_BASE+281) #define __NR_bind (__NR_SYSCALL_BASE+282) #define __NR_connect (__NR_SYSCALL_BASE+283) @@ -329,7 +329,7 @@ #define __NR_recvmsg (__NR_SYSCALL_BASE+297) #endif -#if 0 /* reserve these for un-muxing ipc */ +#if defined(__ARM_EABI__) /* reserve these for un-muxing ipc */ #define __NR_semop (__NR_SYSCALL_BASE+298) #define __NR_semget (__NR_SYSCALL_BASE+299) #define __NR_semctl (__NR_SYSCALL_BASE+300) @@ -347,7 +347,7 @@ #define __NR_request_key (__NR_SYSCALL_BASE+310) #define __NR_keyctl (__NR_SYSCALL_BASE+311) -#if 0 /* reserved for un-muxing ipc */ +#if defined(__ARM_EABI__) /* reserved for un-muxing ipc */ #define __NR_semtimedop (__NR_SYSCALL_BASE+312) #endif diff --git a/include/asm-arm26/mman.h b/include/asm-arm26/mman.h index 0ed7780541fa..4000a6c1b76b 100644 --- a/include/asm-arm26/mman.h +++ b/include/asm-arm26/mman.h @@ -1,19 +1,7 @@ #ifndef __ARM_MMAN_H__ #define __ARM_MMAN_H__ -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_SEM 0x8 /* page may be used for atomic ops */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ - -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* don't use a file */ +#include <asm-generic/mman.h> #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ @@ -23,22 +11,7 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 - #endif /* __ARM_MMAN_H__ */ diff --git a/include/asm-cris/mman.h b/include/asm-cris/mman.h index 5a382b8bf3f7..1c35e1b66b46 100644 --- a/include/asm-cris/mman.h +++ b/include/asm-cris/mman.h @@ -3,19 +3,7 @@ /* verbatim copy of asm-i386/ version */ -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_SEM 0x8 /* page may be used for atomic ops */ -#define PROT_NONE 0x0 /* page can not be 
accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ - -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* don't use a file */ +#include <asm-generic/mman.h> #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ @@ -25,22 +13,7 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 - #endif /* __CRIS_MMAN_H__ */ diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h index a59f684b4f33..5d9f84bfdcad 100644 --- a/include/asm-frv/atomic.h +++ b/include/asm-frv/atomic.h @@ -220,9 +220,9 @@ extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsig switch (sizeof(__xg_orig)) { \ case 4: \ asm volatile( \ - "swap%I0 %2,%M0" \ - : "+m"(*__xg_ptr), "=&r"(__xg_orig) \ - : "r"(x) \ + "swap%I0 %M0,%1" \ + : "+m"(*__xg_ptr), "=r"(__xg_orig) \ + : "1"(x) \ : "memory" \ ); \ break; \ diff --git a/include/asm-frv/cacheflush.h b/include/asm-frv/cacheflush.h index 3007deccb490..eaa5826bc1c8 100644 --- a/include/asm-frv/cacheflush.h +++ b/include/asm-frv/cacheflush.h @@ -87,5 +87,17 @@ static inline void flush_icache_page(struct vm_area_struct *vma, struct page *pa flush_icache_user_range(vma, page, page_to_phys(page), PAGE_SIZE); } +/* + * permit ptrace to access another process's address space through the icache + * and the dcache + */ +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ +do { \ + memcpy((dst), (src), (len)); \ + flush_icache_user_range((vma), (page), (vaddr), (len)); \ +} while(0) + +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy((dst), (src), (len)) #endif /* _ASM_CACHEFLUSH_H */ diff --git a/include/asm-frv/io.h b/include/asm-frv/io.h index 075369b1a34b..01247cb2bc39 100644 --- a/include/asm-frv/io.h +++ b/include/asm-frv/io.h @@ -251,7 +251,6 @@ static inline void writel(uint32_t datum, volatile void __iomem *addr) #define IOMAP_WRITETHROUGH 3 extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag); -extern void __iounmap(void __iomem *addr, unsigned long size); static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size) { diff --git a/include/asm-frv/mman.h b/include/asm-frv/mman.h index 8af4a41c255e..b4371e928683 100644 --- a/include/asm-frv/mman.h +++ b/include/asm-frv/mman.h @@ -1,19 +1,7 @@ #ifndef __ASM_MMAN_H__ #define __ASM_MMAN_H__ -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be 
executed */ -#define PROT_SEM 0x8 /* page may be used for atomic ops */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ - -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* don't use a file */ +#include <asm-generic/mman.h> #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ @@ -23,23 +11,8 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 - #endif /* __ASM_MMAN_H__ */ diff --git a/include/asm-frv/spr-regs.h b/include/asm-frv/spr-regs.h index ef472f058d9c..c2a541ef828d 100644 --- a/include/asm-frv/spr-regs.h +++ b/include/asm-frv/spr-regs.h @@ -98,6 +98,7 @@ #define TBR_TT_TRAP0 (0x80 << 4) #define TBR_TT_TRAP1 (0x81 << 4) #define TBR_TT_TRAP2 (0x82 << 4) +#define TBR_TT_TRAP3 (0x83 << 4) #define TBR_TT_TRAP126 (0xfe << 4) #define TBR_TT_BREAK (0xff << 4) diff --git a/include/asm-frv/system.h b/include/asm-frv/system.h index d2aea70a5f64..f72ff0c4dc0b 100644 --- a/include/asm-frv/system.h +++ b/include/asm-frv/system.h @@ -40,8 +40,84 @@ do { \ /* * interrupt flag manipulation + * - use virtual interrupt management since touching the PSR is slow + * - ICC2.Z: T if interrupts virtually disabled + * - ICC2.C: F if interrupts really disabled + * - if Z==1 upon interrupt: + * - C is set to 0 + * - interrupts are really disabled + * - entry.S returns immediately + * - uses TIHI (TRAP if Z==0 && C==0) #2 to really reenable interrupts + * - if taken, the trap: + * - sets ICC2.C + * - enables interrupts */ -#define local_irq_disable() \ +#define local_irq_disable() \ +do { \ + /* set Z flag, but don't change the C flag */ \ + asm volatile(" andcc gr0,gr0,gr0,icc2 \n" \ + : \ + : \ + : "memory", "icc2" \ + ); \ +} while(0) + +#define local_irq_enable() \ +do { \ + /* clear Z flag and then test the C flag */ \ + asm volatile(" oricc gr0,#1,gr0,icc2 \n" \ + " tihi icc2,gr0,#2 \n" \ + : \ + : \ + : "memory", "icc2" \ + ); \ +} while(0) + +#define local_save_flags(flags) \ +do { \ + typecheck(unsigned long, flags); \ + asm volatile("movsg ccr,%0" \ + : "=r"(flags) \ + : \ + : "memory"); \ + \ + /* shift ICC2.Z to bit 0 */ \ + flags >>= 26; \ + \ + /* make flags 1 if interrupts disabled, 0 otherwise */ \ + flags &= 1UL; \ +} while(0) + +#define irqs_disabled() \ + ({unsigned long flags; local_save_flags(flags); flags; }) + +#define local_irq_save(flags) \ +do { \ + typecheck(unsigned long, flags); \ + local_save_flags(flags); \ + local_irq_disable(); \ 
+} while(0) + +#define local_irq_restore(flags) \ +do { \ + typecheck(unsigned long, flags); \ + \ + /* load the Z flag by turning 1 if disabled into 0 if disabled \ + * and thus setting the Z flag but not the C flag */ \ + asm volatile(" xoricc %0,#1,gr0,icc2 \n" \ + /* then test Z=0 and C=0 */ \ + " tihi icc2,gr0,#2 \n" \ + : \ + : "r"(flags) \ + : "memory", "icc2" \ + ); \ + \ +} while(0) + +/* + * real interrupt flag manipulation + */ +#define __local_irq_disable() \ do { \ unsigned long psr; \ asm volatile(" movsg psr,%0 \n" \ @@ -53,7 +129,7 @@ do { \ : "memory"); \ } while(0) -#define local_irq_enable() \ +#define __local_irq_enable() \ do { \ unsigned long psr; \ asm volatile(" movsg psr,%0 \n" \ @@ -64,7 +140,7 @@ do { \ : "memory"); \ } while(0) -#define local_save_flags(flags) \ +#define __local_save_flags(flags) \ do { \ typecheck(unsigned long, flags); \ asm("movsg psr,%0" \ @@ -73,7 +149,7 @@ do { \ : "memory"); \ } while(0) -#define local_irq_save(flags) \ +#define __local_irq_save(flags) \ do { \ unsigned long npsr; \ typecheck(unsigned long, flags); \ @@ -86,7 +162,7 @@ do { \ : "memory"); \ } while(0) -#define local_irq_restore(flags) \ +#define __local_irq_restore(flags) \ do { \ typecheck(unsigned long, flags); \ asm volatile(" movgs %0,psr \n" \ @@ -95,7 +171,7 @@ do { \ : "memory"); \ } while(0) -#define irqs_disabled() \ +#define __irqs_disabled() \ ((__get_PSR() & PSR_PIL) >= PSR_PIL_14) /* diff --git a/include/asm-frv/uaccess.h b/include/asm-frv/uaccess.h index b6bcbe01f6ee..a1d140438863 100644 --- a/include/asm-frv/uaccess.h +++ b/include/asm-frv/uaccess.h @@ -306,7 +306,4 @@ extern long strnlen_user(const char *src, long count); extern unsigned long search_exception_table(unsigned long addr); -#define copy_to_user_page(vma, page, vaddr, dst, src, len) memcpy(dst, src, len) -#define copy_from_user_page(vma, page, vaddr, dst, src, len) memcpy(dst, src, len) - #endif /* _ASM_UACCESS_H */ diff --git a/include/asm-frv/unistd.h b/include/asm-frv/unistd.h index 4d994d2e99e3..322531caa484 100644 --- a/include/asm-frv/unistd.h +++ b/include/asm-frv/unistd.h @@ -295,13 +295,29 @@ #define __NR_add_key 286 #define __NR_request_key 287 #define __NR_keyctl 288 -#define __NR_vperfctr_open 289 -#define __NR_vperfctr_control (__NR_perfctr_info+1) -#define __NR_vperfctr_unlink (__NR_perfctr_info+2) -#define __NR_vperfctr_iresume (__NR_perfctr_info+3) -#define __NR_vperfctr_read (__NR_perfctr_info+4) +#define __NR_ioprio_set 289 +#define __NR_ioprio_get 290 +#define __NR_inotify_init 291 +#define __NR_inotify_add_watch 292 +#define __NR_inotify_rm_watch 293 +#define __NR_migrate_pages 294 +#define __NR_openat 295 +#define __NR_mkdirat 296 +#define __NR_mknodat 297 +#define __NR_fchownat 298 +#define __NR_futimesat 299 +#define __NR_newfstatat 300 +#define __NR_unlinkat 301 +#define __NR_renameat 302 +#define __NR_linkat 303 +#define __NR_symlinkat 304 +#define __NR_readlinkat 305 +#define __NR_fchmodat 306 +#define __NR_faccessat 307 +#define __NR_pselect6 308 +#define __NR_ppoll 309 -#define NR_syscalls 294 +#define NR_syscalls 310 /* * process the return value of a syscall, consigning it to one of two possible fates diff --git a/include/asm-generic/mman.h b/include/asm-generic/mman.h new file mode 100644 index 000000000000..3b41d2bb70da --- /dev/null +++ b/include/asm-generic/mman.h @@ -0,0 +1,42 @@ +#ifndef _ASM_GENERIC_MMAN_H +#define _ASM_GENERIC_MMAN_H + +/* + Author: Michael S. Tsirkin <mst@mellanox.co.il>, Mellanox Technologies Ltd. 
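The asm-frv/system.h rework above makes local_irq_disable() a cheap flag flip: ICC2.Z marks interrupts as virtually disabled, ICC2.C records whether delivery was really shut off by an interrupt that arrived in the meantime, and local_irq_enable() only takes the slow trap to re-enable hardware delivery when that actually happened. A toy C model of the same idea, with plain flags in place of the condition-code bits:

    #include <stdbool.h>

    static volatile bool virtually_off;   /* models ICC2.Z */
    static volatile bool really_off;      /* models ICC2.C being clear */

    static void hw_reenable(void) { really_off = false; }   /* the trap path */

    static void irq_disable(void) { virtually_off = true; } /* fast path only */

    static void irq_enable(void)
    {
            virtually_off = false;
            if (really_off)         /* an interrupt fired while "off" */
                    hw_reenable();
    }

    /* Interrupt entry would do: if (virtually_off) { really_off = true;
     * return; } and deliver the interrupt normally otherwise. */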
+ Based on: asm-xxx/mman.h +*/ + +#define PROT_READ 0x1 /* page can be read */ +#define PROT_WRITE 0x2 /* page can be written */ +#define PROT_EXEC 0x4 /* page can be executed */ +#define PROT_SEM 0x8 /* page may be used for atomic ops */ +#define PROT_NONE 0x0 /* page can not be accessed */ +#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ +#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ + +#define MAP_SHARED 0x01 /* Share changes */ +#define MAP_PRIVATE 0x02 /* Changes are private */ +#define MAP_TYPE 0x0f /* Mask for type of mapping */ +#define MAP_FIXED 0x10 /* Interpret addr exactly */ +#define MAP_ANONYMOUS 0x20 /* don't use a file */ + +#define MS_ASYNC 1 /* sync memory asynchronously */ +#define MS_INVALIDATE 2 /* invalidate the caches */ +#define MS_SYNC 4 /* synchronous memory sync */ + +#define MADV_NORMAL 0 /* no further special treatment */ +#define MADV_RANDOM 1 /* expect random page references */ +#define MADV_SEQUENTIAL 2 /* expect sequential page references */ +#define MADV_WILLNEED 3 /* will need these pages */ +#define MADV_DONTNEED 4 /* don't need these pages */ + +/* common parameters: try to keep these consistent across architectures */ +#define MADV_REMOVE 9 /* remove these pages & resources */ +#define MADV_DONTFORK 10 /* don't inherit across fork */ +#define MADV_DOFORK 11 /* do inherit across fork */ + +/* compatibility flags */ +#define MAP_ANON MAP_ANONYMOUS +#define MAP_FILE 0 + +#endif diff --git a/include/asm-h8300/mman.h b/include/asm-h8300/mman.h index 744a8fb485c2..b9f104f22a36 100644 --- a/include/asm-h8300/mman.h +++ b/include/asm-h8300/mman.h @@ -1,19 +1,7 @@ #ifndef __H8300_MMAN_H__ #define __H8300_MMAN_H__ -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_SEM 0x8 /* page may be used for atomic ops */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ - -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* don't use a file */ +#include <asm-generic/mman.h> #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ @@ -23,22 +11,7 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 - #endif /* __H8300_MMAN_H__ */ diff --git a/include/asm-i386/mman.h b/include/asm-i386/mman.h index ba4941e6f643..8fd9d7ab7faf 100644 --- 
a/include/asm-i386/mman.h +++ b/include/asm-i386/mman.h @@ -1,19 +1,7 @@ #ifndef __I386_MMAN_H__ #define __I386_MMAN_H__ -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_SEM 0x8 /* page may be used for atomic ops */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ - -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* don't use a file */ +#include <asm-generic/mman.h> #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ @@ -23,22 +11,7 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 - #endif /* __I386_MMAN_H__ */ diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h index e20e99551d71..1f7d48c9ba3f 100644 --- a/include/asm-i386/thread_info.h +++ b/include/asm-i386/thread_info.h @@ -158,8 +158,8 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__; /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK \ - (0x0000FFFF & ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|\ - _TIF_SECCOMP|_TIF_SYSCALL_EMU)) + (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ + _TIF_SECCOMP | _TIF_SYSCALL_EMU)) /* work to do on any return to u-space */ #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h index af503a122b23..aa958c6ee83e 100644 --- a/include/asm-i386/topology.h +++ b/include/asm-i386/topology.h @@ -27,7 +27,7 @@ #ifndef _ASM_I386_TOPOLOGY_H #define _ASM_I386_TOPOLOGY_H -#ifdef CONFIG_SMP +#ifdef CONFIG_X86_HT #define topology_physical_package_id(cpu) \ (phys_proc_id[cpu] == BAD_APICID ? 
-1 : phys_proc_id[cpu]) #define topology_core_id(cpu) \ diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h index 3a544ffc5008..f7a517654308 100644 --- a/include/asm-ia64/acpi.h +++ b/include/asm-ia64/acpi.h @@ -106,6 +106,8 @@ extern unsigned int can_cpei_retarget(void); extern unsigned int is_cpu_cpei_target(unsigned int cpu); extern void set_cpei_target_cpu(unsigned int cpu); extern unsigned int get_cpei_target_cpu(void); +extern void prefill_possible_map(void); +extern int additional_cpus; #ifdef CONFIG_ACPI_NUMA /* Proximity bitmap length; _PXM is at most 255 (8 bit)*/ diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h index e1b6cd63f49e..03d00faf03b5 100644 --- a/include/asm-ia64/machvec_sn2.h +++ b/include/asm-ia64/machvec_sn2.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (c) 2002-2003, 2006 Silicon Graphics, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License @@ -20,11 +20,6 @@ * License along with this program; if not, write the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * - * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, - * Mountain View, CA 94043, or: - * - * http://www.sgi.com - * * For further information regarding this notice, see: * * http://oss.sgi.com/projects/GenInfo/NoticeExplan diff --git a/include/asm-ia64/mman.h b/include/asm-ia64/mman.h index 828beb24a20e..6ba179f12718 100644 --- a/include/asm-ia64/mman.h +++ b/include/asm-ia64/mman.h @@ -8,19 +8,7 @@ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co */ -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_SEM 0x8 /* page may be used for atomic ops */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ - -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* don't use a file */ +#include <asm-generic/mman.h> #define MAP_GROWSDOWN 0x00100 /* stack-like segment */ #define MAP_GROWSUP 0x00200 /* register stack-like segment */ @@ -31,22 +19,7 @@ #define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 - #endif /* _ASM_IA64_MMAN_H */ diff --git a/include/asm-ia64/sn/arch.h b/include/asm-ia64/sn/arch.h index 1a3831c04af6..91c31be87b13 
100644 --- a/include/asm-ia64/sn/arch.h +++ b/include/asm-ia64/sn/arch.h @@ -70,7 +70,7 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); * Compact node ID to nasid mappings kept in the per-cpu data areas of each * cpu. */ -DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]); +DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); #define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0])) diff --git a/include/asm-ia64/sn/bte.h b/include/asm-ia64/sn/bte.h index 01e5b4103235..5335d87ca5f8 100644 --- a/include/asm-ia64/sn/bte.h +++ b/include/asm-ia64/sn/bte.h @@ -46,7 +46,7 @@ #define BTES_PER_NODE (is_shub2() ? 4 : 2) #define MAX_BTES_PER_NODE 4 -#define BTE2OFF_CTRL (0) +#define BTE2OFF_CTRL 0 #define BTE2OFF_SRC (SH2_BT_ENG_SRC_ADDR_0 - SH2_BT_ENG_CSR_0) #define BTE2OFF_DEST (SH2_BT_ENG_DEST_ADDR_0 - SH2_BT_ENG_CSR_0) #define BTE2OFF_NOTIFY (SH2_BT_ENG_NOTIF_ADDR_0 - SH2_BT_ENG_CSR_0) @@ -75,11 +75,11 @@ : base + (BTEOFF_NOTIFY/8)) /* Define hardware modes */ -#define BTE_NOTIFY (IBCT_NOTIFY) +#define BTE_NOTIFY IBCT_NOTIFY #define BTE_NORMAL BTE_NOTIFY #define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE) /* Use a reserved bit to let the caller specify a wait for any BTE */ -#define BTE_WACQUIRE (0x4000) +#define BTE_WACQUIRE 0x4000 /* Use the BTE on the node with the destination memory */ #define BTE_USE_DEST (BTE_WACQUIRE << 1) /* Use any available BTE interface on any node for the transfer */ diff --git a/include/asm-ia64/sn/pcibr_provider.h b/include/asm-ia64/sn/pcibr_provider.h index 9334078b089a..a601d3af39b6 100644 --- a/include/asm-ia64/sn/pcibr_provider.h +++ b/include/asm-ia64/sn/pcibr_provider.h @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All rights reserved. 
*/ #ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H #define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H @@ -115,18 +115,6 @@ struct pcibus_info { spinlock_t pbi_lock; }; -/* - * pcibus_info structure locking macros - */ -inline static unsigned long -pcibr_lock(struct pcibus_info *pcibus_info) -{ - unsigned long flag; - spin_lock_irqsave(&pcibus_info->pbi_lock, flag); - return(flag); -} -#define pcibr_unlock(pcibus_info, flag) spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag) - extern int pcibr_init_provider(void); extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *); extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t); diff --git a/include/asm-ia64/sn/sn_feature_sets.h b/include/asm-ia64/sn/sn_feature_sets.h index 9ca642cad338..ff33e3bd3f8e 100644 --- a/include/asm-ia64/sn/sn_feature_sets.h +++ b/include/asm-ia64/sn/sn_feature_sets.h @@ -12,9 +12,6 @@ */ -#include <asm/types.h> -#include <asm/bitops.h> - /* --------------------- PROM Features -----------------------------*/ extern int sn_prom_feature_available(int id); diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h index 0c36928ffd8b..df7f5f4f3cde 100644 --- a/include/asm-ia64/sn/xpc.h +++ b/include/asm-ia64/sn/xpc.h @@ -508,19 +508,24 @@ struct xpc_channel { #define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ #define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ -#define XPC_C_CONNECTCALLOUT 0x00000040 /* channel connected callout made */ -#define XPC_C_CONNECTED 0x00000080 /* local channel is connected */ -#define XPC_C_CONNECTING 0x00000100 /* channel is being connected */ - -#define XPC_C_RCLOSEREPLY 0x00000200 /* remote close channel reply */ -#define XPC_C_CLOSEREPLY 0x00000400 /* local close channel reply */ -#define XPC_C_RCLOSEREQUEST 0x00000800 /* remote close channel request */ -#define XPC_C_CLOSEREQUEST 0x00001000 /* local close channel request */ - -#define XPC_C_DISCONNECTED 0x00002000 /* channel is disconnected */ -#define XPC_C_DISCONNECTING 0x00004000 /* channel is being disconnected */ -#define XPC_C_DISCONNECTCALLOUT 0x00008000 /* chan disconnected callout made */ -#define XPC_C_WDISCONNECT 0x00010000 /* waiting for channel disconnect */ +#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */ +#define XPC_C_CONNECTEDCALLOUT_MADE \ + 0x00000080 /* connected callout completed */ +#define XPC_C_CONNECTED 0x00000100 /* local channel is connected */ +#define XPC_C_CONNECTING 0x00000200 /* channel is being connected */ + +#define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */ +#define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */ +#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */ +#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */ + +#define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */ +#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */ +#define XPC_C_DISCONNECTINGCALLOUT \ + 0x00010000 /* disconnecting callout initiated */ +#define XPC_C_DISCONNECTINGCALLOUT_MADE \ + 0x00020000 /* disconnecting callout completed */ +#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */ diff --git a/include/asm-ia64/timex.h b/include/asm-ia64/timex.h index 414aae060440..05a6baf8a472 100644 --- a/include/asm-ia64/timex.h +++ b/include/asm-ia64/timex.h @@ -15,6 +15,8 @@ typedef unsigned long cycles_t; +extern void (*ia64_udelay)(unsigned long usecs); + /* * For performance reasons, we don't want to define 
CLOCK_TICK_TRATE as * local_cpu_data->itc_rate. Fortunately, we don't have to, either: according to George diff --git a/include/asm-m32r/mman.h b/include/asm-m32r/mman.h index 12e29747bc84..695a860c024f 100644 --- a/include/asm-m32r/mman.h +++ b/include/asm-m32r/mman.h @@ -1,21 +1,9 @@ #ifndef __M32R_MMAN_H__ #define __M32R_MMAN_H__ -/* orig : i386 2.6.0-test6 */ - -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_SEM 0x8 /* page may be used for atomic ops */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ +#include <asm-generic/mman.h> -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* don't use a file */ +/* orig : i386 2.6.0-test6 */ #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ @@ -25,22 +13,7 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 - #endif /* __M32R_MMAN_H__ */ diff --git a/include/asm-m68k/mman.h b/include/asm-m68k/mman.h index ea262ab88b3b..1626d37f4898 100644 --- a/include/asm-m68k/mman.h +++ b/include/asm-m68k/mman.h @@ -1,19 +1,7 @@ #ifndef __M68K_MMAN_H__ #define __M68K_MMAN_H__ -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_SEM 0x8 /* page may be used for atomic ops */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ - -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* don't use a file */ +#include <asm-generic/mman.h> #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ @@ -23,22 +11,7 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#define 
MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 - #endif /* __M68K_MMAN_H__ */ diff --git a/include/asm-mips/cpu.h b/include/asm-mips/cpu.h index 934e063e79f1..818b9a97e214 100644 --- a/include/asm-mips/cpu.h +++ b/include/asm-mips/cpu.h @@ -204,9 +204,9 @@ */ #define MIPS_CPU_ISA_I 0x00000001 #define MIPS_CPU_ISA_II 0x00000002 -#define MIPS_CPU_ISA_III 0x00000003 -#define MIPS_CPU_ISA_IV 0x00000004 -#define MIPS_CPU_ISA_V 0x00000005 +#define MIPS_CPU_ISA_III 0x00000004 +#define MIPS_CPU_ISA_IV 0x00000008 +#define MIPS_CPU_ISA_V 0x00000010 #define MIPS_CPU_ISA_M32R1 0x00000020 #define MIPS_CPU_ISA_M32R2 0x00000040 #define MIPS_CPU_ISA_M64R1 0x00000080 diff --git a/include/asm-mips/gcc/sgidefs.h b/include/asm-mips/gcc/sgidefs.h deleted file mode 100644 index 05994371a2af..000000000000 --- a/include/asm-mips/gcc/sgidefs.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * include/sgidefs.h - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996 by Ralf Baechle - * - * This file is here to satisfy GCC's expectations. - */ -#ifndef __SGIDEFS_H -#define __SGIDEFS_H - -#include <asm/sgidefs.h> - -#endif /* __SGIDEFS_H */ diff --git a/include/asm-mips/mach-generic/timex.h b/include/asm-mips/mach-generic/timex.h index c6a2e5f0574a..48b4cfaa0d50 100644 --- a/include/asm-mips/mach-generic/timex.h +++ b/include/asm-mips/mach-generic/timex.h @@ -3,20 +3,11 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2003 by Ralf Baechle + * Copyright (C) 2003, 2005 by Ralf Baechle */ #ifndef __ASM_MACH_GENERIC_TIMEX_H #define __ASM_MACH_GENERIC_TIMEX_H -#include <linux/config.h> - -/* - * Last remaining user of the i8254 PIC, will be converted, too ... - */ -#ifdef CONFIG_SNI_RM200_PCI -#define CLOCK_TICK_RATE 1193182 -#else #define CLOCK_TICK_RATE 500000 -#endif #endif /* __ASM_MACH_GENERIC_TIMEX_H */ diff --git a/include/asm-mips/mach-rm200/timex.h b/include/asm-mips/mach-rm200/timex.h new file mode 100644 index 000000000000..11ff6cb0f214 --- /dev/null +++ b/include/asm-mips/mach-rm200/timex.h @@ -0,0 +1,13 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
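In the asm-mips/cpu.h hunk above, MIPS_CPU_ISA_III, _IV and _V stop being consecutive small integers (0x3, 0x4, 0x5) and become distinct bits (0x4, 0x8, 0x10), lining up with the M32R1/M32R2/M64R1 values that were already powers of two. With one bit per ISA level a CPU can advertise several levels at once and each one can be tested with a simple mask, which is presumably the point of the change. A stand-alone sketch of the resulting usage (constant names borrowed for illustration only):

#include <stdio.h>

#define ISA_I    0x01
#define ISA_II   0x02
#define ISA_III  0x04
#define ISA_IV   0x08
#define ISA_V    0x10

int main(void)
{
        /* hypothetical CPU implementing MIPS I-III */
        unsigned int isa = ISA_I | ISA_II | ISA_III;

        if (isa & ISA_III)
                printf("MIPS III instructions available\n");
        if (!(isa & ISA_V))
                printf("MIPS V not supported\n");
        return 0;
}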
+ * + * Copyright (C) 2003, 2005 by Ralf Baechle + */ +#ifndef __ASM_MACH_RM200_TIMEX_H +#define __ASM_MACH_RM200_TIMEX_H + +#define CLOCK_TICK_RATE 1193182 + +#endif /* __ASM_MACH_RM200_TIMEX_H */ diff --git a/include/asm-mips/mman.h b/include/asm-mips/mman.h index dd17c8bd62a1..046cf686bee7 100644 --- a/include/asm-mips/mman.h +++ b/include/asm-mips/mman.h @@ -60,15 +60,19 @@ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ +#define MADV_NORMAL 0 /* no further special treatment */ +#define MADV_RANDOM 1 /* expect random page references */ +#define MADV_SEQUENTIAL 2 /* expect sequential page references */ +#define MADV_WILLNEED 3 /* will need these pages */ +#define MADV_DONTNEED 4 /* don't need these pages */ + +/* common parameters: try to keep these consistent across architectures */ +#define MADV_REMOVE 9 /* remove these pages & resources */ +#define MADV_DONTFORK 10 /* don't inherit across fork */ +#define MADV_DOFORK 11 /* do inherit across fork */ /* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 +#define MAP_ANON MAP_ANONYMOUS +#define MAP_FILE 0 #endif /* _ASM_MMAN_H */ diff --git a/include/asm-mips/r4kcache.h b/include/asm-mips/r4kcache.h index cc53196efa40..9632c27dad15 100644 --- a/include/asm-mips/r4kcache.h +++ b/include/asm-mips/r4kcache.h @@ -14,6 +14,7 @@ #include <asm/asm.h> #include <asm/cacheops.h> +#include <asm/cpu-features.h> /* * This macro return a properly sign-extended address suitable as base address @@ -78,22 +79,25 @@ static inline void flush_scache_line(unsigned long addr) cache_op(Hit_Writeback_Inv_SD, addr); } +#define protected_cache_op(op,addr) \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noreorder \n" \ + " .set mips3 \n" \ + "1: cache %0, (%1) \n" \ + "2: .set pop \n" \ + " .section __ex_table,\"a\" \n" \ + " "STR(PTR)" 1b, 2b \n" \ + " .previous" \ + : \ + : "i" (op), "r" (addr)) + /* * The next two are for badland addresses like signal trampolines. 
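The asm-mips/mman.h hunk above (and the matching parisc and xtensa hunks later in this diff) adds MADV_DONTFORK (10) and MADV_DOFORK (11) to the cross-architecture "common parameters" range; the mm/madvise.c hunk near the end of this diff maps them onto VM_DONTCOPY. From user space the intent is to exclude a mapping that must not be duplicated into a child (a DMA buffer, for instance) from fork(), and to re-enable inheritance later. A hedged sketch, assuming a libc that already exposes the new constants:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4096;
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return 1;

#ifdef MADV_DONTFORK
        if (madvise(buf, len, MADV_DONTFORK))   /* drop from children */
                perror("madvise(MADV_DONTFORK)");
        /* ... fork() here: the child would not see this range ... */
        if (madvise(buf, len, MADV_DOFORK))     /* inherit again */
                perror("madvise(MADV_DOFORK)");
#endif
        munmap(buf, len);
        return 0;
}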
*/ static inline void protected_flush_icache_line(unsigned long addr) { - __asm__ __volatile__( - " .set push \n" - " .set noreorder \n" - " .set mips3 \n" - "1: cache %0, (%1) \n" - "2: .set pop \n" - " .section __ex_table,\"a\" \n" - " "STR(PTR)" 1b, 2b \n" - " .previous" - : - : "i" (Hit_Invalidate_I), "r" (addr)); + protected_cache_op(Hit_Invalidate_I, addr); } /* @@ -104,32 +108,12 @@ static inline void protected_flush_icache_line(unsigned long addr) */ static inline void protected_writeback_dcache_line(unsigned long addr) { - __asm__ __volatile__( - " .set push \n" - " .set noreorder \n" - " .set mips3 \n" - "1: cache %0, (%1) \n" - "2: .set pop \n" - " .section __ex_table,\"a\" \n" - " "STR(PTR)" 1b, 2b \n" - " .previous" - : - : "i" (Hit_Writeback_Inv_D), "r" (addr)); + protected_cache_op(Hit_Writeback_Inv_D, addr); } static inline void protected_writeback_scache_line(unsigned long addr) { - __asm__ __volatile__( - " .set push \n" - " .set noreorder \n" - " .set mips3 \n" - "1: cache %0, (%1) \n" - "2: .set pop \n" - " .section __ex_table,\"a\" \n" - " "STR(PTR)" 1b, 2b \n" - " .previous" - : - : "i" (Hit_Writeback_Inv_SD), "r" (addr)); + protected_cache_op(Hit_Writeback_Inv_SD, addr); } /* @@ -295,4 +279,28 @@ __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64) __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64) __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128) +/* build blast_xxx_range, protected_blast_xxx_range */ +#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \ +static inline void prot##blast_##pfx##cache##_range(unsigned long start, \ + unsigned long end) \ +{ \ + unsigned long lsize = cpu_##desc##_line_size(); \ + unsigned long addr = start & ~(lsize - 1); \ + unsigned long aend = (end - 1) & ~(lsize - 1); \ + while (1) { \ + prot##cache_op(hitop, addr); \ + if (addr == aend) \ + break; \ + addr += lsize; \ + } \ +} + +__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_) +__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_) +__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_) +__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, ) +__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, ) +/* blast_inv_dcache_range */ +__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, ) + #endif /* _ASM_R4KCACHE_H */ diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h index 91d813a37823..7a553e9d44d3 100644 --- a/include/asm-mips/uaccess.h +++ b/include/asm-mips/uaccess.h @@ -266,6 +266,8 @@ do { \ */ #define __get_user_asm_ll32(val, addr) \ { \ + unsigned long long __gu_tmp; \ + \ __asm__ __volatile__( \ "1: lw %1, (%3) \n" \ "2: lw %D1, 4(%3) \n" \ @@ -280,8 +282,9 @@ do { \ " " __UA_ADDR " 1b, 4b \n" \ " " __UA_ADDR " 2b, 4b \n" \ " .previous \n" \ - : "=r" (__gu_err), "=&r" (val) \ + : "=r" (__gu_err), "=&r" (__gu_tmp) \ : "0" (0), "r" (addr), "i" (-EFAULT)); \ + (val) = __gu_tmp; \ } /* diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h index e7ff9b187783..769305d20108 100644 --- a/include/asm-mips/unistd.h +++ b/include/asm-mips/unistd.h @@ -1184,10 +1184,8 @@ type name (atype a,btype b,ctype c,dtype d,etype e,ftype f) \ #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION -# ifndef __mips64 -# define __ARCH_WANT_STAT64 -# endif # ifdef CONFIG_32BIT +# define __ARCH_WANT_STAT64 # define __ARCH_WANT_SYS_TIME # endif # ifdef CONFIG_MIPS32_O32 
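The asm-mips/r4kcache.h changes above do two things: three identical exception-protected inline-asm blocks collapse into one protected_cache_op() macro, and a new __BUILD_BLAST_CACHE_RANGE macro stamps out one range-flush function per cache/operation pair. Each generated function rounds both ends of the range down to a cache-line boundary and issues one cache op per line until it reaches the last one. A user-space model of that generator, with printf standing in for the CACHE instruction and invented line sizes:

#include <stdio.h>

static void cache_op(int op, unsigned long addr)
{
        printf("cache op %d @ %#lx\n", op, addr);
}

/* simplified analogue of __BUILD_BLAST_CACHE_RANGE */
#define BUILD_CACHE_RANGE(name, op, lsize)                               \
static void name##_range(unsigned long start, unsigned long end)         \
{                                                                        \
        unsigned long addr = start & ~((unsigned long)(lsize) - 1);      \
        unsigned long aend = (end - 1) & ~((unsigned long)(lsize) - 1);  \
        for (;;) {                                                       \
                cache_op(op, addr);                                      \
                if (addr == aend)                                        \
                        break;                                           \
                addr += (lsize);                                         \
        }                                                                \
}

BUILD_CACHE_RANGE(flush_dcache, 1, 32)
BUILD_CACHE_RANGE(flush_icache, 2, 64)

int main(void)
{
        flush_dcache_range(0x1000, 0x1080);     /* four 32-byte lines */
        flush_icache_range(0x2000, 0x2040);     /* one 64-byte line   */
        return 0;
}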
diff --git a/include/asm-parisc/mman.h b/include/asm-parisc/mman.h index 736b0abcac05..0ef15ee0f17e 100644 --- a/include/asm-parisc/mman.h +++ b/include/asm-parisc/mman.h @@ -38,7 +38,11 @@ #define MADV_SPACEAVAIL 5 /* insure that resources are reserved */ #define MADV_VPS_PURGE 6 /* Purge pages from VM page cache */ #define MADV_VPS_INHERIT 7 /* Inherit parents page size */ -#define MADV_REMOVE 8 /* remove these pages & resources */ + +/* common/generic parameters */ +#define MADV_REMOVE 9 /* remove these pages & resources */ +#define MADV_DONTFORK 10 /* don't inherit across fork */ +#define MADV_DOFORK 11 /* do inherit across fork */ /* The range 12-64 is reserved for page size specification. */ #define MADV_4K_PAGES 12 /* Use 4K pages */ diff --git a/include/asm-powerpc/mman.h b/include/asm-powerpc/mman.h index a2e34c21b44f..24cf664a8295 100644 --- a/include/asm-powerpc/mman.h +++ b/include/asm-powerpc/mman.h @@ -1,6 +1,8 @@ #ifndef _ASM_POWERPC_MMAN_H #define _ASM_POWERPC_MMAN_H +#include <asm-generic/mman.h> + /* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -8,19 +10,6 @@ * 2 of the License, or (at your option) any later version. */ -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_SEM 0x8 /* page may be used for atomic ops */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ - -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* don't use a file */ #define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ #define MAP_NORESERVE 0x40 /* don't reserve swap pages */ #define MAP_LOCKED 0x80 @@ -29,25 +18,10 @@ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 0x2000 /* lock all currently mapped pages */ #define MCL_FUTURE 0x4000 /* lock all additions to address space */ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 - #endif /* _ASM_POWERPC_MMAN_H */ diff --git a/include/asm-powerpc/pgalloc.h b/include/asm-powerpc/pgalloc.h index 9f5b052784a5..a00ee002cd11 100644 --- a/include/asm-powerpc/pgalloc.h +++ b/include/asm-powerpc/pgalloc.h @@ -146,7 +146,7 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) #ifndef CONFIG_PPC_64K_PAGES -#define __pud_free_tlb(tlb, pmd) \ +#define __pud_free_tlb(tlb, pud) \ pgtable_free_tlb(tlb, 
pgtable_free_cache(pud, \ PUD_CACHE_NUM, PUD_TABLE_SIZE-1)) #endif /* CONFIG_PPC_64K_PAGES */ diff --git a/include/asm-s390/mman.h b/include/asm-s390/mman.h index c8d5409b5d56..7839767d837e 100644 --- a/include/asm-s390/mman.h +++ b/include/asm-s390/mman.h @@ -9,19 +9,7 @@ #ifndef __S390_MMAN_H__ #define __S390_MMAN_H__ -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_SEM 0x8 /* page may be used for atomic ops */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ - -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* don't use a file */ +#include <asm-generic/mman.h> #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ @@ -31,22 +19,7 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 - #endif /* __S390_MMAN_H__ */ diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h index 9c6e9c300eb9..444dae5912e6 100644 --- a/include/asm-s390/smp.h +++ b/include/asm-s390/smp.h @@ -31,6 +31,7 @@ typedef struct __u16 cpu; } sigp_info; +extern void smp_setup_cpu_possible_map(void); extern int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic, int wait, int cpu); #define NO_PROC_ID 0xFF /* No processor magic marker */ @@ -104,6 +105,7 @@ smp_call_function_on(void (*func) (void *info), void *info, #define smp_cpu_not_running(cpu) 1 #define smp_get_cpu(cpu) ({ 0; }) #define smp_put_cpu(cpu) ({ 0; }) +#define smp_setup_cpu_possible_map() #endif #endif diff --git a/include/asm-sh/mman.h b/include/asm-sh/mman.h index 693bd55a3710..156eb0225cf6 100644 --- a/include/asm-sh/mman.h +++ b/include/asm-sh/mman.h @@ -1,19 +1,7 @@ #ifndef __ASM_SH_MMAN_H #define __ASM_SH_MMAN_H -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_SEM 0x8 /* page may be used for atomic ops */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ - -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define 
MAP_ANONYMOUS 0x20 /* don't use a file */ +#include <asm-generic/mman.h> #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ @@ -23,22 +11,7 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 - #endif /* __ASM_SH_MMAN_H */ diff --git a/include/asm-sparc/mman.h b/include/asm-sparc/mman.h index 98435ad8619e..88d1886abf3b 100644 --- a/include/asm-sparc/mman.h +++ b/include/asm-sparc/mman.h @@ -2,21 +2,10 @@ #ifndef __SPARC_MMAN_H__ #define __SPARC_MMAN_H__ +#include <asm-generic/mman.h> + /* SunOS'ified... */ -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_SEM 0x8 /* page may be used for atomic ops */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ - -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* don't use a file */ #define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ #define MAP_NORESERVE 0x40 /* don't reserve swap pages */ #define MAP_INHERIT 0x80 /* SunOS doesn't do this, but... 
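Most of the mman.h churn in this merge is the same mechanical cleanup: the PROT_*/MAP_* basics, MS_*, the old MADV_ values and the MAP_ANON/MAP_FILE compatibility names disappear from the per-architecture headers, evidently now supplied by asm-generic/mman.h, and each arch header shrinks to an #include plus whatever that port genuinely defines differently. The shape it leaves behind, collapsed into a single buildable file for illustration (values copied loosely, not the real headers):

#include <stdio.h>

/* --- what would live in asm-generic/mman.h -------------------------- */
#define PROT_READ      0x1
#define PROT_WRITE     0x2
#define MAP_SHARED     0x01
#define MAP_PRIVATE    0x02
#define MADV_NORMAL    0
#define MADV_REMOVE    9        /* the shared "common parameters" block */

/* --- what stays in asm-<arch>/mman.h --------------------------------- */
#define MAP_GROWSDOWN  0x0100   /* arch-specific leftover */

int main(void)
{
        printf("MAP_PRIVATE|MAP_GROWSDOWN = %#x\n",
               MAP_PRIVATE | MAP_GROWSDOWN);
        return 0;
}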
*/ @@ -27,10 +16,6 @@ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 0x2000 /* lock all currently mapped pages */ #define MCL_FUTURE 0x4000 /* lock all additions to address space */ @@ -48,16 +33,6 @@ #define MC_LOCKAS 5 /* Lock an entire address space of the calling process */ #define MC_UNLOCKAS 6 /* Unlock entire address space of calling process */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ #define MADV_FREE 0x5 /* (Solaris) contents can be freed */ -#define MADV_REMOVE 0x6 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 #endif /* __SPARC_MMAN_H__ */ diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h index 0615d601a7c6..64ec640a40ee 100644 --- a/include/asm-sparc/unistd.h +++ b/include/asm-sparc/unistd.h @@ -305,7 +305,7 @@ #define __NR_mknodat 286 #define __NR_fchownat 287 #define __NR_futimesat 288 -#define __NR_newfstatat 289 +#define __NR_fstatat64 289 #define __NR_unlinkat 290 #define __NR_renameat 291 #define __NR_linkat 292 diff --git a/include/asm-sparc64/mman.h b/include/asm-sparc64/mman.h index cb4b6156194d..6fd878e61435 100644 --- a/include/asm-sparc64/mman.h +++ b/include/asm-sparc64/mman.h @@ -2,21 +2,10 @@ #ifndef __SPARC64_MMAN_H__ #define __SPARC64_MMAN_H__ +#include <asm-generic/mman.h> + /* SunOS'ified... */ -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_SEM 0x8 /* page may be used for atomic ops */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ - -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* don't use a file */ #define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ #define MAP_NORESERVE 0x40 /* don't reserve swap pages */ #define MAP_INHERIT 0x80 /* SunOS doesn't do this, but... 
*/ @@ -27,10 +16,6 @@ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 0x2000 /* lock all currently mapped pages */ #define MCL_FUTURE 0x4000 /* lock all additions to address space */ @@ -48,16 +33,6 @@ #define MC_LOCKAS 5 /* Lock an entire address space of the calling process */ #define MC_UNLOCKAS 6 /* Unlock entire address space of calling process */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ #define MADV_FREE 0x5 /* (Solaris) contents can be freed */ -#define MADV_REMOVE 0x6 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 #endif /* __SPARC64_MMAN_H__ */ diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h index c58ba8a096cf..a284986b1541 100644 --- a/include/asm-sparc64/unistd.h +++ b/include/asm-sparc64/unistd.h @@ -307,7 +307,7 @@ #define __NR_mknodat 286 #define __NR_fchownat 287 #define __NR_futimesat 288 -#define __NR_newfstatat 289 +#define __NR_fstatat64 289 #define __NR_unlinkat 290 #define __NR_renameat 291 #define __NR_linkat 292 diff --git a/include/asm-v850/mman.h b/include/asm-v850/mman.h index edc79965193a..edbf6edbfb37 100644 --- a/include/asm-v850/mman.h +++ b/include/asm-v850/mman.h @@ -1,18 +1,7 @@ #ifndef __V850_MMAN_H__ #define __V850_MMAN_H__ -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ - -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* don't use a file */ +#include <asm-generic/mman.h> #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ @@ -20,22 +9,7 @@ #define MAP_LOCKED 0x2000 /* pages are locked */ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 - #endif /* __V850_MMAN_H__ */ diff --git a/include/asm-x86_64/mman.h b/include/asm-x86_64/mman.h index d0e97b74f735..dd5cb0534d37 100644 --- a/include/asm-x86_64/mman.h +++ b/include/asm-x86_64/mman.h @@ -1,19 +1,8 @@ #ifndef 
__X8664_MMAN_H__ #define __X8664_MMAN_H__ -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_SEM 0x8 -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ +#include <asm-generic/mman.h> -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* don't use a file */ #define MAP_32BIT 0x40 /* only give out 32bit addresses */ #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ @@ -24,22 +13,7 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_INVALIDATE 2 /* invalidate the caches */ -#define MS_SYNC 4 /* synchronous memory sync */ - #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ - -/* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 - #endif diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h index c99832e7bf3f..eca3f2d633db 100644 --- a/include/asm-x86_64/proto.h +++ b/include/asm-x86_64/proto.h @@ -133,6 +133,7 @@ extern int fix_aperture; extern int force_iommu; extern int reboot_force; +extern int notsc_setup(char *); extern void smp_local_timer_interrupt(struct pt_regs * regs); diff --git a/include/asm-xtensa/mman.h b/include/asm-xtensa/mman.h index 082a7504925e..ba394cbb4807 100644 --- a/include/asm-xtensa/mman.h +++ b/include/asm-xtensa/mman.h @@ -67,15 +67,19 @@ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#define MADV_NORMAL 0x0 /* default page-in behavior */ -#define MADV_RANDOM 0x1 /* page-in minimum required */ -#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ -#define MADV_WILLNEED 0x3 /* pre-fault pages */ -#define MADV_DONTNEED 0x4 /* discard these pages */ -#define MADV_REMOVE 0x5 /* remove these pages & resources */ +#define MADV_NORMAL 0 /* no further special treatment */ +#define MADV_RANDOM 1 /* expect random page references */ +#define MADV_SEQUENTIAL 2 /* expect sequential page references */ +#define MADV_WILLNEED 3 /* will need these pages */ +#define MADV_DONTNEED 4 /* don't need these pages */ + +/* common parameters: try to keep these consistent across architectures */ +#define MADV_REMOVE 9 /* remove these pages & resources */ +#define MADV_DONTFORK 10 /* don't inherit across fork */ +#define MADV_DOFORK 11 /* do inherit across fork */ /* compatibility flags */ -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FILE 0 +#define MAP_ANON MAP_ANONYMOUS +#define MAP_FILE 0 #endif /* _XTENSA_MMAN_H */ diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 0fe4aa891ddc..41ee79962bb2 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -498,12 +498,6 @@ struct 
transaction_s struct journal_head *t_checkpoint_list; /* - * Doubly-linked circular list of all buffers submitted for IO while - * checkpointing. [j_list_lock] - */ - struct journal_head *t_checkpoint_io_list; - - /* * Doubly-linked circular list of temporary buffers currently undergoing * IO in the log [j_list_lock] */ @@ -852,7 +846,7 @@ extern void journal_commit_transaction(journal_t *); /* Checkpoint list management */ int __journal_clean_checkpoint_list(journal_t *journal); -int __journal_remove_checkpoint(struct journal_head *); +void __journal_remove_checkpoint(struct journal_head *); void __journal_insert_checkpoint(struct journal_head *, transaction_t *); /* Buffer IO */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index b49affa0ac5a..3b507bf05d09 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -326,12 +326,6 @@ struct sysinfo { /* Force a compilation error if condition is true */ #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) -#ifdef CONFIG_SYSCTL -extern int randomize_va_space; -#else -#define randomize_va_space 1 -#endif - /* Trap pasters of __FUNCTION__ at compile-time */ #define __FUNCTION__ (__func__) diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 6aca67a569a2..f3dec45ef874 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -96,10 +96,16 @@ static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; }) /* convert a timespec to ktime_t format: */ -#define timespec_to_ktime(ts) ktime_set((ts).tv_sec, (ts).tv_nsec) +static inline ktime_t timespec_to_ktime(struct timespec ts) +{ + return ktime_set(ts.tv_sec, ts.tv_nsec); +} /* convert a timeval to ktime_t format: */ -#define timeval_to_ktime(tv) ktime_set((tv).tv_sec, (tv).tv_usec * 1000) +static inline ktime_t timeval_to_ktime(struct timeval tv) +{ + return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); +} /* Map the ktime_t to timespec conversion to ns_to_timespec function */ #define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 920766cea79c..ef21ed296039 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -149,7 +149,7 @@ struct nlm_rqst * nlmclnt_alloc_call(void); int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl); void nlmclnt_finish_block(struct nlm_rqst *req); long nlmclnt_block(struct nlm_rqst *req, long timeout); -u32 nlmclnt_grant(struct nlm_lock *); +u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *); void nlmclnt_recovery(struct nlm_host *, u32); int nlmclnt_reclaim(struct nlm_host *, struct file_lock *); int nlmclnt_setgrantargs(struct nlm_rqst *, struct nlm_lock *); @@ -204,7 +204,7 @@ nlmsvc_file_inode(struct nlm_file *file) * Compare two host addresses (needs modifying for ipv6) */ static __inline__ int -nlm_cmp_addr(struct sockaddr_in *sin1, struct sockaddr_in *sin2) +nlm_cmp_addr(const struct sockaddr_in *sin1, const struct sockaddr_in *sin2) { return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; } @@ -214,7 +214,7 @@ nlm_cmp_addr(struct sockaddr_in *sin1, struct sockaddr_in *sin2) * When the second lock is of type F_UNLCK, this acts like a wildcard. 
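In the linux/ktime.h hunk above, timespec_to_ktime() and timeval_to_ktime() turn from function-like macros into static inline functions. Besides giving the compiler a real parameter type to check, an inline function evaluates its argument exactly once, where the old macros expanded it twice (once for tv_sec, once for tv_nsec or tv_usec). A self-contained illustration of that difference, using invented types rather than the kernel's:

#include <stdio.h>

struct myts {
        long sec;
        long nsec;
};

#define TS_TO_NS_MACRO(t)  ((t).sec * 1000000000LL + (t).nsec)

static inline long long ts_to_ns(struct myts t)
{
        return t.sec * 1000000000LL + t.nsec;
}

static struct myts next_sample(void)
{
        static struct myts v;
        v.sec++;                        /* side effect on every call */
        return v;
}

int main(void)
{
        /* the macro expands next_sample() twice and may mix fields from
         * two different calls; the inline function calls it once */
        printf("macro : %lld\n", TS_TO_NS_MACRO(next_sample()));
        printf("inline: %lld\n", ts_to_ns(next_sample()));
        return 0;
}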
*/ static __inline__ int -nlm_compare_locks(struct file_lock *fl1, struct file_lock *fl2) +nlm_compare_locks(const struct file_lock *fl1, const struct file_lock *fl2) { return fl1->fl_pid == fl2->fl_pid && fl1->fl_start == fl2->fl_start diff --git a/include/linux/mm.h b/include/linux/mm.h index 75e9f0724997..26e1663a5cbe 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1051,5 +1051,7 @@ int shrink_slab(unsigned long scanned, gfp_t gfp_mask, void drop_pagecache(void); void drop_slab(void); +extern int randomize_va_space; + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 4cf6088625c1..468896939843 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -184,8 +184,11 @@ static inline int nf_hook_thresh(int pf, unsigned int hook, struct sk_buff **pskb, struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct sk_buff *), int thresh) + int (*okfn)(struct sk_buff *), int thresh, + int cond) { + if (!cond) + return 1; #ifndef CONFIG_NETFILTER_DEBUG if (list_empty(&nf_hooks[pf][hook])) return 1; @@ -197,7 +200,7 @@ static inline int nf_hook(int pf, unsigned int hook, struct sk_buff **pskb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct sk_buff *)) { - return nf_hook_thresh(pf, hook, pskb, indev, outdev, okfn, INT_MIN); + return nf_hook_thresh(pf, hook, pskb, indev, outdev, okfn, INT_MIN, 1); } /* Activate hook; either okfn or kfree_skb called, unless a hook @@ -224,7 +227,13 @@ static inline int nf_hook(int pf, unsigned int hook, struct sk_buff **pskb, #define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \ ({int __ret; \ -if ((__ret=nf_hook_thresh(pf, hook, &(skb), indev, outdev, okfn, thresh)) == 1)\ +if ((__ret=nf_hook_thresh(pf, hook, &(skb), indev, outdev, okfn, thresh, 1)) == 1)\ + __ret = (okfn)(skb); \ +__ret;}) + +#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) \ +({int __ret; \ +if ((__ret=nf_hook_thresh(pf, hook, &(skb), indev, outdev, okfn, INT_MIN, cond)) == 1)\ __ret = (okfn)(skb); \ __ret;}) @@ -295,11 +304,13 @@ extern struct proc_dir_entry *proc_net_netfilter; #else /* !CONFIG_NETFILTER */ #define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb) +#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb) static inline int nf_hook_thresh(int pf, unsigned int hook, struct sk_buff **pskb, struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct sk_buff *), int thresh) + int (*okfn)(struct sk_buff *), int thresh, + int cond) { return okfn(*pskb); } @@ -307,7 +318,7 @@ static inline int nf_hook(int pf, unsigned int hook, struct sk_buff **pskb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct sk_buff *)) { - return okfn(*pskb); + return 1; } static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} struct flowi; diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index fdc4a9527343..43c09d790b83 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -79,7 +79,7 @@ enum nf_ip_hook_priorities { #ifdef __KERNEL__ extern int ip_route_me_harder(struct sk_buff **pskb); - +extern int ip_xfrm_me_harder(struct sk_buff **pskb); #endif /*__KERNEL__*/ #endif /*__LINUX_IP_NETFILTER_H*/ diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 9d5cd106b344..0d36750fc0f1 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -84,6 +84,7 @@ extern int ptrace_readdata(struct 
task_struct *tsk, unsigned long src, char __us extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); extern int ptrace_attach(struct task_struct *tsk); extern int ptrace_detach(struct task_struct *, unsigned int); +extern void __ptrace_detach(struct task_struct *, unsigned int); extern void ptrace_disable(struct task_struct *); extern int ptrace_check_attach(struct task_struct *task, int kill); extern int ptrace_request(struct task_struct *child, long request, long addr, long data); diff --git a/include/linux/sched.h b/include/linux/sched.h index 9c1da0269a18..b6f51e3a38ec 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -697,12 +697,9 @@ struct task_struct { int lock_depth; /* BKL lock depth */ -#if defined(CONFIG_SMP) - int last_waker_cpu; /* CPU that last woke this task up */ -#if defined(__ARCH_WANT_UNLOCKED_CTXSW) +#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) int oncpu; #endif -#endif int prio, static_prio; struct list_head run_list; prio_array_t *array; diff --git a/include/linux/timex.h b/include/linux/timex.h index 04a4a8cb4ed3..b7ca1204e42a 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -345,6 +345,9 @@ time_interpolator_reset(void) #endif /* !CONFIG_TIME_INTERPOLATION */ +/* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */ +extern u64 current_tick_length(void); + #endif /* KERNEL */ #endif /* LINUX_TIMEX_H */ diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h index bbfac86734ec..89d743cfdfdf 100644 --- a/include/net/bluetooth/rfcomm.h +++ b/include/net/bluetooth/rfcomm.h @@ -33,7 +33,7 @@ #define RFCOMM_DEFAULT_MTU 127 #define RFCOMM_DEFAULT_CREDITS 7 -#define RFCOMM_MAX_L2CAP_MTU 1024 +#define RFCOMM_MAX_L2CAP_MTU 1013 #define RFCOMM_MAX_CREDITS 40 #define RFCOMM_SKB_HEAD_RESERVE 8 diff --git a/include/net/ip.h b/include/net/ip.h index 8de0697b364c..fab3d5b3ab1c 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -41,6 +41,7 @@ struct inet_skb_parm #define IPSKB_XFRM_TUNNEL_SIZE 2 #define IPSKB_XFRM_TRANSFORMED 4 #define IPSKB_FRAG_COMPLETE 8 +#define IPSKB_REROUTED 16 }; struct ipcm_cookie diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h index 05a840837fe7..1880e46ecc9b 100644 --- a/include/net/irda/irda.h +++ b/include/net/irda/irda.h @@ -82,9 +82,9 @@ do { if(!(expr)) { \ #define IRDA_ASSERT_LABEL(label) #endif /* CONFIG_IRDA_DEBUG */ -#define IRDA_WARNING(args...) printk(KERN_WARNING args) -#define IRDA_MESSAGE(args...) printk(KERN_INFO args) -#define IRDA_ERROR(args...) printk(KERN_ERR args) +#define IRDA_WARNING(args...) do { if (net_ratelimit()) printk(KERN_WARNING args); } while (0) +#define IRDA_MESSAGE(args...) do { if (net_ratelimit()) printk(KERN_INFO args); } while (0) +#define IRDA_ERROR(args...) do { if (net_ratelimit()) printk(KERN_ERR args); } while (0) /* * Magic numbers used by Linux-IrDA. 
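The net/irda/irda.h hunk above wraps IRDA_WARNING, IRDA_MESSAGE and IRDA_ERROR in do { if (net_ratelimit()) printk(...); } while (0). The net_ratelimit() check keeps a misbehaving peer from flooding the log, and the do/while(0) wrapper is the usual trick for making a multi-statement macro behave as one statement, so it still takes a trailing semicolon and nests safely in an unbraced if/else. A small user-space analogue (GNU C, with a counter standing in for net_ratelimit()):

#include <stdio.h>

static int ratelimit_ok(void)
{
        static int budget = 2;          /* stand-in for net_ratelimit() */
        return budget-- > 0;
}

#define LOG_WARN(fmt, ...)                                    \
        do {                                                  \
                if (ratelimit_ok())                           \
                        fprintf(stderr, fmt, ##__VA_ARGS__);  \
        } while (0)

int main(void)
{
        for (int i = 0; i < 5; i++)
                if (i & 1)
                        LOG_WARN("odd event %d\n", i);
                else
                        LOG_WARN("even event %d\n", i);
        /* only the first two messages get through; a bare { ... } macro
         * body here would leave the else dangling after the semicolon */
        return 0;
}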
Random numbers which must be unique to diff --git a/include/net/xfrm.h b/include/net/xfrm.h index d09ca0e7d139..d6111a2f0a23 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -866,7 +866,6 @@ extern int xfrm_state_mtu(struct xfrm_state *x, int mtu); extern int xfrm_init_state(struct xfrm_state *x); extern int xfrm4_rcv(struct sk_buff *skb); extern int xfrm4_output(struct sk_buff *skb); -extern int xfrm4_output_finish(struct sk_buff *skb); extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler); extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler); extern int xfrm6_rcv_spi(struct sk_buff **pskb, u32 spi); diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h index 3e5cb5ab2d34..e5618b90996e 100644 --- a/include/scsi/iscsi_if.h +++ b/include/scsi/iscsi_if.h @@ -163,9 +163,6 @@ enum iscsi_param { }; #define ISCSI_PARAM_MAX 14 -typedef uint64_t iscsi_sessionh_t; /* iSCSI Data-Path session handle */ -typedef uint64_t iscsi_connh_t; /* iSCSI Data-Path connection handle */ - #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle) #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr) #define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata)) diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index c60b8ff2f5e4..9c331258bc27 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -433,4 +433,6 @@ struct scsi_lun { /* Used to obtain the PCI location of a device */ #define SCSI_IOCTL_GET_PCI 0x5387 +int scsi_execute_in_process_context(void (*fn)(void *data), void *data); + #endif /* _SCSI_SCSI_H */ diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 16602a547a63..b41cf077e54b 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -63,25 +63,28 @@ struct iscsi_transport { int max_lun; unsigned int max_conn; unsigned int max_cmd_len; - struct Scsi_Host *(*create_session) (struct scsi_transport_template *t, - uint32_t initial_cmdsn); - void (*destroy_session) (struct Scsi_Host *shost); - struct iscsi_cls_conn *(*create_conn) (struct Scsi_Host *shost, + struct iscsi_cls_session *(*create_session) + (struct scsi_transport_template *t, uint32_t sn, uint32_t *sid); + void (*destroy_session) (struct iscsi_cls_session *session); + struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess, uint32_t cid); - int (*bind_conn) (iscsi_sessionh_t session, iscsi_connh_t conn, + int (*bind_conn) (struct iscsi_cls_session *session, + struct iscsi_cls_conn *cls_conn, uint32_t transport_fd, int is_leading); - int (*start_conn) (iscsi_connh_t conn); - void (*stop_conn) (iscsi_connh_t conn, int flag); + int (*start_conn) (struct iscsi_cls_conn *conn); + void (*stop_conn) (struct iscsi_cls_conn *conn, int flag); void (*destroy_conn) (struct iscsi_cls_conn *conn); - int (*set_param) (iscsi_connh_t conn, enum iscsi_param param, + int (*set_param) (struct iscsi_cls_conn *conn, enum iscsi_param param, uint32_t value); - int (*get_conn_param) (void *conndata, enum iscsi_param param, + int (*get_conn_param) (struct iscsi_cls_conn *conn, + enum iscsi_param param, uint32_t *value); - int (*get_session_param) (struct Scsi_Host *shost, + int (*get_session_param) (struct iscsi_cls_session *session, enum iscsi_param param, uint32_t *value); - int (*send_pdu) (iscsi_connh_t conn, struct iscsi_hdr *hdr, + int (*send_pdu) (struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size); - void (*get_stats) (iscsi_connh_t conn, struct iscsi_stats *stats); + 
void (*get_stats) (struct iscsi_cls_conn *conn, + struct iscsi_stats *stats); }; /* @@ -93,15 +96,14 @@ extern int iscsi_unregister_transport(struct iscsi_transport *tt); /* * control plane upcalls */ -extern void iscsi_conn_error(iscsi_connh_t conn, enum iscsi_err error); -extern int iscsi_recv_pdu(iscsi_connh_t conn, struct iscsi_hdr *hdr, +extern void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error); +extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size); struct iscsi_cls_conn { struct list_head conn_list; /* item in connlist */ void *dd_data; /* LLD private data */ struct iscsi_transport *transport; - iscsi_connh_t connh; int active; /* must be accessed with the connlock */ struct device dev; /* sysfs transport/container device */ struct mempool_zone *z_error; @@ -113,7 +115,7 @@ struct iscsi_cls_conn { container_of(_dev, struct iscsi_cls_conn, dev) struct iscsi_cls_session { - struct list_head list; /* item in session_list */ + struct list_head sess_list; /* item in session_list */ struct iscsi_transport *transport; struct device dev; /* sysfs transport/container device */ }; diff --git a/include/video/neomagic.h b/include/video/neomagic.h index 1d69049bd4c1..78b1f15a538f 100644 --- a/include/video/neomagic.h +++ b/include/video/neomagic.h @@ -159,6 +159,7 @@ struct neofb_par { unsigned char PanelDispCntlReg1; unsigned char PanelDispCntlReg2; unsigned char PanelDispCntlReg3; + unsigned char PanelDispCntlRegRead; unsigned char PanelVertCenterReg1; unsigned char PanelVertCenterReg2; unsigned char PanelVertCenterReg3; diff --git a/kernel/cpuset.c b/kernel/cpuset.c index ba42b0a76961..12815d3f1a05 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1977,6 +1977,39 @@ void cpuset_fork(struct task_struct *child) * We don't need to task_lock() this reference to tsk->cpuset, * because tsk is already marked PF_EXITING, so attach_task() won't * mess with it, or task is a failed fork, never visible to attach_task. + * + * Hack: + * + * Set the exiting tasks cpuset to the root cpuset (top_cpuset). + * + * Don't leave a task unable to allocate memory, as that is an + * accident waiting to happen should someone add a callout in + * do_exit() after the cpuset_exit() call that might allocate. + * If a task tries to allocate memory with an invalid cpuset, + * it will oops in cpuset_update_task_memory_state(). + * + * We call cpuset_exit() while the task is still competent to + * handle notify_on_release(), then leave the task attached to + * the root cpuset (top_cpuset) for the remainder of its exit. + * + * To do this properly, we would increment the reference count on + * top_cpuset, and near the very end of the kernel/exit.c do_exit() + * code we would add a second cpuset function call, to drop that + * reference. This would just create an unnecessary hot spot on + * the top_cpuset reference count, to no avail. + * + * Normally, holding a reference to a cpuset without bumping its + * count is unsafe. The cpuset could go away, or someone could + * attach us to a different cpuset, decrementing the count on + * the first cpuset that we never incremented. But in this case, + * top_cpuset isn't going away, and either task has PF_EXITING set, + * which wards off any attach_task() attempts, or task is a failed + * fork, never visible to attach_task. + * + * Another way to do this would be to set the cpuset pointer + * to NULL here, and check in cpuset_update_task_memory_state() + * for a NULL pointer. 
This hack avoids that NULL check, for no + * cost (other than this way too long comment ;). **/ void cpuset_exit(struct task_struct *tsk) @@ -1984,7 +2017,7 @@ void cpuset_exit(struct task_struct *tsk) struct cpuset *cs; cs = tsk->cpuset; - tsk->cpuset = NULL; + tsk->cpuset = &top_cpuset; /* Hack - see comment above */ if (notify_on_release(cs)) { char *pathbuf = NULL; diff --git a/kernel/fork.c b/kernel/fork.c index 8e88b374cee9..fbea12d7a943 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1123,8 +1123,8 @@ static task_t *copy_process(unsigned long clone_flags, p->real_parent = current; p->parent = p->real_parent; + spin_lock(¤t->sighand->siglock); if (clone_flags & CLONE_THREAD) { - spin_lock(¤t->sighand->siglock); /* * Important: if an exit-all has been started then * do not create this new thread - the whole thread @@ -1162,8 +1162,6 @@ static task_t *copy_process(unsigned long clone_flags, */ p->it_prof_expires = jiffies_to_cputime(1); } - - spin_unlock(¤t->sighand->siglock); } /* @@ -1175,8 +1173,6 @@ static task_t *copy_process(unsigned long clone_flags, if (unlikely(p->ptrace & PT_PTRACED)) __ptrace_link(p, current->parent); - attach_pid(p, PIDTYPE_PID, p->pid); - attach_pid(p, PIDTYPE_TGID, p->tgid); if (thread_group_leader(p)) { p->signal->tty = current->signal->tty; p->signal->pgrp = process_group(current); @@ -1186,9 +1182,12 @@ static task_t *copy_process(unsigned long clone_flags, if (p->pid) __get_cpu_var(process_counts)++; } + attach_pid(p, PIDTYPE_TGID, p->tgid); + attach_pid(p, PIDTYPE_PID, p->pid); nr_threads++; total_forks++; + spin_unlock(¤t->sighand->siglock); write_unlock_irq(&tasklist_lock); proc_fork_connector(p); return p; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 2b6e1757aedd..5ae51f1bc7c8 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -418,8 +418,19 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) /* Switch the timer base, if necessary: */ new_base = switch_hrtimer_base(timer, base); - if (mode == HRTIMER_REL) + if (mode == HRTIMER_REL) { tim = ktime_add(tim, new_base->get_time()); + /* + * CONFIG_TIME_LOW_RES is a temporary way for architectures + * to signal that they simply return xtime in + * do_gettimeoffset(). In this case we want to round up by + * resolution when starting a relative timer, to avoid short + * timeouts. This will go away with the GTOD framework. + */ +#ifdef CONFIG_TIME_LOW_RES + tim = ktime_add(tim, base->resolution); +#endif + } timer->expires = tim; enqueue_hrtimer(timer, new_base); diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 41f66365f0d8..8d5a5986d621 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -91,10 +91,8 @@ static int save_highmem_zone(struct zone *zone) * corrected eventually when the cases giving rise to this * are better understood. 
*/ - if (PageReserved(page)) { - printk("highmem reserved page?!\n"); + if (PageReserved(page)) continue; - } BUG_ON(PageNosave(page)); if (PageNosaveFree(page)) continue; diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 4e90905f0e87..2d9d08f72f76 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -153,13 +153,11 @@ static int swsusp_swap_check(void) /* This is called before saving image */ { int i; - if (!swsusp_resume_device) - return -ENODEV; spin_lock(&swap_lock); for (i = 0; i < MAX_SWAPFILES; i++) { if (!(swap_info[i].flags & SWP_WRITEOK)) continue; - if (is_resume_device(swap_info + i)) { + if (!swsusp_resume_device || is_resume_device(swap_info + i)) { spin_unlock(&swap_lock); root_swap = i; return 0; diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 5f33cdb6fff5..d95a72c9279d 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -72,8 +72,8 @@ void ptrace_untrace(task_t *child) */ void __ptrace_unlink(task_t *child) { - if (!child->ptrace) - BUG(); + BUG_ON(!child->ptrace); + child->ptrace = 0; if (!list_empty(&child->ptrace_list)) { list_del_init(&child->ptrace_list); @@ -184,22 +184,27 @@ bad: return retval; } +void __ptrace_detach(struct task_struct *child, unsigned int data) +{ + child->exit_code = data; + /* .. re-parent .. */ + __ptrace_unlink(child); + /* .. and wake it up. */ + if (child->exit_state != EXIT_ZOMBIE) + wake_up_process(child); +} + int ptrace_detach(struct task_struct *child, unsigned int data) { if (!valid_signal(data)) - return -EIO; + return -EIO; /* Architecture-specific hardware disable .. */ ptrace_disable(child); - /* .. re-parent .. */ - child->exit_code = data; - write_lock_irq(&tasklist_lock); - __ptrace_unlink(child); - /* .. and wake it up. */ - if (child->exit_state != EXIT_ZOMBIE) - wake_up_process(child); + if (child->ptrace) + __ptrace_detach(child, data); write_unlock_irq(&tasklist_lock); return 0; @@ -242,8 +247,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in if (write) { copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes); - if (!PageCompound(page)) - set_page_dirty_lock(page); + set_page_dirty_lock(page); } else { copy_from_user_page(vma, page, addr, buf, maddr + offset, bytes); diff --git a/kernel/sched.c b/kernel/sched.c index 87d93be336a1..12d291bf3379 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1204,9 +1204,6 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync) } } - if (p->last_waker_cpu != this_cpu) - goto out_set_cpu; - if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) goto out_set_cpu; @@ -1277,8 +1274,6 @@ out_set_cpu: cpu = task_cpu(p); } - p->last_waker_cpu = this_cpu; - out_activate: #endif /* CONFIG_SMP */ if (old_state == TASK_UNINTERRUPTIBLE) { @@ -1360,12 +1355,9 @@ void fastcall sched_fork(task_t *p, int clone_flags) #ifdef CONFIG_SCHEDSTATS memset(&p->sched_info, 0, sizeof(p->sched_info)); #endif -#if defined(CONFIG_SMP) - p->last_waker_cpu = cpu; -#if defined(__ARCH_WANT_UNLOCKED_CTXSW) +#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) p->oncpu = 0; #endif -#endif #ifdef CONFIG_PREEMPT /* Want to start with kernel preemption disabled. */ task_thread_info(p)->preempt_count = 1; @@ -5066,7 +5058,18 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span, #define MAX_DOMAIN_DISTANCE 32 static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] = - { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] = -1LL }; + { [ 0 ... 
MAX_DOMAIN_DISTANCE-1 ] = +/* + * Architectures may override the migration cost and thus avoid + * boot-time calibration. Unit is nanoseconds. Mostly useful for + * virtualized hardware: + */ +#ifdef CONFIG_DEFAULT_MIGRATION_COST + CONFIG_DEFAULT_MIGRATION_COST +#else + -1LL +#endif +}; /* * Allow override of migration cost - in units of microseconds. diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 71dd6f62efec..7654d55c47f5 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -126,8 +126,6 @@ extern int sysctl_hz_timer; extern int acct_parm[]; #endif -int randomize_va_space = 1; - static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t, ctl_table *, void **); static int proc_doutsstring(ctl_table *table, int write, struct file *filp, diff --git a/kernel/timer.c b/kernel/timer.c index b9dad3994676..fe3a9a9f8328 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -717,12 +717,16 @@ static void second_overflow(void) #endif } -/* in the NTP reference this is called "hardclock()" */ -static void update_wall_time_one_tick(void) +/* + * Returns how many microseconds we need to add to xtime this tick + * in doing an adjustment requested with adjtime. + */ +static long adjtime_adjustment(void) { - long time_adjust_step, delta_nsec; + long time_adjust_step; - if ((time_adjust_step = time_adjust) != 0 ) { + time_adjust_step = time_adjust; + if (time_adjust_step) { /* * We are doing an adjtime thing. Prepare time_adjust_step to * be within bounds. Note that a positive time_adjust means we @@ -733,10 +737,19 @@ static void update_wall_time_one_tick(void) */ time_adjust_step = min(time_adjust_step, (long)tickadj); time_adjust_step = max(time_adjust_step, (long)-tickadj); + } + return time_adjust_step; +} +/* in the NTP reference this is called "hardclock()" */ +static void update_wall_time_one_tick(void) +{ + long time_adjust_step, delta_nsec; + + time_adjust_step = adjtime_adjustment(); + if (time_adjust_step) /* Reduce by this step the amount of time left */ time_adjust -= time_adjust_step; - } delta_nsec = tick_nsec + time_adjust_step * 1000; /* * Advance the phase, once it gets to one microsecond, then @@ -759,6 +772,22 @@ static void update_wall_time_one_tick(void) } /* + * Return how long ticks are at the moment, that is, how much time + * update_wall_time_one_tick will add to xtime next time we call it + * (assuming no calls to do_adjtimex in the meantime). + * The return value is in fixed-point nanoseconds with SHIFT_SCALE-10 + * bits to the right of the binary point. + * This function has no side-effects. 
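current_tick_length(), whose comment ends just above and whose body follows below, reports the current tick length as nanoseconds carrying SHIFT_SCALE - 10 fractional bits, so a caller can accumulate NTP's sub-nanosecond time_adj correction across ticks instead of rounding it away. A stand-alone sketch of that fixed-point encoding, assuming SHIFT_SCALE is 22 as in the timex.h of this period (so 12 fractional bits); the input values are invented:

#include <stdio.h>

#define SHIFT_SCALE 22
#define FRAC_BITS   (SHIFT_SCALE - 10)

int main(void)
{
        long tick_nsec = 999848;        /* illustrative tick length, ns */
        long long time_adj = 2048;      /* 0.5 ns in 2^-12 ns units */

        unsigned long long len =
                ((unsigned long long)tick_nsec << FRAC_BITS) + time_adj;

        printf("tick length = %llu.%04llu ns (raw fixed point %llu)\n",
               len >> FRAC_BITS,
               ((len & ((1ULL << FRAC_BITS) - 1)) * 10000) >> FRAC_BITS,
               len);
        return 0;
}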
+ */ +u64 current_tick_length(void) +{ + long delta_nsec; + + delta_nsec = tick_nsec + adjtime_adjustment() * 1000; + return ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj; +} + +/* * Using a loop looks inefficient, but "ticks" is * usually just one (we shouldn't be losing ticks, * we're doing this this way mainly for interrupt diff --git a/lib/radix-tree.c b/lib/radix-tree.c index c0bd4a914803..1e5b17dc7e3d 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -752,12 +752,14 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) */ nr_cleared_tags = 0; for (tag = 0; tag < RADIX_TREE_TAGS; tag++) { + tags[tag] = 1; if (tag_get(pathp->node, tag, pathp->offset)) { tag_clear(pathp->node, tag, pathp->offset); - tags[tag] = 0; - nr_cleared_tags++; - } else - tags[tag] = 1; + if (!any_tag_set(pathp->node, tag)) { + tags[tag] = 0; + nr_cleared_tags++; + } + } } for (pathp--; nr_cleared_tags && pathp->node; pathp--) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 67f29516662a..508707704d2c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -85,7 +85,7 @@ void free_huge_page(struct page *page) BUG_ON(page_count(page)); INIT_LIST_HEAD(&page->lru); - page[1].mapping = NULL; + page[1].lru.next = NULL; /* reset dtor */ spin_lock(&hugetlb_lock); enqueue_huge_page(page); @@ -105,7 +105,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr) } spin_unlock(&hugetlb_lock); set_page_count(page, 1); - page[1].mapping = (void *)free_huge_page; + page[1].lru.next = (void *)free_huge_page; /* set dtor */ for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i) clear_user_highpage(&page[i], addr); return page; diff --git a/mm/madvise.c b/mm/madvise.c index ae0ae3ea299a..af3d573b0141 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -22,16 +22,23 @@ static long madvise_behavior(struct vm_area_struct * vma, struct mm_struct * mm = vma->vm_mm; int error = 0; pgoff_t pgoff; - int new_flags = vma->vm_flags & ~VM_READHINTMASK; + int new_flags = vma->vm_flags; switch (behavior) { + case MADV_NORMAL: + new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; + break; case MADV_SEQUENTIAL: - new_flags |= VM_SEQ_READ; + new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ; break; case MADV_RANDOM: - new_flags |= VM_RAND_READ; + new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ; break; - default: + case MADV_DONTFORK: + new_flags |= VM_DONTCOPY; + break; + case MADV_DOFORK: + new_flags &= ~VM_DONTCOPY; break; } @@ -177,6 +184,12 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, long error; switch (behavior) { + case MADV_DOFORK: + if (vma->vm_flags & VM_IO) { + error = -EINVAL; + break; + } + case MADV_DONTFORK: case MADV_NORMAL: case MADV_SEQUENTIAL: case MADV_RANDOM: diff --git a/mm/memory.c b/mm/memory.c index 2bee1f21aa8a..9abc6008544b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -82,6 +82,16 @@ EXPORT_SYMBOL(num_physpages); EXPORT_SYMBOL(high_memory); EXPORT_SYMBOL(vmalloc_earlyreserve); +int randomize_va_space __read_mostly = 1; + +static int __init disable_randmaps(char *s) +{ + randomize_va_space = 0; + return 0; +} +__setup("norandmaps", disable_randmaps); + + /* * If a p?d_bad entry is found while walking page tables, report * the error, before resetting entry to p?d_none. Usually (but diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 3bd7fb7e4b75..bedfa4f09c80 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -132,19 +132,29 @@ static int mpol_check_policy(int mode, nodemask_t *nodes) } return nodes_subset(*nodes, node_online_map) ? 
0 : -EINVAL; } + /* Generate a custom zonelist for the BIND policy. */ static struct zonelist *bind_zonelist(nodemask_t *nodes) { struct zonelist *zl; - int num, max, nd; + int num, max, nd, k; max = 1 + MAX_NR_ZONES * nodes_weight(*nodes); - zl = kmalloc(sizeof(void *) * max, GFP_KERNEL); + zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL); if (!zl) return NULL; num = 0; - for_each_node_mask(nd, *nodes) - zl->zones[num++] = &NODE_DATA(nd)->node_zones[policy_zone]; + /* First put in the highest zones from all nodes, then all the next + lower zones etc. Avoid empty zones because the memory allocator + doesn't like them. If you implement node hot removal you + have to fix that. */ + for (k = policy_zone; k >= 0; k--) { + for_each_node_mask(nd, *nodes) { + struct zone *z = &NODE_DATA(nd)->node_zones[k]; + if (z->present_pages > 0) + zl->zones[num++] = z; + } + } zl->zones[num] = NULL; return zl; } @@ -798,6 +808,8 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, nodes_clear(*nodes); if (maxnode == 0 || !nmask) return 0; + if (maxnode > PAGE_SIZE) + return -EINVAL; nlongs = BITS_TO_LONGS(maxnode); if ((maxnode % BITS_PER_LONG) == 0) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index dde04ff4be31..208812b25597 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -56,6 +56,7 @@ long nr_swap_pages; int percpu_pagelist_fraction; static void fastcall free_hot_cold_page(struct page *page, int cold); +static void __free_pages_ok(struct page *page, unsigned int order); /* * results with 256, 32 in the lowmem_reserve sysctl: @@ -169,20 +170,23 @@ static void bad_page(struct page *page) * All pages have PG_compound set. All pages have their ->private pointing at * the head page (even the head page has this). * - * The first tail page's ->mapping, if non-zero, holds the address of the - * compound page's put_page() function. - * - * The order of the allocation is stored in the first tail page's ->index - * This is only for debug at present. This usage means that zero-order pages - * may not be compound. + * The first tail page's ->lru.next holds the address of the compound page's + * put_page() function. Its ->lru.prev holds the order of allocation. + * This usage means that zero-order pages may not be compound. 
*/ + +static void free_compound_page(struct page *page) +{ + __free_pages_ok(page, (unsigned long)page[1].lru.prev); +} + static void prep_compound_page(struct page *page, unsigned long order) { int i; int nr_pages = 1 << order; - page[1].mapping = NULL; - page[1].index = order; + page[1].lru.next = (void *)free_compound_page; /* set dtor */ + page[1].lru.prev = (void *)order; for (i = 0; i < nr_pages; i++) { struct page *p = page + i; @@ -196,7 +200,7 @@ static void destroy_compound_page(struct page *page, unsigned long order) int i; int nr_pages = 1 << order; - if (unlikely(page[1].index != order)) + if (unlikely((unsigned long)page[1].lru.prev != order)) bad_page(page); for (i = 0; i < nr_pages; i++) { @@ -1537,29 +1541,29 @@ static int __initdata node_load[MAX_NUMNODES]; */ static int __init find_next_best_node(int node, nodemask_t *used_node_mask) { - int i, n, val; + int n, val; int min_val = INT_MAX; int best_node = -1; - for_each_online_node(i) { - cpumask_t tmp; + /* Use the local node if we haven't already */ + if (!node_isset(node, *used_node_mask)) { + node_set(node, *used_node_mask); + return node; + } - /* Start from local node */ - n = (node+i) % num_online_nodes(); + for_each_online_node(n) { + cpumask_t tmp; /* Don't want a node to appear more than once */ if (node_isset(n, *used_node_mask)) continue; - /* Use the local node if we haven't already */ - if (!node_isset(node, *used_node_mask)) { - best_node = node; - break; - } - /* Use the distance array to find the distance */ val = node_distance(node, n); + /* Penalize nodes under us ("prefer the next node") */ + val += (n < node); + /* Give preference to headless and unused nodes */ tmp = node_to_cpumask(n); if (!cpus_empty(tmp)) diff --git a/mm/swap.c b/mm/swap.c index 76247424dea1..cce3dda59c59 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -40,7 +40,7 @@ static void put_compound_page(struct page *page) if (put_page_testzero(page)) { void (*dtor)(struct page *page); - dtor = (void (*)(struct page *))page[1].mapping; + dtor = (void (*)(struct page *))page[1].lru.next; (*dtor)(page); } } diff --git a/net/802/p8023.c b/net/802/p8023.c index d23e906456eb..53cf05709283 100644 --- a/net/802/p8023.c +++ b/net/802/p8023.c @@ -59,3 +59,5 @@ void destroy_8023_client(struct datalink_proto *dl) EXPORT_SYMBOL(destroy_8023_client); EXPORT_SYMBOL(make_8023_client); + +MODULE_LICENSE("GPL"); diff --git a/net/atm/signaling.c b/net/atm/signaling.c index e7211a7f382c..93ad59a28ef5 100644 --- a/net/atm/signaling.c +++ b/net/atm/signaling.c @@ -56,7 +56,8 @@ static void sigd_put_skb(struct sk_buff *skb) remove_wait_queue(&sigd_sleep,&wait); #else if (!sigd) { - printk(KERN_WARNING "atmsvc: no signaling demon\n"); + if (net_ratelimit()) + printk(KERN_WARNING "atmsvc: no signaling demon\n"); kfree_skb(skb); return; } diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index bdb6458c6bd5..97bdec73d17e 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -143,13 +143,15 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) static int hci_sock_release(struct socket *sock) { struct sock *sk = sock->sk; - struct hci_dev *hdev = hci_pi(sk)->hdev; + struct hci_dev *hdev; BT_DBG("sock %p sk %p", sock, sk); if (!sk) return 0; + hdev = hci_pi(sk)->hdev; + bt_sock_unlink(&hci_sk_list, sk); if (hdev) { @@ -311,14 +313,18 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add { struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; struct sock *sk = sock->sk; + struct hci_dev *hdev 
= hci_pi(sk)->hdev; BT_DBG("sock %p sk %p", sock, sk); + if (!hdev) + return -EBADFD; + lock_sock(sk); *addr_len = sizeof(*haddr); haddr->hci_family = AF_BLUETOOTH; - haddr->hci_dev = hci_pi(sk)->hdev->id; + haddr->hci_dev = hdev->id; release_sock(sk); return 0; diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 0d89d6434136..5b4253c61f62 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -46,13 +46,15 @@ #include <net/bluetooth/l2cap.h> #include <net/bluetooth/rfcomm.h> -#define VERSION "1.6" - #ifndef CONFIG_BT_RFCOMM_DEBUG #undef BT_DBG #define BT_DBG(D...) #endif +#define VERSION "1.7" + +static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; + static struct task_struct *rfcomm_thread; static DECLARE_MUTEX(rfcomm_sem); @@ -623,7 +625,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst /* Set L2CAP options */ sk = sock->sk; lock_sock(sk); - l2cap_pi(sk)->imtu = RFCOMM_MAX_L2CAP_MTU; + l2cap_pi(sk)->imtu = l2cap_mtu; release_sock(sk); s = rfcomm_session_add(sock, BT_BOUND); @@ -1868,7 +1870,7 @@ static int rfcomm_add_listener(bdaddr_t *ba) /* Set L2CAP options */ sk = sock->sk; lock_sock(sk); - l2cap_pi(sk)->imtu = RFCOMM_MAX_L2CAP_MTU; + l2cap_pi(sk)->imtu = l2cap_mtu; release_sock(sk); /* Start listening on the socket */ @@ -2070,6 +2072,9 @@ static void __exit rfcomm_exit(void) module_init(rfcomm_init); module_exit(rfcomm_exit); +module_param(l2cap_mtu, uint, 0644); +MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection"); + MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth RFCOMM ver " VERSION); MODULE_VERSION(VERSION); diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index c06cb0983530..6bb0c7eb1ef0 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -805,8 +805,8 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb, print_error: if (skb->dev != NULL) { printk("[%s]", skb->dev->name); - if (bridge_parent(skb->dev)) - printk("[%s]", bridge_parent(skb->dev)->name); + if (realoutdev) + printk("[%s]", realoutdev->name); } printk(" head:%p, raw:%p, data:%p\n", skb->head, skb->mac.raw, skb->data); diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index cc047f7fb6ef..35cf3a074087 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -67,7 +67,7 @@ void br_stp_disable_bridge(struct net_bridge *br) { struct net_bridge_port *p; - spin_lock(&br->lock); + spin_lock_bh(&br->lock); list_for_each_entry(p, &br->port_list, list) { if (p->state != BR_STATE_DISABLED) br_stp_disable_port(p); @@ -76,7 +76,7 @@ void br_stp_disable_bridge(struct net_bridge *br) br->topology_change = 0; br->topology_change_detected = 0; - spin_unlock(&br->lock); + spin_unlock_bh(&br->lock); del_timer_sync(&br->hello_timer); del_timer_sync(&br->topology_change_timer); diff --git a/net/core/datagram.c b/net/core/datagram.c index f8d322e1ea92..b8ce6bf81188 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -247,49 +247,74 @@ EXPORT_SYMBOL(skb_kill_datagram); int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, struct iovec *to, int len) { - int i, err, fraglen, end = 0; - struct sk_buff *next = skb_shinfo(skb)->frag_list; + int start = skb_headlen(skb); + int i, copy = start - offset; - if (!len) - return 0; + /* Copy header. 
*/ + if (copy > 0) { + if (copy > len) + copy = len; + if (memcpy_toiovec(to, skb->data + offset, copy)) + goto fault; + if ((len -= copy) == 0) + return 0; + offset += copy; + } -next_skb: - fraglen = skb_headlen(skb); - i = -1; + /* Copy paged appendix. Hmm... why does this look so complicated? */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + int end; - while (1) { - int start = end; + BUG_TRAP(start <= offset + len); - if ((end += fraglen) > offset) { - int copy = end - offset, o = offset - start; + end = start + skb_shinfo(skb)->frags[i].size; + if ((copy = end - offset) > 0) { + int err; + u8 *vaddr; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + struct page *page = frag->page; if (copy > len) copy = len; - if (i == -1) - err = memcpy_toiovec(to, skb->data + o, copy); - else { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - struct page *page = frag->page; - void *p = kmap(page) + frag->page_offset + o; - err = memcpy_toiovec(to, p, copy); - kunmap(page); - } + vaddr = kmap(page); + err = memcpy_toiovec(to, vaddr + frag->page_offset + + offset - start, copy); + kunmap(page); if (err) goto fault; if (!(len -= copy)) return 0; offset += copy; } - if (++i >= skb_shinfo(skb)->nr_frags) - break; - fraglen = skb_shinfo(skb)->frags[i].size; + start = end; } - if (next) { - skb = next; - BUG_ON(skb_shinfo(skb)->frag_list); - next = skb->next; - goto next_skb; + + if (skb_shinfo(skb)->frag_list) { + struct sk_buff *list = skb_shinfo(skb)->frag_list; + + for (; list; list = list->next) { + int end; + + BUG_TRAP(start <= offset + len); + + end = start + list->len; + if ((copy = end - offset) > 0) { + if (copy > len) + copy = len; + if (skb_copy_datagram_iovec(list, + offset - start, + to, copy)) + goto fault; + if ((len -= copy) == 0) + return 0; + offset += copy; + } + start = end; + } } + if (!len) + return 0; + fault: return -EFAULT; } diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 4d1c40972a4b..e7bbff4340bb 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -192,7 +192,7 @@ int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts = 1; /* Control parameter - ignore bogus broadcast responses? */ -int sysctl_icmp_ignore_bogus_error_responses; +int sysctl_icmp_ignore_bogus_error_responses = 1; /* * Configurable global rate limit. 
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index abe23923e4e7..9981dcd68f11 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -830,7 +830,8 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) skb->h.raw = skb->nh.raw; skb->nh.raw = skb_push(skb, gre_hlen); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); - IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE|IPSKB_XFRM_TRANSFORMED); + IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | + IPSKB_REROUTED); dst_release(skb->dst); skb->dst = &rt->u.dst; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 3324fbfe528a..57d290d89ec2 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -207,8 +207,10 @@ static inline int ip_finish_output(struct sk_buff *skb) { #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) /* Policy lookup after SNAT yielded a new policy */ - if (skb->dst->xfrm != NULL) - return xfrm4_output_finish(skb); + if (skb->dst->xfrm != NULL) { + IPCB(skb)->flags |= IPSKB_REROUTED; + return dst_output(skb); + } #endif if (skb->len > dst_mtu(skb->dst) && !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size)) @@ -271,8 +273,9 @@ int ip_mc_output(struct sk_buff *skb) newskb->dev, ip_dev_loopback_xmit); } - return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev, - ip_finish_output); + return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev, + ip_finish_output, + !(IPCB(skb)->flags & IPSKB_REROUTED)); } int ip_output(struct sk_buff *skb) @@ -284,8 +287,9 @@ int ip_output(struct sk_buff *skb) skb->dev = dev; skb->protocol = htons(ETH_P_IP); - return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev, - ip_finish_output); + return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev, + ip_finish_output, + !(IPCB(skb)->flags & IPSKB_REROUTED)); } int ip_queue_xmit(struct sk_buff *skb, int ipfragok) diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index e5cbe72c6b80..03d13742a4b8 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -622,7 +622,8 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) skb->h.raw = skb->nh.raw; skb->nh.raw = skb_push(skb, sizeof(struct iphdr)); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); - IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE|IPSKB_XFRM_TRANSFORMED); + IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | + IPSKB_REROUTED); dst_release(skb->dst); skb->dst = &rt->u.dst; diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 52a3d7c57907..ed42cdc57cd9 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -78,6 +78,47 @@ int ip_route_me_harder(struct sk_buff **pskb) } EXPORT_SYMBOL(ip_route_me_harder); +#ifdef CONFIG_XFRM +int ip_xfrm_me_harder(struct sk_buff **pskb) +{ + struct flowi fl; + unsigned int hh_len; + struct dst_entry *dst; + + if (IPCB(*pskb)->flags & IPSKB_XFRM_TRANSFORMED) + return 0; + if (xfrm_decode_session(*pskb, &fl, AF_INET) < 0) + return -1; + + dst = (*pskb)->dst; + if (dst->xfrm) + dst = ((struct xfrm_dst *)dst)->route; + dst_hold(dst); + + if (xfrm_lookup(&dst, &fl, (*pskb)->sk, 0) < 0) + return -1; + + dst_release((*pskb)->dst); + (*pskb)->dst = dst; + + /* Change in oif may mean change in hh_len. 
*/ + hh_len = (*pskb)->dst->dev->hard_header_len; + if (skb_headroom(*pskb) < hh_len) { + struct sk_buff *nskb; + + nskb = skb_realloc_headroom(*pskb, hh_len); + if (!nskb) + return -1; + if ((*pskb)->sk) + skb_set_owner_w(nskb, (*pskb)->sk); + kfree_skb(*pskb); + *pskb = nskb; + } + return 0; +} +EXPORT_SYMBOL(ip_xfrm_me_harder); +#endif + void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *); EXPORT_SYMBOL(ip_nat_decode_session); diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c index 92c54999a19d..7c3f7d380240 100644 --- a/net/ipv4/netfilter/ip_nat_standalone.c +++ b/net/ipv4/netfilter/ip_nat_standalone.c @@ -235,19 +235,19 @@ ip_nat_out(unsigned int hooknum, return NF_ACCEPT; ret = ip_nat_fn(hooknum, pskb, in, out, okfn); +#ifdef CONFIG_XFRM if (ret != NF_DROP && ret != NF_STOLEN && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) { enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); if (ct->tuplehash[dir].tuple.src.ip != ct->tuplehash[!dir].tuple.dst.ip -#ifdef CONFIG_XFRM || ct->tuplehash[dir].tuple.src.u.all != ct->tuplehash[!dir].tuple.dst.u.all -#endif ) - return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP; + return ip_xfrm_me_harder(pskb) == 0 ? ret : NF_DROP; } +#endif return ret; } diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 167619f638c6..6c8624a54933 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -529,15 +529,10 @@ static int init_or_cleanup(int init) goto cleanup_localinops; } #endif - - /* For use by REJECT target */ - ip_ct_attach = __nf_conntrack_attach; - return ret; cleanup: synchronize_net(); - ip_ct_attach = NULL; #ifdef CONFIG_SYSCTL unregister_sysctl_table(nf_ct_ipv4_sysctl_header); cleanup_localinops: diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index d4df0ddd424b..32ad229b4fed 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -152,10 +152,16 @@ error_nolock: goto out_exit; } -int xfrm4_output_finish(struct sk_buff *skb) +static int xfrm4_output_finish(struct sk_buff *skb) { int err; +#ifdef CONFIG_NETFILTER + if (!skb->dst->xfrm) { + IPCB(skb)->flags |= IPSKB_REROUTED; + return dst_output(skb); + } +#endif while (likely((err = xfrm4_output_one(skb)) == 0)) { nf_reset(skb); @@ -178,6 +184,7 @@ int xfrm4_output_finish(struct sk_buff *skb) int xfrm4_output(struct sk_buff *skb) { - return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev, - xfrm4_output_finish); + return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev, + xfrm4_output_finish, + !(IPCB(skb)->flags & IPSKB_REROUTED)); } diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index fcf883183cef..21eb725e885f 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -42,6 +42,7 @@ #include <linux/net.h> #include <linux/skbuff.h> #include <linux/init.h> +#include <linux/netfilter.h> #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> @@ -255,6 +256,7 @@ out: struct icmpv6_msg { struct sk_buff *skb; int offset; + uint8_t type; }; static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) @@ -266,6 +268,8 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset, to, len, csum); skb->csum = csum_block_add(skb->csum, csum, odd); + if (!(msg->type & ICMPV6_INFOMSG_MASK)) + nf_ct_attach(skb, org_skb); return 0; } @@ -403,6 +407,7 @@ void 
icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, msg.skb = skb; msg.offset = skb->nh.raw - skb->data; + msg.type = type; len = skb->len - msg.offset; len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr)); @@ -500,6 +505,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) msg.skb = skb; msg.offset = 0; + msg.type = ICMPV6_ECHO_REPLY; err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl, diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index c745717b4ce2..0e6d1d4bbd5c 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c @@ -160,6 +160,8 @@ static void send_reset(struct sk_buff *oldskb) csum_partial((char *)tcph, sizeof(struct tcphdr), 0)); + nf_ct_attach(nskb, oldskb); + NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, nskb, NULL, nskb->dst->dev, dst_output); } diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 738376cf0c51..ae20a0ec9bd8 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -803,10 +803,7 @@ back_from_confirm: err = rawv6_push_pending_frames(sk, &fl, rp); } done: - ip6_dst_store(sk, dst, - ipv6_addr_equal(&fl.fl6_dst, &np->daddr) ? - &np->daddr : NULL); - + dst_release(dst); release_sock(sk); out: fl6_sock_release(flowlabel); diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 99c0a0fa4a97..a8e5544da93e 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -102,8 +102,6 @@ config NF_CT_NETLINK help This option enables support for a netlink-based userspace interface -endmenu - config NETFILTER_XTABLES tristate "Netfilter Xtables support (required for ip_tables)" help @@ -128,7 +126,7 @@ config NETFILTER_XT_TARGET_CONNMARK tristate '"CONNMARK" target support' depends on NETFILTER_XTABLES depends on IP_NF_MANGLE || IP6_NF_MANGLE - depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4) + depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK) help This option adds a `CONNMARK' target, which allows one to manipulate the connection mark value. Similar to the MARK target, but @@ -189,7 +187,7 @@ config NETFILTER_XT_MATCH_COMMENT config NETFILTER_XT_MATCH_CONNBYTES tristate '"connbytes" per-connection counter match support' depends on NETFILTER_XTABLES - depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || NF_CT_ACCT + depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || (NF_CT_ACCT && NF_CONNTRACK) help This option adds a `connbytes' match, which allows you to match the number of bytes and/or packets for each direction within a connection. @@ -200,7 +198,7 @@ config NETFILTER_XT_MATCH_CONNBYTES config NETFILTER_XT_MATCH_CONNMARK tristate '"connmark" connection mark match support' depends on NETFILTER_XTABLES - depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || NF_CONNTRACK_MARK + depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK) help This option adds a `connmark' match, which allows you to match the connection mark value previously set for the session by `CONNMARK'. @@ -361,3 +359,5 @@ config NETFILTER_XT_MATCH_TCPMSS To compile it as a module, choose M here. If unsure, say N. 
+endmenu + diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 0ce337a1d974..d622ddf08bb0 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1556,6 +1556,8 @@ void nf_conntrack_cleanup(void) { int i; + ip_ct_attach = NULL; + /* This makes sure all current packets have passed through netfilter framework. Roll on, two-stage module delete... */ @@ -1715,6 +1717,9 @@ int __init nf_conntrack_init(void) nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto; write_unlock_bh(&nf_conntrack_lock); + /* For use by REJECT target */ + ip_ct_attach = __nf_conntrack_attach; + /* Set up fake conntrack: - to never be deleted, not in any hashes */ atomic_set(&nf_conntrack_untracked.ct_general.use, 1); diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index df99138c3b3b..6492ed66fb3c 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -864,7 +864,9 @@ static int csum6(const struct sk_buff *skb, unsigned int dataoff) { return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, skb->len - dataoff, IPPROTO_TCP, - skb->ip_summed == CHECKSUM_HW ? skb->csum + skb->ip_summed == CHECKSUM_HW + ? csum_sub(skb->csum, + skb_checksum(skb, 0, dataoff, 0)) : skb_checksum(skb, dataoff, skb->len - dataoff, 0)); } diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 4264dd079a16..831d206344e0 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -161,7 +161,9 @@ static int csum6(const struct sk_buff *skb, unsigned int dataoff) { return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, skb->len - dataoff, IPPROTO_UDP, - skb->ip_summed == CHECKSUM_HW ? skb->csum + skb->ip_summed == CHECKSUM_HW + ? 
csum_sub(skb->csum, + skb_checksum(skb, 0, dataoff, 0)) : skb_checksum(skb, dataoff, skb->len - dataoff, 0)); } diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 4ae1538c54a9..43e72419c868 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -238,7 +238,7 @@ int genl_register_family(struct genl_family *family) sizeof(struct nlattr *), GFP_KERNEL); if (family->attrbuf == NULL) { err = -ENOMEM; - goto errout; + goto errout_locked; } } else family->attrbuf = NULL; @@ -288,7 +288,7 @@ int genl_unregister_family(struct genl_family *family) return -ENOENT; } -static inline int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, +static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) { struct genl_ops *ops; @@ -375,7 +375,7 @@ static void genl_rcv(struct sock *sk, int len) do { if (genl_trylock()) return; - netlink_run_queue(sk, &qlen, &genl_rcv_msg); + netlink_run_queue(sk, &qlen, genl_rcv_msg); genl_unlock(); } while (qlen && genl_sock && genl_sock->sk_receive_queue.qlen); } @@ -549,10 +549,8 @@ static int __init genl_init(void) netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV); genl_sock = netlink_kernel_create(NETLINK_GENERIC, GENL_MAX_ID, genl_rcv, THIS_MODULE); - if (genl_sock == NULL) { + if (genl_sock == NULL) panic("GENL: Cannot initialize generic netlink\n"); - return -ENOMEM; - } return 0; @@ -560,7 +558,6 @@ errout_register: genl_unregister_family(&genl_ctrl); errout: panic("GENL: Cannot register controller: %d\n", err); - return err; } subsys_initcall(genl_init); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index dbf4620768d6..98ec53bd3ac7 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -889,7 +889,9 @@ restart: xfrm_pol_put(policy); if (dst) dst_free(dst); - goto restart; + + err = -EHOSTUNREACH; + goto error; } dst->next = policy->bundles; policy->bundles = dst; |
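A few of the hunks above are easier to follow with a standalone model. The kernel/timer.c change factors the adjtime step out of update_wall_time_one_tick() into a side-effect-free helper so that current_tick_length() can report the length of the upcoming tick without modifying time_adjust. The sketch below is a minimal user-space model of that split; the variable values, the demo names (adjtime_adjustment, current_tick_length_demo) and the simplified clamping are illustrative stand-ins, not the kernel's timekeeping code.

/* Model of the adjtime_adjustment()/current_tick_length() split above. */
#include <stdio.h>
#include <stdint.h>

#define SHIFT_SCALE 22			/* phase scale shift, as in the kernel's timex.h */

static long time_adjust;		/* remaining adjtime offset, in usec */
static long tickadj = 500;		/* max adjustment applied per tick, in usec */
static long tick_nsec = 1000000;	/* nominal tick length for HZ=1000, in nsec */
static long time_adj;			/* NTP phase adjustment (fixed point), 0 here */

/* How many microseconds to fold into xtime this tick; no side effects. */
static long adjtime_adjustment(void)
{
	long step = time_adjust;

	if (step > tickadj)
		step = tickadj;
	else if (step < -tickadj)
		step = -tickadj;
	return step;
}

/* Fixed-point length of the next tick, mirroring current_tick_length(). */
static uint64_t current_tick_length_demo(void)
{
	long delta_nsec = tick_nsec + adjtime_adjustment() * 1000;

	return ((uint64_t)delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
}

int main(void)
{
	time_adjust = 1200;	/* pretend adjtime(2) asked for +1.2 ms */
	printf("next tick length: %llu (fixed-point nsec)\n",
	       (unsigned long long)current_tick_length_demo());
	return 0;
}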
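The lib/radix-tree.c hunk changes when a cleared tag is propagated toward the root during deletion: a tag only counts as cleared at a level if no sibling slot in the same node still carries it. The following is a simplified one-level model of that rule, assuming stand-in types (fake_node, delete_slot) and a small fixed node size rather than the real radix-tree layout.

/* One-level model of the tag-propagation fix in radix_tree_delete(). */
#include <stdio.h>
#include <string.h>

#define SLOTS 4
#define TAGS  2

struct fake_node {
	unsigned char tag[TAGS][SLOTS];	/* 1 if the slot carries the tag */
};

static int any_tag_set(const struct fake_node *node, int tag)
{
	for (int i = 0; i < SLOTS; i++)
		if (node->tag[tag][i])
			return 1;
	return 0;
}

/* Returns a bitmask of tags that must also be cleared in the parent. */
static unsigned int delete_slot(struct fake_node *node, int slot)
{
	unsigned int propagate = 0;

	for (int tag = 0; tag < TAGS; tag++) {
		if (node->tag[tag][slot]) {
			node->tag[tag][slot] = 0;
			/* The old code propagated unconditionally here; the fix
			 * first checks whether a sibling slot still has the tag. */
			if (!any_tag_set(node, tag))
				propagate |= 1u << tag;
		}
	}
	return propagate;
}

int main(void)
{
	struct fake_node node;

	memset(&node, 0, sizeof(node));
	node.tag[0][1] = 1;	/* tag 0 set on slots 1 and 2 */
	node.tag[0][2] = 1;

	printf("after deleting slot 1, propagate mask: %#x\n", delete_slot(&node, 1));
	printf("after deleting slot 2, propagate mask: %#x\n", delete_slot(&node, 2));
	return 0;
}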
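The mm/page_alloc.c, mm/hugetlb.c and mm/swap.c hunks move the compound-page destructor and the allocation order out of page[1].mapping / page[1].index and into the otherwise unused page[1].lru.next / page[1].lru.prev fields of the first tail page. The sketch below models that storage scheme in user space; fake_page, prep_compound_demo and friends are stand-ins for the real struct page and helpers, and the function-pointer/void* casts rely on the same common extension the kernel uses.

/* Model of storing the compound-page dtor and order in the first tail page. */
#include <stdio.h>

struct fake_list_head { void *next, *prev; };

struct fake_page {
	struct fake_list_head lru;	/* unused on tail pages, so reusable */
};

typedef void (*page_dtor_t)(struct fake_page *page);

/* Mirrors free_compound_page(): the order is read back from lru.prev. */
static void free_compound_demo(struct fake_page *page)
{
	unsigned long order = (unsigned long)page[1].lru.prev;

	printf("freeing compound page of order %lu\n", order);
}

/* Mirrors prep_compound_page(): stash dtor and order in the first tail page. */
static void prep_compound_demo(struct fake_page *page, unsigned long order)
{
	page[1].lru.next = (void *)free_compound_demo;	/* set dtor */
	page[1].lru.prev = (void *)order;
}

/* Mirrors put_compound_page(): recover the dtor and invoke it. */
static void put_compound_demo(struct fake_page *page)
{
	page_dtor_t dtor = (page_dtor_t)page[1].lru.next;

	dtor(page);
}

int main(void)
{
	struct fake_page pages[4];	/* an order-2 "compound page" */

	prep_compound_demo(pages, 2);
	put_compound_demo(pages);
	return 0;
}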
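The ip_output.c and xfrm4_output.c hunks switch NF_HOOK to NF_HOOK_COND with the condition !(IPCB(skb)->flags & IPSKB_REROUTED), so a packet that was rerouted through xfrm after SNAT does not traverse the POST_ROUTING hook a second time. The following user-space sketch shows only the control-flow idea of a conditional hook; the flag value, fake_skb and nf_hook_cond_demo are illustrative assumptions, not the netfilter API.

/* Model of the NF_HOOK_COND / IPSKB_REROUTED pattern. */
#include <stdio.h>

#define IPSKB_REROUTED 0x08	/* stand-in for the real flag bit */

struct fake_skb {
	unsigned int flags;
};

typedef int (*okfn_t)(struct fake_skb *skb);

static int hook_traverse(struct fake_skb *skb, okfn_t okfn)
{
	printf("POST_ROUTING hook runs\n");
	return okfn(skb);
}

/* Conditional variant: skip the hook and call the continuation directly. */
static int nf_hook_cond_demo(struct fake_skb *skb, okfn_t okfn, int cond)
{
	return cond ? hook_traverse(skb, okfn) : okfn(skb);
}

static int finish_output(struct fake_skb *skb)
{
	printf("ip_finish_output: packet leaves\n");
	return 0;
}

int main(void)
{
	struct fake_skb first = { .flags = 0 };
	struct fake_skb rerouted = { .flags = IPSKB_REROUTED };

	/* First pass through ip_output(): the hook is traversed. */
	nf_hook_cond_demo(&first, finish_output, !(first.flags & IPSKB_REROUTED));
	/* After xfrm rerouting: the hook is skipped to avoid a second pass. */
	nf_hook_cond_demo(&rerouted, finish_output, !(rerouted.flags & IPSKB_REROUTED));
	return 0;
}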