-rw-r--r--  Documentation/feature-removal-schedule.txt     |  16
-rw-r--r--  Documentation/scheduler/sched-stats.txt         |  33
-rw-r--r--  MAINTAINERS                                     |  14
-rw-r--r--  arch/s390/Kconfig                               |   2
-rw-r--r--  arch/s390/include/asm/cacheflush.h              |  23
-rw-r--r--  arch/s390/include/asm/tlb.h                     |   1
-rw-r--r--  arch/s390/lib/uaccess_std.c                     |  10
-rw-r--r--  arch/s390/mm/pgtable.c                          |   3
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c             |  12
-rw-r--r--  drivers/media/rc/ir-lirc-codec.c                |   6
-rw-r--r--  drivers/media/rc/keymaps/rc-rc6-mce.c           |   6
-rw-r--r--  drivers/media/rc/mceusb.c                       |   9
-rw-r--r--  drivers/media/rc/nuvoton-cir.c                  |   6
-rw-r--r--  drivers/media/rc/streamzap.c                    |  14
-rw-r--r--  drivers/media/video/gspca/zc3xx.c               |  31
-rw-r--r--  drivers/media/video/hdpvr/hdpvr-core.c          |  24
-rw-r--r--  drivers/media/video/hdpvr/hdpvr-i2c.c           |  30
-rw-r--r--  drivers/media/video/hdpvr/hdpvr.h               |   3
-rw-r--r--  drivers/media/video/ir-kbd-i2c.c                |  13
-rw-r--r--  drivers/media/video/pvrusb2/pvrusb2-i2c-core.c  |   1
-rw-r--r--  drivers/media/video/saa7115.c                   |   2
-rw-r--r--  drivers/s390/block/dasd_alias.c                 |   6
-rw-r--r--  drivers/s390/cio/qdio_main.c                    |   4
-rw-r--r--  drivers/staging/lirc/lirc_zilog.c               |  32
-rw-r--r--  fs/eventpoll.c                                  |  16
-rw-r--r--  fs/exec.c                                       |   4
-rw-r--r--  fs/exofs/inode.c                                |   2
-rw-r--r--  fs/fcntl.c                                      |   2
-rw-r--r--  fs/ioctl.c                                      |   7
-rw-r--r--  include/linux/fs.h                              |   5
-rw-r--r--  include/linux/res_counter.h                     |  20
-rw-r--r--  kernel/perf_event.c                             |  10
-rw-r--r--  kernel/sched_rt.c                               |   2
-rw-r--r--  kernel/watchdog.c                               |  43
-rw-r--r--  mm/huge_memory.c                                |   7
-rw-r--r--  mm/memcontrol.c                                 |  67
-rw-r--r--  mm/memory-failure.c                             |  94
-rw-r--r--  mm/migrate.c                                    |   7
38 files changed, 405 insertions(+), 182 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index b959659c5df4..b3f35e5f9c95 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -603,3 +603,19 @@ Why: The adm9240, w83792d and w83793 hardware monitoring drivers have
Who: Jean Delvare <khali@linux-fr.org>
----------------------------
+
+What: noswapaccount kernel command line parameter
+When: 2.6.40
+Why:	The original implementation of the memsw feature enabled by
+	CONFIG_CGROUP_MEM_RES_CTLR_SWAP could be disabled with the noswapaccount
+	kernel parameter (introduced in 2.6.29-rc1). Later on, this decision
+	turned out to be suboptimal: it does not allow the feature to be
+	compiled in yet disabled by default, with only interested users
+	enabling it (e.g. general distribution kernels might need this).
+	Therefore the swapaccount[=0|1] parameter (introduced in 2.6.37) was
+	added, which provides both possibilities. Removing noswapaccount
+	leaves fewer command line parameters with the same functionality and
+	also allows the parameter handling to be cleaned up a bit.
+Who: Michal Hocko <mhocko@suse.cz>
+
+----------------------------
diff --git a/Documentation/scheduler/sched-stats.txt b/Documentation/scheduler/sched-stats.txt
index 01e69404ee5e..1cd5d51bc761 100644
--- a/Documentation/scheduler/sched-stats.txt
+++ b/Documentation/scheduler/sched-stats.txt
@@ -1,3 +1,7 @@
+Version 15 of schedstats dropped some of the sched_yield() counters:
+yld_exp_empty, yld_act_empty and yld_both_empty. Otherwise, it is
+identical to version 14.
+
Version 14 of schedstats includes support for sched_domains, which hit the
mainline kernel in 2.6.20 although it is identical to the stats from version
12 which was in the kernel from 2.6.13-2.6.19 (version 13 never saw a kernel
@@ -28,32 +32,25 @@ to write their own scripts, the fields are described here.
CPU statistics
--------------
-cpu<N> 1 2 3 4 5 6 7 8 9 10 11 12
-
-NOTE: In the sched_yield() statistics, the active queue is considered empty
- if it has only one process in it, since obviously the process calling
- sched_yield() is that process.
+cpu<N> 1 2 3 4 5 6 7 8 9
-First four fields are sched_yield() statistics:
- 1) # of times both the active and the expired queue were empty
- 2) # of times just the active queue was empty
- 3) # of times just the expired queue was empty
- 4) # of times sched_yield() was called
+First field is a sched_yield() statistic:
+ 1) # of times sched_yield() was called
Next three are schedule() statistics:
- 5) # of times we switched to the expired queue and reused it
- 6) # of times schedule() was called
- 7) # of times schedule() left the processor idle
+ 2) # of times we switched to the expired queue and reused it
+ 3) # of times schedule() was called
+ 4) # of times schedule() left the processor idle
Next two are try_to_wake_up() statistics:
- 8) # of times try_to_wake_up() was called
- 9) # of times try_to_wake_up() was called to wake up the local cpu
+ 5) # of times try_to_wake_up() was called
+ 6) # of times try_to_wake_up() was called to wake up the local cpu
Next three are statistics describing scheduling latency:
- 10) sum of all time spent running by tasks on this processor (in jiffies)
- 11) sum of all time spent waiting to run by tasks on this processor (in
+ 7) sum of all time spent running by tasks on this processor (in jiffies)
+ 8) sum of all time spent waiting to run by tasks on this processor (in
jiffies)
- 12) # of timeslices run on this cpu
+ 9) # of timeslices run on this cpu
Domain statistics
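
As an illustrative aside (not part of the patch above): the nine fields of the
version-15 cpu<N> line documented here can be pulled apart with a small
userspace reader. This is only a sketch; the field names are local to the
example.

/* Sketch: read /proc/schedstat and print the nine version-15 cpu<N> fields. */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/schedstat", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		unsigned int cpu;
		unsigned long long yld, exp_reuse, sched, sched_idle;
		unsigned long long ttwu, ttwu_local, run, wait, slices;

		/* cpu<N> 1 2 3 4 5 6 7 8 9 */
		if (sscanf(line,
			   "cpu%u %llu %llu %llu %llu %llu %llu %llu %llu %llu",
			   &cpu, &yld, &exp_reuse, &sched, &sched_idle,
			   &ttwu, &ttwu_local, &run, &wait, &slices) != 10)
			continue;	/* skip version, timestamp and domain lines */

		printf("cpu%u: yield=%llu exp_reuse=%llu sched=%llu idle=%llu "
		       "ttwu=%llu ttwu_local=%llu run=%llu wait=%llu slices=%llu\n",
		       cpu, yld, exp_reuse, sched, sched_idle,
		       ttwu, ttwu_local, run, wait, slices);
	}
	fclose(f);
	return 0;
}
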
diff --git a/MAINTAINERS b/MAINTAINERS
index 445537d46e7a..9511bff301c9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -978,6 +978,8 @@ S: Maintained
F: arch/arm/plat-samsung/
F: arch/arm/plat-s3c24xx/
F: arch/arm/plat-s5p/
+F: drivers/*/*s3c2410*
+F: drivers/*/*/*s3c2410*
ARM/S3C2410 ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
@@ -5614,18 +5616,20 @@ F: include/linux/sfi*.h
SIMTEC EB110ATX (Chalice CATS)
P: Ben Dooks
-M: Vincent Sanders <support@simtec.co.uk>
+P: Vincent Sanders <vince@simtec.co.uk>
+M: Simtec Linux Team <linux@simtec.co.uk>
W: http://www.simtec.co.uk/products/EB110ATX/
S: Supported
SIMTEC EB2410ITX (BAST)
P: Ben Dooks
-M: Vincent Sanders <support@simtec.co.uk>
+P: Vincent Sanders <vince@simtec.co.uk>
+M: Simtec Linux Team <linux@simtec.co.uk>
W: http://www.simtec.co.uk/products/EB2410ITX/
S: Supported
-F: arch/arm/mach-s3c2410/
-F: drivers/*/*s3c2410*
-F: drivers/*/*/*s3c2410*
+F: arch/arm/mach-s3c2410/mach-bast.c
+F: arch/arm/mach-s3c2410/bast-ide.c
+F: arch/arm/mach-s3c2410/bast-irq.c
TI DAVINCI MACHINE SUPPORT
M: Kevin Hilman <khilman@deeprootsystems.com>
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ff19efdf6fef..636bcb81d068 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -406,7 +406,7 @@ config QDIO
If unsure, say Y.
config CHSC_SCH
- def_tristate y
+ def_tristate m
prompt "Support for CHSC subchannels"
help
This driver allows usage of CHSC subchannels. A CHSC subchannel
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 405cc97c6249..7e1f77620624 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -1,29 +1,8 @@
#ifndef _S390_CACHEFLUSH_H
#define _S390_CACHEFLUSH_H
-/* Keep includes the same across arches. */
-#include <linux/mm.h>
-
/* Caches aren't brain-dead on the s390. */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_range(start, end) do { } while (0)
-#define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable);
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index f1f644f2240a..9074a54c4d10 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -22,6 +22,7 @@
*/
#include <linux/mm.h>
+#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 07deaeee14c8..a6c4f7ed24a4 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -125,9 +125,9 @@ static size_t copy_in_user_std(size_t size, void __user *to,
unsigned long tmp1;
asm volatile(
+ " sacf 256\n"
" "AHI" %0,-1\n"
" jo 5f\n"
- " sacf 256\n"
" bras %3,3f\n"
"0:"AHI" %0,257\n"
"1: mvc 0(1,%1),0(%2)\n"
@@ -142,9 +142,8 @@ static size_t copy_in_user_std(size_t size, void __user *to,
"3:"AHI" %0,-256\n"
" jnm 2b\n"
"4: ex %0,1b-0b(%3)\n"
- " sacf 0\n"
"5: "SLR" %0,%0\n"
- "6:\n"
+ "6: sacf 0\n"
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
: : "cc", "memory");
@@ -156,9 +155,9 @@ static size_t clear_user_std(size_t size, void __user *to)
unsigned long tmp1, tmp2;
asm volatile(
+ " sacf 256\n"
" "AHI" %0,-1\n"
" jo 5f\n"
- " sacf 256\n"
" bras %3,3f\n"
" xc 0(1,%1),0(%1)\n"
"0:"AHI" %0,257\n"
@@ -178,9 +177,8 @@ static size_t clear_user_std(size_t size, void __user *to)
"3:"AHI" %0,-256\n"
" jnm 2b\n"
"4: ex %0,0(%3)\n"
- " sacf 0\n"
"5: "SLR" %0,%0\n"
- "6:\n"
+ "6: sacf 0\n"
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 0c719c61972e..e1850c28cd68 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -336,7 +336,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
page->flags ^= bits;
if (page->flags & FRAG_MASK) {
/* Page now has some free pgtable fragments. */
- list_move(&page->lru, &mm->context.pgtable_list);
+ if (!list_empty(&page->lru))
+ list_move(&page->lru, &mm->context.pgtable_list);
page = NULL;
} else
/* All fragments of the 4K page have been freed. */
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index e56b9bfbabd1..f7a0993c1e7c 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -682,7 +682,7 @@ static int p4_validate_raw_event(struct perf_event *event)
* if an event is shared across the logical threads
* the user needs special permissions to be able to use it
*/
- if (p4_event_bind_map[v].shared) {
+ if (p4_ht_active() && p4_event_bind_map[v].shared) {
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return -EACCES;
}
@@ -727,7 +727,8 @@ static int p4_hw_config(struct perf_event *event)
event->hw.config = p4_set_ht_bit(event->hw.config);
if (event->attr.type == PERF_TYPE_RAW) {
-
+ struct p4_event_bind *bind;
+ unsigned int esel;
/*
* Clear bits we reserve to be managed by kernel itself
* and never allowed from a user space
@@ -743,6 +744,13 @@ static int p4_hw_config(struct perf_event *event)
* bits since we keep additional info here (for cache events and etc)
*/
event->hw.config |= event->attr.config;
+ bind = p4_config_get_bind(event->attr.config);
+ if (!bind) {
+ rc = -EINVAL;
+ goto out;
+ }
+ esel = P4_OPCODE_ESEL(bind->opcode);
+ event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
}
rc = x86_setup_perfctr(event);
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index f011c5d9dea1..1c5cc65ea1e1 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -1,4 +1,4 @@
-/* ir-lirc-codec.c - ir-core to classic lirc interface bridge
+/* ir-lirc-codec.c - rc-core to classic lirc interface bridge
*
* Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com>
*
@@ -47,6 +47,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
/* Carrier reports */
if (ev.carrier_report) {
sample = LIRC_FREQUENCY(ev.carrier);
+ IR_dprintk(2, "carrier report (freq: %d)\n", sample);
/* Packet end */
} else if (ev.timeout) {
@@ -62,6 +63,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
return 0;
sample = LIRC_TIMEOUT(ev.duration / 1000);
+ IR_dprintk(2, "timeout report (duration: %d)\n", sample);
/* Normal sample */
} else {
@@ -85,6 +87,8 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) :
LIRC_SPACE(ev.duration / 1000);
+ IR_dprintk(2, "delivering %uus %s to lirc_dev\n",
+ TO_US(ev.duration), TO_STR(ev.pulse));
}
lirc_buffer_write(dev->raw->lirc.drv->rbuf,
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index 3bf3337875d1..2f5dc0622b94 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -3,6 +3,9 @@
*
* Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
*
+ * See http://mediacenterguides.com/book/export/html/31 for details on
+ * key mappings.
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -60,6 +63,9 @@ static struct rc_map_table rc6_mce[] = {
{ 0x800f0426, KEY_EPG }, /* Guide */
{ 0x800f0427, KEY_ZOOM }, /* Aspect */
+ { 0x800f0432, KEY_MODE }, /* Visualization */
+ { 0x800f0433, KEY_PRESENTATION }, /* Slide Show */
+ { 0x800f0434, KEY_EJECTCD },
{ 0x800f043a, KEY_BRIGHTNESSUP },
{ 0x800f0446, KEY_TV },
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 079353e5d558..6df0a4980645 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -816,7 +816,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
switch (ir->buf_in[index]) {
/* 2-byte return value commands */
case MCE_CMD_S_TIMEOUT:
- ir->rc->timeout = MS_TO_NS((hi << 8 | lo) / 2);
+ ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2);
break;
/* 1-byte return value commands */
@@ -855,9 +855,10 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
break;
case PARSE_IRDATA:
ir->rem--;
+ init_ir_raw_event(&rawir);
rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
- * MS_TO_US(MCE_TIME_UNIT);
+ * US_TO_NS(MCE_TIME_UNIT);
dev_dbg(ir->dev, "Storing %s with duration %d\n",
rawir.pulse ? "pulse" : "space",
@@ -883,6 +884,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
i, ir->rem + 1, false);
if (ir->rem)
ir->parser_state = PARSE_IRDATA;
+ else
+ ir_raw_event_reset(ir->rc);
break;
}
@@ -1060,7 +1063,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
rc->priv = ir;
rc->driver_type = RC_DRIVER_IR_RAW;
rc->allowed_protos = RC_TYPE_ALL;
- rc->timeout = MS_TO_NS(1000);
+ rc->timeout = US_TO_NS(1000);
if (!ir->flags.no_tx) {
rc->s_tx_mask = mceusb_set_tx_mask;
rc->s_tx_carrier = mceusb_set_tx_carrier;
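
A side note on the MS_TO_NS()/US_TO_NS() switches in the mceusb hunks above
(the nuvoton-cir and streamzap hunks below make the same kind of change):
rc-core expresses rc_dev timeouts and ir_raw_event durations in nanoseconds,
so converting a microsecond quantity with the millisecond helper overstates it
by a factor of 1000. A minimal standalone sketch of the arithmetic, with the
macro definitions written out locally (assumed to mirror the rc-core helpers):

/* Sketch only, not part of the patch; local macros assumed to match rc-core. */
#include <stdio.h>

#define MS_TO_NS(msec)	((msec) * 1000 * 1000)
#define US_TO_NS(usec)	((usec) * 1000)

int main(void)
{
	/* rc_dev timeouts and ir_raw_event durations are kept in nanoseconds. */
	printf("MS_TO_NS(1000) = %d ns (one second)\n", MS_TO_NS(1000));
	printf("US_TO_NS(1000) = %d ns (one millisecond)\n", US_TO_NS(1000));
	/* Using the millisecond helper on a microsecond value is therefore
	 * 1000x too large, which is what the hunks above correct. */
	return 0;
}
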
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index dd4caf8ef80b..273d9d674792 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -460,7 +460,7 @@ static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
return 0;
}
- carrier = (count * 1000000) / duration;
+ carrier = MS_TO_NS(count) / duration;
if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
nvt_dbg("WTF? Carrier frequency out of range!");
@@ -612,8 +612,8 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
sample = nvt->buf[i];
rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
- rawir.duration = (sample & BUF_LEN_MASK)
- * SAMPLE_PERIOD * 1000;
+ rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
+ * SAMPLE_PERIOD);
if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
if (nvt->rawir.pulse == rawir.pulse)
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 6e2911c2abfb..e435d94c0776 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -164,7 +164,7 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
sz->signal_start.tv_usec -
sz->signal_last.tv_usec);
rawir.duration -= sz->sum;
- rawir.duration *= 1000;
+ rawir.duration = US_TO_NS(rawir.duration);
rawir.duration &= IR_MAX_DURATION;
}
sz_push(sz, rawir);
@@ -177,7 +177,7 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
rawir.duration = ((int) value) * SZ_RESOLUTION;
rawir.duration += SZ_RESOLUTION / 2;
sz->sum += rawir.duration;
- rawir.duration *= 1000;
+ rawir.duration = US_TO_NS(rawir.duration);
rawir.duration &= IR_MAX_DURATION;
sz_push(sz, rawir);
}
@@ -197,7 +197,7 @@ static void sz_push_full_space(struct streamzap_ir *sz,
rawir.duration = ((int) value) * SZ_RESOLUTION;
rawir.duration += SZ_RESOLUTION / 2;
sz->sum += rawir.duration;
- rawir.duration *= 1000;
+ rawir.duration = US_TO_NS(rawir.duration);
sz_push(sz, rawir);
}
@@ -273,6 +273,7 @@ static void streamzap_callback(struct urb *urb)
if (sz->timeout_enabled)
sz_push(sz, rawir);
ir_raw_event_handle(sz->rdev);
+ ir_raw_event_reset(sz->rdev);
} else {
sz_push_full_space(sz, sz->buf_in[i]);
}
@@ -290,6 +291,7 @@ static void streamzap_callback(struct urb *urb)
}
}
+ ir_raw_event_handle(sz->rdev);
usb_submit_urb(urb, GFP_ATOMIC);
return;
@@ -430,13 +432,13 @@ static int __devinit streamzap_probe(struct usb_interface *intf,
sz->decoder_state = PulseSpace;
/* FIXME: don't yet have a way to set this */
sz->timeout_enabled = true;
- sz->rdev->timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) &
+ sz->rdev->timeout = ((US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION) &
IR_MAX_DURATION) | 0x03000000);
#if 0
/* not yet supported, depends on patches from maxim */
/* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */
- sz->min_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
- sz->max_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
+ sz->min_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
+ sz->max_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
#endif
do_gettimeofday(&sz->signal_start);
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 865216e9362c..47236a58bf33 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -5793,7 +5793,7 @@ static void usb_exchange(struct gspca_dev *gspca_dev,
break;
default:
/* case 0xdd: * delay */
- msleep(action->val / 64 + 10);
+ msleep(action->idx);
break;
}
action++;
@@ -5830,7 +5830,7 @@ static void setmatrix(struct gspca_dev *gspca_dev)
[SENSOR_GC0305] = gc0305_matrix,
[SENSOR_HDCS2020b] = NULL,
[SENSOR_HV7131B] = NULL,
- [SENSOR_HV7131R] = NULL,
+ [SENSOR_HV7131R] = po2030_matrix,
[SENSOR_ICM105A] = po2030_matrix,
[SENSOR_MC501CB] = NULL,
[SENSOR_MT9V111_1] = gc0305_matrix,
@@ -5936,6 +5936,7 @@ static void setquality(struct gspca_dev *gspca_dev)
case SENSOR_ADCM2700:
case SENSOR_GC0305:
case SENSOR_HV7131B:
+ case SENSOR_HV7131R:
case SENSOR_OV7620:
case SENSOR_PAS202B:
case SENSOR_PO2030:
@@ -6108,11 +6109,13 @@ static void send_unknown(struct gspca_dev *gspca_dev, int sensor)
reg_w(gspca_dev, 0x02, 0x003b);
reg_w(gspca_dev, 0x00, 0x0038);
break;
+ case SENSOR_HV7131R:
case SENSOR_PAS202B:
reg_w(gspca_dev, 0x03, 0x003b);
reg_w(gspca_dev, 0x0c, 0x003a);
reg_w(gspca_dev, 0x0b, 0x0039);
- reg_w(gspca_dev, 0x0b, 0x0038);
+ if (sensor == SENSOR_PAS202B)
+ reg_w(gspca_dev, 0x0b, 0x0038);
break;
}
}
@@ -6704,10 +6707,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x02, 0x003b);
reg_w(gspca_dev, 0x00, 0x0038);
break;
+ case SENSOR_HV7131R:
case SENSOR_PAS202B:
reg_w(gspca_dev, 0x03, 0x003b);
reg_w(gspca_dev, 0x0c, 0x003a);
reg_w(gspca_dev, 0x0b, 0x0039);
+ if (sd->sensor == SENSOR_HV7131R)
+ reg_w(gspca_dev, 0x50, ZC3XX_R11D_GLOBALGAIN);
break;
}
@@ -6720,6 +6726,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
case SENSOR_PAS202B:
case SENSOR_GC0305:
+ case SENSOR_HV7131R:
case SENSOR_TAS5130C:
reg_r(gspca_dev, 0x0008);
/* fall thru */
@@ -6760,6 +6767,12 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* ms-win + */
reg_w(gspca_dev, 0x40, 0x0117);
break;
+ case SENSOR_HV7131R:
+ i2c_write(gspca_dev, 0x25, 0x04, 0x00); /* exposure */
+ i2c_write(gspca_dev, 0x26, 0x93, 0x00);
+ i2c_write(gspca_dev, 0x27, 0xe0, 0x00);
+ reg_w(gspca_dev, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN);
+ break;
case SENSOR_GC0305:
case SENSOR_TAS5130C:
reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */
@@ -6808,9 +6821,17 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
- if (data[0] == 0xff && data[1] == 0xd8) { /* start of frame */
+ /* check the JPEG end of frame */
+ if (len >= 3
+ && data[len - 3] == 0xff && data[len - 2] == 0xd9) {
+/*fixme: what does the last byte mean?*/
gspca_frame_add(gspca_dev, LAST_PACKET,
- NULL, 0);
+ data, len - 1);
+ return;
+ }
+
+ /* check the JPEG start of a frame */
+ if (data[0] == 0xff && data[1] == 0xd8) {
/* put the JPEG header in the new frame */
gspca_frame_add(gspca_dev, FIRST_PACKET,
sd->jpeg_hdr, JPEG_HDR_SZ);
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index a6572e5ae369..a27d93b503a5 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -283,6 +283,7 @@ static int hdpvr_probe(struct usb_interface *interface,
struct hdpvr_device *dev;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
+ struct i2c_client *client;
size_t buffer_size;
int i;
int retval = -ENOMEM;
@@ -381,13 +382,21 @@ static int hdpvr_probe(struct usb_interface *interface,
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
retval = hdpvr_register_i2c_adapter(dev);
if (retval < 0) {
- v4l2_err(&dev->v4l2_dev, "registering i2c adapter failed\n");
+ v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n");
goto error;
}
- retval = hdpvr_register_i2c_ir(dev);
- if (retval < 0)
- v4l2_err(&dev->v4l2_dev, "registering i2c IR devices failed\n");
+ client = hdpvr_register_ir_rx_i2c(dev);
+ if (!client) {
+ v4l2_err(&dev->v4l2_dev, "i2c IR RX device register failed\n");
+ goto reg_fail;
+ }
+
+ client = hdpvr_register_ir_tx_i2c(dev);
+ if (!client) {
+ v4l2_err(&dev->v4l2_dev, "i2c IR TX device register failed\n");
+ goto reg_fail;
+ }
#endif
/* let the user know what node this device is now attached to */
@@ -395,6 +404,10 @@ static int hdpvr_probe(struct usb_interface *interface,
video_device_node_name(dev->video_dev));
return 0;
+reg_fail:
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_adapter(&dev->i2c_adapter);
+#endif
error:
if (dev) {
/* Destroy single thread */
@@ -424,6 +437,9 @@ static void hdpvr_disconnect(struct usb_interface *interface)
mutex_lock(&dev->io_mutex);
hdpvr_cancel_queue(dev);
mutex_unlock(&dev->io_mutex);
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_adapter(&dev->i2c_adapter);
+#endif
video_unregister_device(dev->video_dev);
atomic_dec(&dev_nr);
}
diff --git a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c
index 89b71faeaac2..e53fa55d56a1 100644
--- a/drivers/media/video/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/video/hdpvr/hdpvr-i2c.c
@@ -31,26 +31,34 @@
#define Z8F0811_IR_RX_I2C_ADDR 0x71
-static struct i2c_board_info hdpvr_i2c_board_info = {
- I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR),
- I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR),
-};
+struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev)
+{
+ struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
+ struct i2c_board_info hdpvr_ir_tx_i2c_board_info = {
+ I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR),
+ };
+
+ init_data->name = "HD-PVR";
+ hdpvr_ir_tx_i2c_board_info.platform_data = init_data;
-int hdpvr_register_i2c_ir(struct hdpvr_device *dev)
+ return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_tx_i2c_board_info);
+}
+
+struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev)
{
- struct i2c_client *c;
struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
+ struct i2c_board_info hdpvr_ir_rx_i2c_board_info = {
+ I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR),
+ };
/* Our default information for ir-kbd-i2c.c to use */
init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
init_data->type = RC_TYPE_RC5;
- init_data->name = "HD PVR";
- hdpvr_i2c_board_info.platform_data = init_data;
-
- c = i2c_new_device(&dev->i2c_adapter, &hdpvr_i2c_board_info);
+ init_data->name = "HD-PVR";
+ hdpvr_ir_rx_i2c_board_info.platform_data = init_data;
- return (c == NULL) ? -ENODEV : 0;
+ return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_rx_i2c_board_info);
}
static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus,
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
index ee74e3be9a6a..072f23c570f3 100644
--- a/drivers/media/video/hdpvr/hdpvr.h
+++ b/drivers/media/video/hdpvr/hdpvr.h
@@ -313,7 +313,8 @@ int hdpvr_cancel_queue(struct hdpvr_device *dev);
/* i2c adapter registration */
int hdpvr_register_i2c_adapter(struct hdpvr_device *dev);
-int hdpvr_register_i2c_ir(struct hdpvr_device *dev);
+struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev);
+struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev);
/*========================================================================*/
/* buffer management */
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index d2b20ad383a3..a221ad68b330 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -128,6 +128,19 @@ static int get_key_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
static int get_key_haup_xvr(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
{
+ int ret;
+ unsigned char buf[1] = { 0 };
+
+ /*
+ * This is the same apparent "are you ready?" poll command observed
+ * watching Windows driver traffic and implemented in lirc_zilog. With
+ * this added, we get far saner remote behavior with z8 chips on usb
+ * connected devices, even with the default polling interval of 100ms.
+ */
+ ret = i2c_master_send(ir->c, buf, 1);
+ if (ret != 1)
+ return (ret < 0) ? ret : -EINVAL;
+
return get_key_haup_common (ir, ir_key, ir_raw, 6, 3);
}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
index ccc884948f34..451ecd485f97 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -597,7 +597,6 @@ static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
init_data->type = RC_TYPE_RC5;
init_data->name = hdw->hdw_desc->description;
- init_data->polling_interval = 260; /* ms From lirc_zilog */
/* IR Receiver */
info.addr = 0x71;
info.platform_data = init_data;
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index f35459d1f42f..0db90922ee93 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -1565,7 +1565,7 @@ static int saa711x_probe(struct i2c_client *client,
chip_id = name[5];
/* Check whether this chip is part of the saa711x series */
- if (memcmp(name, "1f711", 5)) {
+ if (memcmp(name + 1, "f711", 4)) {
v4l_dbg(1, debug, client, "chip found @ 0x%x (ID %s) does not match a known saa711x chip.\n",
client->addr << 1, name);
return -ENODEV;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 4155805dcdff..2b771f18d1ad 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -319,6 +319,9 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
+ /* nothing to do if already disconnected */
+ if (!lcu)
+ return;
device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&lcu->lock, flags);
list_del_init(&device->alias_list);
@@ -680,6 +683,9 @@ int dasd_alias_remove_device(struct dasd_device *device)
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
+ /* nothing to do if already removed */
+ if (!lcu)
+ return 0;
spin_lock_irqsave(&lcu->lock, flags);
_remove_device_from_lcu(lcu, device);
spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e9fff2b9bce2..5640c89cd9de 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -476,7 +476,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
int count, stop;
- unsigned char state;
+ unsigned char state = 0;
/*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -643,7 +643,7 @@ void qdio_inbound_processing(unsigned long data)
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
int count, stop;
- unsigned char state;
+ unsigned char state = 0;
if (need_siga_sync(q))
if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/staging/lirc/lirc_zilog.c b/drivers/staging/lirc/lirc_zilog.c
index 3fe5f4160194..0aad0d7a74a3 100644
--- a/drivers/staging/lirc/lirc_zilog.c
+++ b/drivers/staging/lirc/lirc_zilog.c
@@ -495,7 +495,7 @@ static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
/* send boot data to the IR TX device */
static int send_boot_data(struct IR_tx *tx)
{
- int ret;
+ int ret, i;
unsigned char buf[4];
/* send the boot block */
@@ -503,7 +503,7 @@ static int send_boot_data(struct IR_tx *tx)
if (ret != 0)
return ret;
- /* kick it off? */
+ /* Hit the go button to activate the new boot data */
buf[0] = 0x00;
buf[1] = 0x20;
ret = i2c_master_send(tx->c, buf, 2);
@@ -511,7 +511,19 @@ static int send_boot_data(struct IR_tx *tx)
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
- ret = i2c_master_send(tx->c, buf, 1);
+
+ /*
+ * Wait for zilog to settle after hitting go post boot block upload.
+ * Without this delay, the HD-PVR and HVR-1950 both return an -EIO
+ * upon attempting to get firmware revision, and tx probe thus fails.
+ */
+ for (i = 0; i < 10; i++) {
+ ret = i2c_master_send(tx->c, buf, 1);
+ if (ret == 1)
+ break;
+ udelay(100);
+ }
+
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
@@ -523,8 +535,8 @@ static int send_boot_data(struct IR_tx *tx)
zilog_error("i2c_master_recv failed with %d\n", ret);
return 0;
}
- if (buf[0] != 0x80) {
- zilog_error("unexpected IR TX response: %02x\n", buf[0]);
+ if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
+ zilog_error("unexpected IR TX init response: %02x\n", buf[0]);
return 0;
}
zilog_notify("Zilog/Hauppauge IR blaster firmware version "
@@ -827,7 +839,15 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
- ret = i2c_master_send(tx->c, buf, 1);
+
+ /* Give the z8 a moment to process data block */
+ for (i = 0; i < 10; i++) {
+ ret = i2c_master_send(tx->c, buf, 1);
+ if (ret == 1)
+ break;
+ udelay(100);
+ }
+
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index cc8a9b7d6064..267d0ada4541 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1114,6 +1114,17 @@ static int ep_send_events(struct eventpoll *ep,
return ep_scan_ready_list(ep, ep_send_events_proc, &esed);
}
+static inline struct timespec ep_set_mstimeout(long ms)
+{
+ struct timespec now, ts = {
+ .tv_sec = ms / MSEC_PER_SEC,
+ .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
+ };
+
+ ktime_get_ts(&now);
+ return timespec_add_safe(now, ts);
+}
+
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
int maxevents, long timeout)
{
@@ -1121,12 +1132,11 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
unsigned long flags;
long slack;
wait_queue_t wait;
- struct timespec end_time;
ktime_t expires, *to = NULL;
if (timeout > 0) {
- ktime_get_ts(&end_time);
- timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC);
+ struct timespec end_time = ep_set_mstimeout(timeout);
+
slack = select_estimate_accuracy(&end_time);
to = &expires;
*to = timespec_to_ktime(end_time);
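
The eventpoll change above replaces the direct nanosecond multiplication with
ep_set_mstimeout(), which splits the millisecond timeout into whole seconds
plus leftover nanoseconds and adds that to the current time with
timespec_add_safe(), which caps on overflow rather than wrapping. A userspace
sketch of just the split; the constants are local to the example and only
assumed to match the kernel's:

/* Sketch of the millisecond split done by ep_set_mstimeout(); not kernel code. */
#include <stdio.h>

#define MSEC_PER_SEC	1000L
#define NSEC_PER_MSEC	1000000L

struct ts { long long tv_sec; long tv_nsec; };

static struct ts ms_to_ts(long ms)
{
	struct ts t = {
		.tv_sec  = ms / MSEC_PER_SEC,
		.tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
	};
	return t;
}

int main(void)
{
	struct ts t = ms_to_ts(2500);

	/* 2500ms becomes 2 whole seconds plus 500,000,000ns, so tv_nsec never
	 * receives more than one second's worth of nanoseconds. */
	printf("%llds + %ldns\n", t.tv_sec, t.tv_nsec);
	return 0;
}
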
diff --git a/fs/exec.c b/fs/exec.c
index c62efcb959c7..52a447d9b6ab 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -120,7 +120,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
goto out;
file = do_filp_open(AT_FDCWD, tmp,
- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
+ O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0,
MAY_READ | MAY_EXEC | MAY_OPEN);
putname(tmp);
error = PTR_ERR(file);
@@ -723,7 +723,7 @@ struct file *open_exec(const char *name)
int err;
file = do_filp_open(AT_FDCWD, name,
- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
+ O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0,
MAY_EXEC | MAY_OPEN);
if (IS_ERR(file))
goto out;
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 42685424817b..a7555238c41a 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -1030,7 +1030,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
}
- inode->i_mapping->backing_dev_info = sb->s_bdi;
if (S_ISREG(inode->i_mode)) {
inode->i_op = &exofs_file_inode_operations;
inode->i_fop = &exofs_file_operations;
@@ -1131,7 +1130,6 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
sbi = sb->s_fs_info;
- inode->i_mapping->backing_dev_info = sb->s_bdi;
sb->s_dirt = 1;
inode_init_owner(inode, dir, mode);
inode->i_ino = sbi->s_nextid++;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index ecc8b3954ed6..cb1026181bdc 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -815,7 +815,7 @@ static int __init fcntl_init(void)
__O_SYNC | O_DSYNC | FASYNC |
O_DIRECT | O_LARGEFILE | O_DIRECTORY |
O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
- FMODE_EXEC
+ __FMODE_EXEC
));
fasync_cache = kmem_cache_create("fasync_cache",
diff --git a/fs/ioctl.c b/fs/ioctl.c
index a59635e295fa..1eebeb72b202 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -273,6 +273,13 @@ int __generic_block_fiemap(struct inode *inode,
len = isize;
}
+ /*
+ * Some filesystems can't deal with being asked to map less than
+ * blocksize, so make sure our len is at least block length.
+ */
+ if (logical_to_blk(inode, len) == 0)
+ len = blk_to_logical(inode, 1);
+
start_blk = logical_to_blk(inode, start);
last_blk = logical_to_blk(inode, start + len - 1);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 32b38cd829d3..bd3215940c37 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2555,9 +2555,12 @@ int proc_nr_inodes(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
int __init get_filesystem_list(char *buf);
+#define __FMODE_EXEC ((__force int) FMODE_EXEC)
+#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
+
#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
- (flag & FMODE_NONOTIFY)))
+ (flag & __FMODE_NONOTIFY)))
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index fcb9884df618..a5930cb66145 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -182,6 +182,26 @@ static inline bool res_counter_check_under_limit(struct res_counter *cnt)
return ret;
}
+/**
+ * res_counter_check_margin - check if the counter allows charging
+ * @cnt: the resource counter to check
+ * @bytes: the number of bytes to check the remaining space against
+ *
+ * Returns a boolean value on whether the counter can be charged
+ * @bytes or whether this would exceed the limit.
+ */
+static inline bool res_counter_check_margin(struct res_counter *cnt,
+ unsigned long bytes)
+{
+ bool ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnt->lock, flags);
+ ret = cnt->limit - cnt->usage >= bytes;
+ spin_unlock_irqrestore(&cnt->lock, flags);
+ return ret;
+}
+
static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt)
{
bool ret;
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 126a302c481c..999835b6112b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1901,11 +1901,12 @@ static void __perf_event_read(void *info)
return;
raw_spin_lock(&ctx->lock);
- update_context_time(ctx);
+ if (ctx->is_active)
+ update_context_time(ctx);
update_event_times(event);
+ if (event->state == PERF_EVENT_STATE_ACTIVE)
+ event->pmu->read(event);
raw_spin_unlock(&ctx->lock);
-
- event->pmu->read(event);
}
static inline u64 perf_event_count(struct perf_event *event)
@@ -1999,8 +2000,7 @@ static int alloc_callchain_buffers(void)
* accessed from NMI. Use a temporary manual per cpu allocation
* until that gets sorted out.
*/
- size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
- num_possible_cpus();
+ size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
entries = kzalloc(size, GFP_KERNEL);
if (!entries)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c914ec747ca6..ad6267714c84 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -625,7 +625,7 @@ static void update_curr_rt(struct rq *rq)
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
u64 delta_exec;
- if (!task_has_rt_policy(curr))
+ if (curr->sched_class != &rt_sched_class)
return;
delta_exec = rq->clock_task - curr->se.exec_start;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d7ebdf4cea98..f37f974aa81b 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -27,7 +27,7 @@
#include <asm/irq_regs.h>
#include <linux/perf_event.h>
-int watchdog_enabled;
+int watchdog_enabled = 1;
int __read_mostly softlockup_thresh = 60;
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -43,9 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
-static int no_watchdog;
-
-
/* boot commands */
/*
* Should we panic when a soft-lockup or hard-lockup occurs:
@@ -58,7 +55,7 @@ static int __init hardlockup_panic_setup(char *str)
if (!strncmp(str, "panic", 5))
hardlockup_panic = 1;
else if (!strncmp(str, "0", 1))
- no_watchdog = 1;
+ watchdog_enabled = 0;
return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -77,7 +74,7 @@ __setup("softlockup_panic=", softlockup_panic_setup);
static int __init nowatchdog_setup(char *str)
{
- no_watchdog = 1;
+ watchdog_enabled = 0;
return 1;
}
__setup("nowatchdog", nowatchdog_setup);
@@ -85,7 +82,7 @@ __setup("nowatchdog", nowatchdog_setup);
/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
- no_watchdog = 1;
+ watchdog_enabled = 0;
return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
@@ -432,9 +429,6 @@ static int watchdog_enable(int cpu)
wake_up_process(p);
}
- /* if any cpu succeeds, watchdog is considered enabled for the system */
- watchdog_enabled = 1;
-
return 0;
}
@@ -462,12 +456,16 @@ static void watchdog_disable(int cpu)
static void watchdog_enable_all_cpus(void)
{
int cpu;
- int result = 0;
+
+ watchdog_enabled = 0;
for_each_online_cpu(cpu)
- result += watchdog_enable(cpu);
+ if (!watchdog_enable(cpu))
+ /* if any cpu succeeds, watchdog is considered
+ enabled for the system */
+ watchdog_enabled = 1;
- if (result)
+ if (!watchdog_enabled)
printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
}
@@ -476,9 +474,6 @@ static void watchdog_disable_all_cpus(void)
{
int cpu;
- if (no_watchdog)
- return;
-
for_each_online_cpu(cpu)
watchdog_disable(cpu);
@@ -498,10 +493,12 @@ int proc_dowatchdog_enabled(struct ctl_table *table, int write,
{
proc_dointvec(table, write, buffer, length, ppos);
- if (watchdog_enabled)
- watchdog_enable_all_cpus();
- else
- watchdog_disable_all_cpus();
+ if (write) {
+ if (watchdog_enabled)
+ watchdog_enable_all_cpus();
+ else
+ watchdog_disable_all_cpus();
+ }
return 0;
}
@@ -530,7 +527,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- err = watchdog_enable(hotcpu);
+ if (watchdog_enabled)
+ err = watchdog_enable(hotcpu);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_UP_CANCELED:
@@ -555,9 +553,6 @@ void __init lockup_detector_init(void)
void *cpu = (void *)(long)smp_processor_id();
int err;
- if (no_watchdog)
- return;
-
err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
WARN_ON(notifier_to_errno(err));
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e187454d82f6..b6c1ce3c53b5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1162,7 +1162,12 @@ static void __split_huge_page_refcount(struct page *page)
/* after clearing PageTail the gup refcount can be released */
smp_mb();
- page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+ /*
+	 * Retain the hwpoison flag of the poisoned tail page;
+	 * without it, memory-failure handling can end up killing an
+	 * unsuitable process on a KVM guest machine.
+ */
+ page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
page_tail->flags |= (page->flags &
((1L << PG_referenced) |
(1L << PG_swapbacked) |
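
One detail of the new tail-page mask worth spelling out: by C precedence,
~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON groups as
(~PAGE_FLAGS_CHECK_AT_PREP) | __PG_HWPOISON, so the &= clears the prep-checked
flags while keeping a set hwpoison bit. A standalone illustration with made-up
flag values (not the kernel's):

/* Illustration of the masking above; the flag values are invented. */
#include <stdio.h>

#define PG_REFERENCED	(1UL << 0)
#define PG_HWPOISON	(1UL << 1)
#define PG_ACTIVE	(1UL << 2)

#define CHECK_AT_PREP	(PG_REFERENCED | PG_HWPOISON | PG_ACTIVE)

int main(void)
{
	unsigned long flags = PG_REFERENCED | PG_HWPOISON;

	/* Parses as flags &= ((~CHECK_AT_PREP) | PG_HWPOISON): every
	 * prep-checked bit is cleared except the hwpoison bit. */
	flags &= ~CHECK_AT_PREP | PG_HWPOISON;

	printf("hwpoison kept:      %lu\n", flags & PG_HWPOISON);	/* non-zero */
	printf("referenced cleared: %lu\n", flags & PG_REFERENCED);	/* 0 */
	return 0;
}
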
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3878cfe399dc..da53a252b259 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -612,8 +612,10 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
/* pagein of a big page is an event. So, ignore page size */
if (nr_pages > 0)
__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
- else
+ else {
__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
+ nr_pages = -nr_pages; /* for event */
+ }
__this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages);
@@ -1111,6 +1113,23 @@ static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
return false;
}
+/**
+ * mem_cgroup_check_margin - check if the memory cgroup allows charging
+ * @mem: memory cgroup to check
+ * @bytes: the number of bytes the caller intends to charge
+ *
+ * Returns a boolean value on whether @mem can be charged @bytes or
+ * whether this would exceed the limit.
+ */
+static bool mem_cgroup_check_margin(struct mem_cgroup *mem, unsigned long bytes)
+{
+ if (!res_counter_check_margin(&mem->res, bytes))
+ return false;
+ if (do_swap_account && !res_counter_check_margin(&mem->memsw, bytes))
+ return false;
+ return true;
+}
+
static unsigned int get_swappiness(struct mem_cgroup *memcg)
{
struct cgroup *cgrp = memcg->css.cgroup;
@@ -1837,23 +1856,34 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
flags |= MEM_CGROUP_RECLAIM_NOSWAP;
} else
mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
-
- if (csize > PAGE_SIZE) /* change csize and retry */
+ /*
+ * csize can be either a huge page (HPAGE_SIZE), a batch of
+ * regular pages (CHARGE_SIZE), or a single regular page
+ * (PAGE_SIZE).
+ *
+ * Never reclaim on behalf of optional batching, retry with a
+ * single page instead.
+ */
+ if (csize == CHARGE_SIZE)
return CHARGE_RETRY;
if (!(gfp_mask & __GFP_WAIT))
return CHARGE_WOULDBLOCK;
ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
- gfp_mask, flags);
+ gfp_mask, flags);
+ if (mem_cgroup_check_margin(mem_over_limit, csize))
+ return CHARGE_RETRY;
/*
- * try_to_free_mem_cgroup_pages() might not give us a full
- * picture of reclaim. Some pages are reclaimed and might be
- * moved to swap cache or just unmapped from the cgroup.
- * Check the limit again to see if the reclaim reduced the
- * current usage of the cgroup before giving up
+ * Even though the limit is exceeded at this point, reclaim
+ * may have been able to free some pages. Retry the charge
+ * before killing the task.
+ *
+ * Only for regular pages, though: huge pages are rather
+ * unlikely to succeed so close to the limit, and we fall back
+ * to regular pages anyway in case of failure.
*/
- if (ret || mem_cgroup_check_under_limit(mem_over_limit))
+ if (csize == PAGE_SIZE && ret)
return CHARGE_RETRY;
/*
@@ -2323,13 +2353,19 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, enum charge_type ctype)
{
struct mem_cgroup *mem = NULL;
+ int page_size = PAGE_SIZE;
struct page_cgroup *pc;
+ bool oom = true;
int ret;
- int page_size = PAGE_SIZE;
if (PageTransHuge(page)) {
page_size <<= compound_order(page);
VM_BUG_ON(!PageTransHuge(page));
+ /*
+ * Never OOM-kill a process for a huge page. The
+ * fault handler will fall back to regular pages.
+ */
+ oom = false;
}
pc = lookup_page_cgroup(page);
@@ -2338,7 +2374,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
return 0;
prefetchw(pc);
- ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page_size);
+ ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, oom, page_size);
if (ret || !mem)
return ret;
@@ -5024,9 +5060,9 @@ struct cgroup_subsys mem_cgroup_subsys = {
static int __init enable_swap_account(char *s)
{
/* consider enabled if no parameter or 1 is given */
- if (!s || !strcmp(s, "1"))
+ if (!(*s) || !strcmp(s, "=1"))
really_do_swap_account = 1;
- else if (!strcmp(s, "0"))
+ else if (!strcmp(s, "=0"))
really_do_swap_account = 0;
return 1;
}
@@ -5034,7 +5070,8 @@ __setup("swapaccount", enable_swap_account);
static int __init disable_swap_account(char *s)
{
- enable_swap_account("0");
+ printk_once("noswapaccount is deprecated and will be removed in 2.6.40. Use swapaccount=0 instead\n");
+ enable_swap_account("=0");
return 1;
}
__setup("noswapaccount", disable_swap_account);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 548fbd70f026..0207c2f6f8bd 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -233,8 +233,8 @@ void shake_page(struct page *p, int access)
}
/*
- * Only all shrink_slab here (which would also
- * shrink other caches) if access is not potentially fatal.
+ * Only call shrink_slab here (which would also shrink other caches) if
+ * access is not potentially fatal.
*/
if (access) {
int nr;
@@ -386,8 +386,6 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
struct task_struct *tsk;
struct anon_vma *av;
- if (!PageHuge(page) && unlikely(split_huge_page(page)))
- return;
read_lock(&tasklist_lock);
av = page_lock_anon_vma(page);
if (av == NULL) /* Not actually mapped anymore */
@@ -856,6 +854,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
int ret;
int kill = 1;
struct page *hpage = compound_head(p);
+ struct page *ppage;
if (PageReserved(p) || PageSlab(p))
return SWAP_SUCCESS;
@@ -897,6 +896,44 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
}
/*
+	 * ppage: the poisoned page.
+	 *   If p is a regular (4k) page, ppage is the poisoned page itself;
+	 *   if p is a hugetlb or THP page, ppage is the head page.
+ */
+ ppage = hpage;
+
+ if (PageTransHuge(hpage)) {
+ /*
+		 * Verify that this isn't a hugetlbfs head page; the check for
+		 * PageAnon is just to avoid tripping a split_huge_page internal
+		 * debug check, as split_huge_page refuses to deal with anything
+		 * that isn't an anon page. PageAnon can't go away from under us
+		 * because we hold a refcount on the hpage, and without a
+		 * refcount on the hpage split_huge_page can't be safely called
+		 * in the first place; having a refcount on the tail isn't
+		 * enough to be safe.
+ */
+ if (!PageHuge(hpage) && PageAnon(hpage)) {
+ if (unlikely(split_huge_page(hpage))) {
+ /*
+				 * FIXME: if splitting the THP fails, it would be
+				 * better to stop the following operation rather
+				 * than cause a panic by unmapping. The system
+				 * might survive if the page is freed later.
+ */
+ printk(KERN_INFO
+ "MCE %#lx: failed to split THP\n", pfn);
+
+ BUG_ON(!PageHWPoison(p));
+ return SWAP_FAIL;
+ }
+ /* THP is split, so ppage should be the real poisoned page. */
+ ppage = p;
+ }
+ }
+
+ /*
* First collect all the processes that have the page
* mapped in dirty form. This has to be done before try_to_unmap,
* because ttu takes the rmap data structures down.
@@ -905,12 +942,18 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
* there's nothing that can be done.
*/
if (kill)
- collect_procs(hpage, &tokill);
+ collect_procs(ppage, &tokill);
+
+ if (hpage != ppage)
+ lock_page_nosync(ppage);
- ret = try_to_unmap(hpage, ttu);
+ ret = try_to_unmap(ppage, ttu);
if (ret != SWAP_SUCCESS)
printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
- pfn, page_mapcount(hpage));
+ pfn, page_mapcount(ppage));
+
+ if (hpage != ppage)
+ unlock_page(ppage);
/*
* Now that the dirty bit has been propagated to the
@@ -921,7 +964,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
* use a more force-full uncatchable kill to prevent
* any accesses to the poisoned memory.
*/
- kill_procs_ao(&tokill, !!PageDirty(hpage), trapno,
+ kill_procs_ao(&tokill, !!PageDirty(ppage), trapno,
ret != SWAP_SUCCESS, p, pfn);
return ret;
@@ -1022,19 +1065,22 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
* The check (unnecessarily) ignores LRU pages being isolated and
* walked by the page reclaim code, however that's not a big loss.
*/
- if (!PageLRU(p) && !PageHuge(p))
- shake_page(p, 0);
- if (!PageLRU(p) && !PageHuge(p)) {
- /*
- * shake_page could have turned it free.
- */
- if (is_free_buddy_page(p)) {
- action_result(pfn, "free buddy, 2nd try", DELAYED);
- return 0;
+ if (!PageHuge(p) && !PageTransCompound(p)) {
+ if (!PageLRU(p))
+ shake_page(p, 0);
+ if (!PageLRU(p)) {
+ /*
+ * shake_page could have turned it free.
+ */
+ if (is_free_buddy_page(p)) {
+ action_result(pfn, "free buddy, 2nd try",
+ DELAYED);
+ return 0;
+ }
+ action_result(pfn, "non LRU", IGNORED);
+ put_page(p);
+ return -EBUSY;
}
- action_result(pfn, "non LRU", IGNORED);
- put_page(p);
- return -EBUSY;
}
/*
@@ -1064,7 +1110,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
* For error on the tail page, we should set PG_hwpoison
* on the head page to show that the hugepage is hwpoisoned
*/
- if (PageTail(p) && TestSetPageHWPoison(hpage)) {
+ if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
action_result(pfn, "hugepage already hardware poisoned",
IGNORED);
unlock_page(hpage);
@@ -1295,7 +1341,10 @@ static int soft_offline_huge_page(struct page *page, int flags)
ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
true);
if (ret) {
- putback_lru_pages(&pagelist);
+ struct page *page1, *page2;
+ list_for_each_entry_safe(page1, page2, &pagelist, lru)
+ put_page(page1);
+
pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
pfn, ret, page->flags);
if (ret > 0)
@@ -1419,6 +1468,7 @@ int soft_offline_page(struct page *page, int flags)
ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
0, true);
if (ret) {
+ putback_lru_pages(&pagelist);
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
pfn, ret, page->flags);
if (ret > 0)
diff --git a/mm/migrate.c b/mm/migrate.c
index 9f29a3b7aac2..766115253807 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -772,6 +772,7 @@ uncharge:
unlock:
unlock_page(page);
+move_newpage:
if (rc != -EAGAIN) {
/*
* A page that has been migrated has all references
@@ -785,8 +786,6 @@ unlock:
putback_lru_page(page);
}
-move_newpage:
-
/*
* Move the new page to the LRU. If migration was not successful
* then this will free the page.
@@ -981,10 +980,6 @@ int migrate_huge_pages(struct list_head *from,
}
rc = 0;
out:
-
- list_for_each_entry_safe(page, page2, from, lru)
- put_page(page);
-
if (rc)
return rc;