path: root/include/linux
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/cpu.h          25
-rw-r--r--  include/linux/cpuset.h        6
-rw-r--r--  include/linux/fb.h            2
-rw-r--r--  include/linux/fdtable.h       3
-rw-r--r--  include/linux/jbd2.h         11
-rw-r--r--  include/linux/mm.h            2
-rw-r--r--  include/linux/pci.h           1
-rw-r--r--  include/linux/perf_event.h    2
-rw-r--r--  include/linux/sched.h        53
-rw-r--r--  include/linux/topology.h      1
-rw-r--r--  include/linux/vgaarb.h        1
11 files changed, 75 insertions, 32 deletions
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index e287863ac053..de6b1722cdca 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -48,6 +48,31 @@ extern ssize_t arch_cpu_release(const char *, size_t);
#endif
struct notifier_block;
+/*
+ * CPU notifier priorities.
+ */
+enum {
+ /*
+ * SCHED_ACTIVE marks a cpu which is coming up active during
+ * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
+ * notifier. CPUSET_ACTIVE adjusts cpuset according to
+ * cpu_active mask right after SCHED_ACTIVE. During
+ * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
+ * ordered in a similar way.
+ *
+ * This ordering guarantees consistent cpu_active mask and
+ * migration behavior to all cpu notifiers.
+ */
+ CPU_PRI_SCHED_ACTIVE = INT_MAX,
+ CPU_PRI_CPUSET_ACTIVE = INT_MAX - 1,
+ CPU_PRI_SCHED_INACTIVE = INT_MIN + 1,
+ CPU_PRI_CPUSET_INACTIVE = INT_MIN,
+
+ /* migration should happen before other stuff but after perf */
+ CPU_PRI_PERF = 20,
+ CPU_PRI_MIGRATION = 10,
+};
+
#ifdef CONFIG_SMP
/* Need to know about CPUs going up/down? */
#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
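
As a usage sketch (not part of this patch), a subsystem that wants its callback ordered against these priorities might register like this; the callback name and the chosen priority are hypothetical:

#include <linux/cpu.h>
#include <linux/notifier.h>

/* Hypothetical callback: by the time CPU_ONLINE reaches a notifier at
 * or below default priority, the ordering above guarantees that
 * cpu_active_mask has already been updated by SCHED_ACTIVE. */
static int my_subsys_cpu_callback(struct notifier_block *nb,
                                  unsigned long action, void *hcpu)
{
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                /* bring per-cpu state online */
                break;
        case CPU_DOWN_PREPARE:
                /* quiesce per-cpu state */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_subsys_cpu_nb = {
        .notifier_call = my_subsys_cpu_callback,
        .priority      = CPU_PRI_MIGRATION - 1, /* after migration setup */
};

Registration would go through register_cpu_notifier(&my_subsys_cpu_nb) during init.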
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 457ed765a116..f20eb8f16025 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -20,6 +20,7 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
+extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -132,6 +133,11 @@ static inline void set_mems_allowed(nodemask_t nodemask)
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
+static inline void cpuset_update_active_cpus(void)
+{
+ partition_sched_domains(1, NULL, NULL);
+}
+
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
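
With CONFIG_CPUSETS disabled, the new cpuset_update_active_cpus() collapses to rebuilding the single scheduler domain via partition_sched_domains(). In this series the scheduler's hotplug notifiers end up invoking it roughly as follows (a condensed sketch, not the exact kernel/sched.c code):

static int cpuset_cpu_active(struct notifier_block *nfb,
                             unsigned long action, void *hcpu)
{
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
                /* cpu_active_mask was updated at CPU_PRI_SCHED_ACTIVE;
                 * now rebuild cpusets/sched domains to match it. */
                cpuset_update_active_cpus();
                return NOTIFY_OK;
        default:
                return NOTIFY_DONE;
        }
}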
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 8e5a9dfb76bf..e7445df44d6c 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -873,6 +873,8 @@ struct fb_info {
static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct)
+ max_num * sizeof(struct aperture), GFP_KERNEL);
+ if (!a)
+ return NULL;
a->count = max_num;
return a;
}
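
Since alloc_apertures() can now return NULL on allocation failure, callers must check the result. A minimal caller sketch (the driver context and BAR index are hypothetical):

struct apertures_struct *ap = alloc_apertures(1);

if (!ap)
        return -ENOMEM;
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
/* hand 'ap' to the framebuffer core, which takes ownership */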
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 013dc529e95f..d147461bc271 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -61,7 +61,8 @@ struct files_struct {
(rcu_dereference_check((fdtfd), \
rcu_read_lock_held() || \
lockdep_is_held(&(files)->file_lock) || \
- atomic_read(&(files)->count) == 1))
+ atomic_read(&(files)->count) == 1 || \
+ rcu_my_thread_group_empty()))
#define files_fdtable(files) \
(rcu_dereference_check_fdtable((files), (files)->fdt))
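
The added rcu_my_thread_group_empty() condition lets a single-threaded process inspect its own fdtable without further locking. The common lockless pattern is unchanged (a sketch, assuming read-only access):

struct fdtable *fdt;

rcu_read_lock();
fdt = files_fdtable(current->files);
/* e.g. bound a descriptor scan by fdt->max_fds while under RCU */
rcu_read_unlock();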
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index a4d2e9f7088a..adf832dec3f3 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1026,11 +1026,12 @@ void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
struct jbd2_buffer_trigger_type {
/*
- * Fired just before a buffer is written to the journal.
- * mapped_data is a mapped buffer that is the frozen data for
- * commit.
+ * Fired at the moment the data to write to the journal is known to
+ * be stable: either when b_frozen_data is created or just before a
+ * buffer is written to the journal.  mapped_data is a mapped buffer
+ * that is the frozen data for commit.
*/
- void (*t_commit)(struct jbd2_buffer_trigger_type *type,
+ void (*t_frozen)(struct jbd2_buffer_trigger_type *type,
struct buffer_head *bh, void *mapped_data,
size_t size);
@@ -1042,7 +1043,7 @@ struct jbd2_buffer_trigger_type {
struct buffer_head *bh);
};
-extern void jbd2_buffer_commit_trigger(struct journal_head *jh,
+extern void jbd2_buffer_frozen_trigger(struct journal_head *jh,
void *mapped_data,
struct jbd2_buffer_trigger_type *triggers);
extern void jbd2_buffer_abort_trigger(struct journal_head *jh,
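
A filesystem that checksums journalled blocks would wire up the renamed hook along these lines (a hedged sketch; the ocfs2 conversion in this series is the real user, the names below are made up):

/* Called once the frozen commit data is stable; safe to checksum. */
static void my_fs_frozen_trigger(struct jbd2_buffer_trigger_type *type,
                                 struct buffer_head *bh,
                                 void *mapped_data, size_t size)
{
        /* compute a checksum over mapped_data[0..size) */
}

static struct jbd2_buffer_trigger_type my_fs_triggers = {
        .t_frozen = my_fs_frozen_trigger,
};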
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b969efb03787..a2b48041b910 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -999,7 +999,7 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
* querying the cache size, so a fastpath for that case is appropriate.
*/
struct shrinker {
- int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+ int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask);
int seeks; /* seeks to recreate an obj */
/* These are for internal use */
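
Passing the struct shrinker pointer into ->shrink() lets one callback serve multiple instances by recovering its container; a sketch with hypothetical names:

struct my_cache {
        struct shrinker shrinker;
        atomic_t nr_objects;
};

static int my_cache_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask)
{
        struct my_cache *cache = container_of(s, struct my_cache, shrinker);

        if (nr_to_scan) {
                /* free up to nr_to_scan objects from this cache */
        }
        return atomic_read(&cache->nr_objects); /* remaining objects */
}

The instance would set .shrink = my_cache_shrink and .seeks = DEFAULT_SEEKS, then call register_shrinker(&cache->shrinker).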
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 7cb00845f150..f26fda76b87f 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -288,6 +288,7 @@ struct pci_dev {
*/
unsigned int irq;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
+ resource_size_t fw_addr[DEVICE_COUNT_RESOURCE]; /* FW-assigned addr */
/* These fields are used by common fixups */
unsigned int transparent:1; /* Transparent PCI bridge */
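
A plausible way the new field gets populated (hypothetical, not shown in this hunk) is to snapshot the firmware-assigned address before the kernel reassigns a BAR, so it can later be restored as a fallback:

/* before reassignment: remember where firmware placed BAR 'idx' */
dev->fw_addr[idx] = dev->resource[idx].start;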
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5d0266d94985..469e03e96fe7 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1068,7 +1068,7 @@ static inline void perf_event_disable(struct perf_event *event) { }
#define perf_cpu_notifier(fn) \
do { \
static struct notifier_block fn##_nb __cpuinitdata = \
- { .notifier_call = fn, .priority = 20 }; \
+ { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
(void *)(unsigned long)smp_processor_id()); \
fn(&fn##_nb, (unsigned long)CPU_STARTING, \
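
The macro's user supplies a notifier-style function; the macro registers it at CPU_PRI_PERF and immediately replays the bring-up events for the current CPU. A hypothetical PMU driver sketch:

static int __cpuinit my_pmu_cpu_notify(struct notifier_block *nb,
                                       unsigned long action, void *hcpu)
{
        /* set up / tear down per-cpu PMU state per 'action' */
        return NOTIFY_OK;
}

static int __init my_pmu_init(void)
{
        perf_cpu_notifier(my_pmu_cpu_notify);
        return 0;
}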
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6e0bb86de990..2091ea2a2c5c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -271,13 +271,10 @@ extern int runqueue_is_locked(int cpu);
extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
-extern int select_nohz_load_balancer(int cpu);
-extern int get_nohz_load_balancer(void);
+extern void select_nohz_load_balancer(int stop_tick);
+extern int get_nohz_timer_target(void);
#else
-static inline int select_nohz_load_balancer(int cpu)
-{
- return 0;
-}
+static inline void select_nohz_load_balancer(int stop_tick) { }
#endif
/*
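
The nohz hook now takes a stop_tick flag instead of a CPU number and returns nothing, while get_nohz_timer_target() supersedes get_nohz_load_balancer() for picking a busy CPU to queue new timers on. A rough caller sketch of the tick path (simplified, not the exact kernel/time code):

/* entering nohz idle: offer to hand off idle load balancing */
select_nohz_load_balancer(1);

/* ... tick stopped, CPU idle ... */

/* tick restarting: withdraw from idle balancing duty */
select_nohz_load_balancer(0);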
@@ -798,7 +795,7 @@ enum cpu_idle_type {
#define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
-
+#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
enum powersavings_balance_level {
@@ -833,6 +830,8 @@ static inline int sd_balance_for_package_power(void)
return SD_PREFER_SIBLING;
}
+extern int __weak arch_sd_sibling_asym_packing(void);
+
/*
* Optimise SD flags for power savings:
* SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
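
An architecture opts in by overriding the weak hook to return the new flag; a hypothetical sketch (this series adds a real override for POWER7-style SMT):

/* Return SD_ASYM_PACKING when lower-numbered SMT threads run faster. */
int arch_sd_sibling_asym_packing(void)
{
        if (arch_prefers_low_threads()) /* hypothetical predicate */
                return SD_ASYM_PACKING;
        return 0;
}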
@@ -854,7 +853,7 @@ struct sched_group {
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
* single CPU.
*/
- unsigned int cpu_power;
+ unsigned int cpu_power, cpu_power_orig;
/*
* The CPUs this group covers.
@@ -1690,6 +1689,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
+#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
@@ -1784,20 +1784,23 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
#endif
/*
- * Architectures can set this to 1 if they have specified
- * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
- * but then during bootup it turns out that sched_clock()
- * is reliable after all:
+ * Do not use outside of architecture code which knows its limitations.
+ *
+ * sched_clock() offers no promise of monotonicity or bounded drift
+ * between CPUs, and using it (which you should not) requires
+ * disabling IRQs.
+ *
+ * Please use one of the three interfaces below.
*/
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-extern int sched_clock_stable;
-#endif
-
-/* ftrace calls sched_clock() directly */
extern unsigned long long notrace sched_clock(void);
+/*
+ * See the comment in kernel/sched_clock.c
+ */
+extern u64 cpu_clock(int cpu);
+extern u64 local_clock(void);
+extern u64 sched_clock_cpu(int cpu);
+
extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
@@ -1812,17 +1815,19 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+extern int sched_clock_stable;
+
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif
-/*
- * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
- * clock constructed from sched_clock():
- */
-extern unsigned long long cpu_clock(int cpu);
-
extern unsigned long long
task_sched_runtime(struct task_struct *task);
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
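
Of the three sanctioned interfaces, local_clock() is the cheapest for ad-hoc timing on the current CPU; cpu_clock(cpu) and sched_clock_cpu(cpu) serve remote-CPU queries. A minimal timing sketch (values are nanoseconds, comparable only on the same CPU unless the clock is stable):

u64 t0, t1;

t0 = local_clock();
/* ... section being measured ... */
t1 = local_clock();
pr_debug("section took %llu ns\n", (unsigned long long)(t1 - t0));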
diff --git a/include/linux/topology.h b/include/linux/topology.h
index c44df50a05ab..b572e432d2f3 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -103,6 +103,7 @@ int arch_update_cpu_topology(void);
| 1*SD_SHARE_PKG_RESOURCES \
| 0*SD_SERIALIZE \
| 0*SD_PREFER_SIBLING \
+ | arch_sd_sibling_asym_packing() \
, \
.last_balance = jiffies, \
.balance_interval = 1, \
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index c9a975976995..814f294d4cd0 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -29,6 +29,7 @@
*/
#ifndef LINUX_VGA_H
+#define LINUX_VGA_H
#include <asm/vga.h>