author		Ingo Molnar <mingo@kernel.org>	2014-01-25 09:16:14 +0100
committer	Ingo Molnar <mingo@kernel.org>	2014-01-25 09:16:14 +0100
commit		2b45e0f9f34f718725e093f4e335600811d7105a (patch)
tree		3c6d594539eb16fc955906da65b9fa7aacbc9145 /include
parent		a85eba8814631d0d48361c8b9a7ee0984e80c03c (diff)
parent		15c81026204da897a05424c79263aea861a782cc (diff)
Merge branch 'linus' into x86/urgent
Merge in the x86 changes to apply a fix.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/barrier.h | 55
-rw-r--r--  include/linux/bottom_half.h | 32
-rw-r--r--  include/linux/compiler.h | 9
-rw-r--r--  include/linux/context_tracking.h | 10
-rw-r--r--  include/linux/context_tracking_state.h | 11
-rw-r--r--  include/linux/crash_dump.h | 2
-rw-r--r--  include/linux/edac.h | 28
-rw-r--r--  include/linux/efi.h | 17
-rw-r--r--  include/linux/hardirq.h | 1
-rw-r--r--  include/linux/i2c.h | 2
-rw-r--r--  include/linux/init_task.h | 10
-rw-r--r--  include/linux/kernel.h | 9
-rw-r--r--  include/linux/perf_event.h | 1
-rw-r--r--  include/linux/platform_data/hwmon-s3c.h | 10
-rw-r--r--  include/linux/platform_data/max197.h | 5
-rw-r--r--  include/linux/platform_data/sht15.h | 5
-rw-r--r--  include/linux/preempt.h | 37
-rw-r--r--  include/linux/preempt_mask.h | 16
-rw-r--r--  include/linux/rculist.h | 4
-rw-r--r--  include/linux/rcupdate.h | 153
-rw-r--r--  include/linux/rcutiny.h | 2
-rw-r--r--  include/linux/rcutree.h | 36
-rw-r--r--  include/linux/rtmutex.h | 18
-rw-r--r--  include/linux/rwlock_api_smp.h | 12
-rw-r--r--  include/linux/sched.h | 141
-rw-r--r--  include/linux/sched/deadline.h | 24
-rw-r--r--  include/linux/sched/rt.h | 5
-rw-r--r--  include/linux/sched/sysctl.h | 1
-rw-r--r--  include/linux/seqlock.h | 27
-rw-r--r--  include/linux/spinlock.h | 10
-rw-r--r--  include/linux/spinlock_api_smp.h | 12
-rw-r--r--  include/linux/spinlock_api_up.h | 16
-rw-r--r--  include/linux/syscalls.h | 6
-rw-r--r--  include/linux/tick.h | 8
-rw-r--r--  include/linux/uaccess.h | 5
-rw-r--r--  include/linux/uprobes.h | 52
-rw-r--r--  include/linux/vtime.h | 4
-rw-r--r--  include/linux/zorro.h | 121
-rw-r--r--  include/net/busy_poll.h | 19
-rw-r--r--  include/net/if_inet6.h | 1
-rw-r--r--  include/trace/events/ras.h | 10
-rw-r--r--  include/uapi/asm-generic/statfs.h | 2
-rw-r--r--  include/uapi/linux/Kbuild | 2
-rw-r--r--  include/uapi/linux/kexec.h | 1
-rw-r--r--  include/uapi/linux/perf_event.h | 1
-rw-r--r--  include/uapi/linux/sched.h | 6
-rw-r--r--  include/uapi/linux/zorro.h | 113
-rw-r--r--  include/uapi/linux/zorro_ids.h (renamed from include/linux/zorro_ids.h) | 0
-rw-r--r--  include/xen/interface/callback.h | 2
-rw-r--r--  include/xen/interface/io/protocols.h | 3
50 files changed, 737 insertions(+), 340 deletions(-)
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 639d7a4d033b..6f692f8ac664 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -1,4 +1,5 @@
-/* Generic barrier definitions, based on MN10300 definitions.
+/*
+ * Generic barrier definitions, originally based on MN10300 definitions.
*
* It should be possible to use these on really simple architectures,
* but it serves more as a starting point for new ports.
@@ -16,35 +17,65 @@
#ifndef __ASSEMBLY__
-#define nop() asm volatile ("nop")
+#include <linux/compiler.h>
+
+#ifndef nop
+#define nop() asm volatile ("nop")
+#endif
/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
+ * Force strict CPU ordering. And yes, this is required on UP too when we're
+ * talking to devices.
*
- * This implementation only contains a compiler barrier.
+ * Fall back to compiler barriers if nothing better is provided.
*/
-#define mb() asm volatile ("": : :"memory")
+#ifndef mb
+#define mb() barrier()
+#endif
+
+#ifndef rmb
#define rmb() mb()
-#define wmb() asm volatile ("": : :"memory")
+#endif
+
+#ifndef wmb
+#define wmb() mb()
+#endif
+
+#ifndef read_barrier_depends
+#define read_barrier_depends() do { } while (0)
+#endif
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
+#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while (0)
+#endif
+
+#ifndef set_mb
+#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
#endif
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
-#define read_barrier_depends() do {} while (0)
-#define smp_read_barrier_depends() do {} while (0)
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
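
For context, a minimal sketch (not part of this patch; all names hypothetical) of the pattern the new smp_store_release()/smp_load_acquire() pair is built for: the writer fully initializes an object before publishing the pointer, and a reader that observes the pointer is guaranteed to also observe the initialization.

/* Hypothetical illustration only -- not from this patch. */
#include <asm/barrier.h>

struct msg {
	int seq;
	int payload;
};

static struct msg *shared_msg;

static void producer(struct msg *m)
{
	m->seq = 1;
	m->payload = 42;
	smp_store_release(&shared_msg, m);	/* initialization ordered before the publish */
}

static int consumer(void)
{
	struct msg *m = smp_load_acquire(&shared_msg);	/* pairs with the release store */

	return m ? m->payload : -1;	/* if m is seen, payload == 42 is guaranteed */
}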
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 27b1bcffe408..86c12c93e3cf 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -1,9 +1,35 @@
#ifndef _LINUX_BH_H
#define _LINUX_BH_H
-extern void local_bh_disable(void);
+#include <linux/preempt.h>
+#include <linux/preempt_mask.h>
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+#else
+static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+{
+ preempt_count_add(cnt);
+ barrier();
+}
+#endif
+
+static inline void local_bh_disable(void)
+{
+ __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+}
+
extern void _local_bh_enable(void);
-extern void local_bh_enable(void);
-extern void local_bh_enable_ip(unsigned long ip);
+extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
+
+static inline void local_bh_enable_ip(unsigned long ip)
+{
+ __local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET);
+}
+
+static inline void local_bh_enable(void)
+{
+ __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+}
#endif /* _LINUX_BH_H */
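
The old extern local_bh_disable()/local_bh_enable() become inline wrappers around __local_bh_disable_ip()/__local_bh_enable_ip(). Callers are unchanged; a typical (hypothetical) user still looks like this sketch:

/* Hypothetical caller -- unchanged by this patch. */
#include <linux/bottom_half.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, pkt_count);

static void count_packet(void)
{
	local_bh_disable();		/* now __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET) */
	__this_cpu_inc(pkt_count);	/* safe against softirq handlers on this CPU */
	local_bh_enable();		/* may run pending softirqs */
}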
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 92669cd182a6..fe7a686dfd8d 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -298,6 +298,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif
+/* Is this type a native word size -- useful for atomic operations */
+#ifndef __native_word
+# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+#endif
+
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
@@ -337,6 +342,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#define compiletime_assert(condition, msg) \
_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
+#define compiletime_assert_atomic_type(t) \
+ compiletime_assert(__native_word(t), \
+ "Need native word sized stores/loads for atomicity.")
+
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
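
The new __native_word()/compiletime_assert_atomic_type() helpers are what let smp_store_release()/smp_load_acquire() above reject types that cannot be accessed in a single machine word. A hypothetical sketch of what the assertion catches:

/* Hypothetical illustration only. */
#include <linux/compiler.h>

struct wide { long lo, hi; };		/* wider than one machine word on any arch */

static void check_types(int *ip, struct wide *wp)
{
	compiletime_assert_atomic_type(*ip);	/* OK: int is a native word */
	compiletime_assert_atomic_type(*wp);	/* build error: "Need native word sized stores/loads for atomicity." */
}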
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 158158704c30..37b81bd51ec0 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -17,13 +17,13 @@ extern void __context_tracking_task_switch(struct task_struct *prev,
static inline void user_enter(void)
{
- if (static_key_false(&context_tracking_enabled))
+ if (context_tracking_is_enabled())
context_tracking_user_enter();
}
static inline void user_exit(void)
{
- if (static_key_false(&context_tracking_enabled))
+ if (context_tracking_is_enabled())
context_tracking_user_exit();
}
@@ -31,7 +31,7 @@ static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
- if (!static_key_false(&context_tracking_enabled))
+ if (!context_tracking_is_enabled())
return 0;
prev_ctx = this_cpu_read(context_tracking.state);
@@ -42,7 +42,7 @@ static inline enum ctx_state exception_enter(void)
static inline void exception_exit(enum ctx_state prev_ctx)
{
- if (static_key_false(&context_tracking_enabled)) {
+ if (context_tracking_is_enabled()) {
if (prev_ctx == IN_USER)
context_tracking_user_enter();
}
@@ -51,7 +51,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
static inline void context_tracking_task_switch(struct task_struct *prev,
struct task_struct *next)
{
- if (static_key_false(&context_tracking_enabled))
+ if (context_tracking_is_enabled())
__context_tracking_task_switch(prev, next);
}
#else
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 0f1979d0674f..97a81225d037 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -22,15 +22,20 @@ struct context_tracking {
extern struct static_key context_tracking_enabled;
DECLARE_PER_CPU(struct context_tracking, context_tracking);
-static inline bool context_tracking_in_user(void)
+static inline bool context_tracking_is_enabled(void)
{
- return __this_cpu_read(context_tracking.state) == IN_USER;
+ return static_key_false(&context_tracking_enabled);
}
-static inline bool context_tracking_active(void)
+static inline bool context_tracking_cpu_is_enabled(void)
{
return __this_cpu_read(context_tracking.active);
}
+
+static inline bool context_tracking_in_user(void)
+{
+ return __this_cpu_read(context_tracking.state) == IN_USER;
+}
#else
static inline bool context_tracking_in_user(void) { return false; }
static inline bool context_tracking_active(void) { return false; }
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index fe68a5a98583..7032518f8542 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -6,6 +6,8 @@
#include <linux/proc_fs.h>
#include <linux/elf.h>
+#include <asm/pgtable.h> /* for pgprot_t */
+
#define ELFCORE_ADDR_MAX (-1ULL)
#define ELFCORE_ADDR_ERR (-2ULL)
diff --git a/include/linux/edac.h b/include/linux/edac.h
index dbdffe8d4469..8e6c20af11a2 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -35,6 +35,34 @@ extern void edac_atomic_assert_error(void);
extern struct bus_type *edac_get_sysfs_subsys(void);
extern void edac_put_sysfs_subsys(void);
+enum {
+ EDAC_REPORTING_ENABLED,
+ EDAC_REPORTING_DISABLED,
+ EDAC_REPORTING_FORCE
+};
+
+extern int edac_report_status;
+#ifdef CONFIG_EDAC
+static inline int get_edac_report_status(void)
+{
+ return edac_report_status;
+}
+
+static inline void set_edac_report_status(int new)
+{
+ edac_report_status = new;
+}
+#else
+static inline int get_edac_report_status(void)
+{
+ return EDAC_REPORTING_DISABLED;
+}
+
+static inline void set_edac_report_status(int new)
+{
+}
+#endif
+
static inline void opstate_init(void)
{
switch (edac_op_state) {
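
The new get_edac_report_status()/set_edac_report_status() accessors give EDAC-aware code (e.g. machine-check decoders) a global switch for whether errors should be reported. A hedged sketch of a consumer; the function below is hypothetical:

/* Hypothetical consumer of the new reporting-status accessors. */
#include <linux/edac.h>

static int decode_and_report(void)
{
	if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
		return 0;			/* reporting turned off, stay quiet */

	/* ... decode the hardware error and log it ... */
	return 1;
}

A platform quirk could likewise call set_edac_report_status(EDAC_REPORTING_FORCE) to override the default.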
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 11ce6784a196..0a819e7a60c9 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -556,6 +556,9 @@ extern struct efi {
unsigned long hcdp; /* HCDP table */
unsigned long uga; /* UGA table */
unsigned long uv_systab; /* UV system table */
+ unsigned long fw_vendor; /* fw_vendor */
+ unsigned long runtime; /* runtime table */
+ unsigned long config_table; /* config tables */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -653,6 +656,7 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */
#define EFI_MEMMAP 4 /* Can we use EFI memory map? */
#define EFI_64BIT 5 /* Is the firmware 64-bit? */
+#define EFI_ARCH_1 6 /* First arch-specific bit */
#ifdef CONFIG_EFI
# ifdef CONFIG_X86
@@ -872,4 +876,17 @@ int efivars_sysfs_init(void);
#endif /* CONFIG_EFI_VARS */
+#ifdef CONFIG_EFI_RUNTIME_MAP
+int efi_runtime_map_init(struct kobject *);
+void efi_runtime_map_setup(void *, int, u32);
+#else
+static inline int efi_runtime_map_init(struct kobject *kobj)
+{
+ return 0;
+}
+
+static inline void
+efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {}
+#endif
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index d9cf963ac832..12d5f972f23f 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -5,6 +5,7 @@
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
+#include <asm/hardirq.h>
extern void synchronize_irq(unsigned int irq);
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index eff50e062be8..d9c8dbd3373f 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -445,7 +445,7 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
static inline struct i2c_adapter *
i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
{
-#if IS_ENABLED(I2C_MUX)
+#if IS_ENABLED(CONFIG_I2C_MUX)
struct device *parent = adapter->dev.parent;
if (parent != NULL && parent->type == &i2c_adapter_type)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index b0ed422e4e4a..f0e52383a001 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -11,6 +11,7 @@
#include <linux/user_namespace.h>
#include <linux/securebits.h>
#include <linux/seqlock.h>
+#include <linux/rbtree.h>
#include <net/net_namespace.h>
#include <linux/sched/rt.h>
@@ -154,6 +155,14 @@ extern struct task_group root_task_group;
#define INIT_TASK_COMM "swapper"
+#ifdef CONFIG_RT_MUTEXES
+# define INIT_RT_MUTEXES(tsk) \
+ .pi_waiters = RB_ROOT, \
+ .pi_waiters_leftmost = NULL,
+#else
+# define INIT_RT_MUTEXES(tsk)
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -221,6 +230,7 @@ extern struct task_group root_task_group;
INIT_TRACE_RECURSION \
INIT_TASK_RCU_PREEMPT(tsk) \
INIT_CPUSET_SEQ(tsk) \
+ INIT_RT_MUTEXES(tsk) \
INIT_VTIME(tsk) \
}
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index ecb87544cc5d..2aa3d4b000e6 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -394,6 +394,15 @@ extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern int sysctl_panic_on_stackoverflow;
+/*
+ * Only to be used by arch init code. If the user over-wrote the default
+ * CONFIG_PANIC_TIMEOUT, honor it.
+ */
+static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
+{
+ if (panic_timeout == arch_default_timeout)
+ panic_timeout = timeout;
+}
extern const char *print_tainted(void);
enum lockdep_ok {
LOCKDEP_STILL_OK,
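
A hedged sketch of how architecture setup code is expected to use the new helper: override the panic timeout only when the user left it at the build-time default. The function name, the 180-second value and the use of CONFIG_PANIC_TIMEOUT as the default are illustrative assumptions, not taken from this patch.

/* Hypothetical arch init code. */
#include <linux/kernel.h>
#include <linux/init.h>

static void __init hypothetical_arch_setup(void)
{
	/* If panic_timeout still equals the Kconfig default, switch to
	 * this architecture's preferred 180 seconds. */
	set_arch_panic_timeout(180, CONFIG_PANIC_TIMEOUT);
}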
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2e069d1288df..e56b07f5c9b6 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -320,6 +320,7 @@ struct perf_event {
struct list_head migrate_entry;
struct hlist_node hlist_entry;
+ struct list_head active_entry;
int nr_siblings;
int group_flags;
struct perf_event *group_leader;
diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h
index c167e4429bc7..0e3cce130fe2 100644
--- a/include/linux/platform_data/hwmon-s3c.h
+++ b/include/linux/platform_data/hwmon-s3c.h
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s3c/include/plat/hwmon.h
- *
+/*
* Copyright 2005 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
@@ -11,8 +10,8 @@
* published by the Free Software Foundation.
*/
-#ifndef __ASM_ARCH_ADC_HWMON_H
-#define __ASM_ARCH_ADC_HWMON_H __FILE__
+#ifndef __HWMON_S3C_H__
+#define __HWMON_S3C_H__
/**
* s3c_hwmon_chcfg - channel configuration
@@ -47,5 +46,4 @@ struct s3c_hwmon_pdata {
*/
extern void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd);
-#endif /* __ASM_ARCH_ADC_HWMON_H */
-
+#endif /* __HWMON_S3C_H__ */
diff --git a/include/linux/platform_data/max197.h b/include/linux/platform_data/max197.h
index e2a41dd7690c..8da8f94ee15c 100644
--- a/include/linux/platform_data/max197.h
+++ b/include/linux/platform_data/max197.h
@@ -11,6 +11,9 @@
* For further information, see the Documentation/hwmon/max197 file.
*/
+#ifndef _PDATA_MAX197_H
+#define _PDATA_MAX197_H
+
/**
* struct max197_platform_data - MAX197 connectivity info
* @convert: Function used to start a conversion with control byte ctrl.
@@ -19,3 +22,5 @@
struct max197_platform_data {
int (*convert)(u8 ctrl);
};
+
+#endif /* _PDATA_MAX197_H */
diff --git a/include/linux/platform_data/sht15.h b/include/linux/platform_data/sht15.h
index 33e0fd27225e..12289c1e9413 100644
--- a/include/linux/platform_data/sht15.h
+++ b/include/linux/platform_data/sht15.h
@@ -12,6 +12,9 @@
* For further information, see the Documentation/hwmon/sht15 file.
*/
+#ifndef _PDATA_SHT15_H
+#define _PDATA_SHT15_H
+
/**
* struct sht15_platform_data - sht15 connectivity info
* @gpio_data: no. of gpio to which bidirectional data line is
@@ -31,3 +34,5 @@ struct sht15_platform_data {
bool no_otp_reload;
bool low_resolution;
};
+
+#endif /* _PDATA_SHT15_H */
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index a3d9dc8c2c00..59749fc48328 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -64,7 +64,11 @@ do { \
} while (0)
#else
-#define preempt_enable() preempt_enable_no_resched()
+#define preempt_enable() \
+do { \
+ barrier(); \
+ preempt_count_dec(); \
+} while (0)
#define preempt_check_resched() do { } while (0)
#endif
@@ -93,7 +97,11 @@ do { \
__preempt_schedule_context(); \
} while (0)
#else
-#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
+#define preempt_enable_notrace() \
+do { \
+ barrier(); \
+ __preempt_count_dec(); \
+} while (0)
#endif
#else /* !CONFIG_PREEMPT_COUNT */
@@ -116,6 +124,31 @@ do { \
#endif /* CONFIG_PREEMPT_COUNT */
+#ifdef MODULE
+/*
+ * Modules have no business playing preemption tricks.
+ */
+#undef sched_preempt_enable_no_resched
+#undef preempt_enable_no_resched
+#undef preempt_enable_no_resched_notrace
+#undef preempt_check_resched
+#endif
+
+#ifdef CONFIG_PREEMPT
+#define preempt_set_need_resched() \
+do { \
+ set_preempt_need_resched(); \
+} while (0)
+#define preempt_fold_need_resched() \
+do { \
+ if (tif_need_resched()) \
+ set_preempt_need_resched(); \
+} while (0)
+#else
+#define preempt_set_need_resched() do { } while (0)
+#define preempt_fold_need_resched() do { } while (0)
+#endif
+
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier;
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
index d169820203dd..dbeec4d4a3be 100644
--- a/include/linux/preempt_mask.h
+++ b/include/linux/preempt_mask.h
@@ -2,7 +2,6 @@
#define LINUX_PREEMPT_MASK_H
#include <linux/preempt.h>
-#include <asm/hardirq.h>
/*
* We put the hardirq and softirq counter into the preemption
@@ -79,6 +78,21 @@
#endif
/*
+ * The preempt_count offset needed for things like:
+ *
+ * spin_lock_bh()
+ *
+ * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
+ * softirqs, such that unlock sequences of:
+ *
+ * spin_unlock();
+ * local_bh_enable();
+ *
+ * Work as expected.
+ */
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
+
+/*
* Are we running in atomic context? WARNING: this macro cannot
* always detect atomic context; in particular, it cannot know about
* held spinlocks in non-preemptible kernels. Thus it should not be
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 45a0a9e81478..dbaf99084112 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -55,8 +55,8 @@ static inline void __list_add_rcu(struct list_head *new,
next->prev = new;
}
#else
-extern void __list_add_rcu(struct list_head *new,
- struct list_head *prev, struct list_head *next);
+void __list_add_rcu(struct list_head *new,
+ struct list_head *prev, struct list_head *next);
#endif
/**
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 39cbb889e20d..3e355c688618 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -50,13 +50,13 @@ extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
-extern void rcutorture_record_test_transition(void);
-extern void rcutorture_record_progress(unsigned long vernum);
-extern void do_trace_rcu_torture_read(const char *rcutorturename,
- struct rcu_head *rhp,
- unsigned long secs,
- unsigned long c_old,
- unsigned long c);
+void rcutorture_record_test_transition(void);
+void rcutorture_record_progress(unsigned long vernum);
+void do_trace_rcu_torture_read(const char *rcutorturename,
+ struct rcu_head *rhp,
+ unsigned long secs,
+ unsigned long c_old,
+ unsigned long c);
#else
static inline void rcutorture_record_test_transition(void)
{
@@ -65,11 +65,11 @@ static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
-extern void do_trace_rcu_torture_read(const char *rcutorturename,
- struct rcu_head *rhp,
- unsigned long secs,
- unsigned long c_old,
- unsigned long c);
+void do_trace_rcu_torture_read(const char *rcutorturename,
+ struct rcu_head *rhp,
+ unsigned long secs,
+ unsigned long c_old,
+ unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
do { } while (0)
@@ -118,8 +118,8 @@ extern void do_trace_rcu_torture_read(const char *rcutorturename,
* if CPU A and CPU B are the same CPU (but again only if the system has
* more than one CPU).
*/
-extern void call_rcu(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
+void call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
#else /* #ifdef CONFIG_PREEMPT_RCU */
@@ -149,8 +149,8 @@ extern void call_rcu(struct rcu_head *head,
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
*/
-extern void call_rcu_bh(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
+void call_rcu_bh(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
@@ -171,16 +171,16 @@ extern void call_rcu_bh(struct rcu_head *head,
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
*/
-extern void call_rcu_sched(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu));
+void call_rcu_sched(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu));
-extern void synchronize_sched(void);
+void synchronize_sched(void);
#ifdef CONFIG_PREEMPT_RCU
-extern void __rcu_read_lock(void);
-extern void __rcu_read_unlock(void);
-extern void rcu_read_unlock_special(struct task_struct *t);
+void __rcu_read_lock(void);
+void __rcu_read_unlock(void);
+void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);
/*
@@ -216,19 +216,19 @@ static inline int rcu_preempt_depth(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/* Internal to kernel */
-extern void rcu_init(void);
-extern void rcu_sched_qs(int cpu);
-extern void rcu_bh_qs(int cpu);
-extern void rcu_check_callbacks(int cpu, int user);
+void rcu_init(void);
+void rcu_sched_qs(int cpu);
+void rcu_bh_qs(int cpu);
+void rcu_check_callbacks(int cpu, int user);
struct notifier_block;
-extern void rcu_idle_enter(void);
-extern void rcu_idle_exit(void);
-extern void rcu_irq_enter(void);
-extern void rcu_irq_exit(void);
+void rcu_idle_enter(void);
+void rcu_idle_exit(void);
+void rcu_irq_enter(void);
+void rcu_irq_exit(void);
#ifdef CONFIG_RCU_USER_QS
-extern void rcu_user_enter(void);
-extern void rcu_user_exit(void);
+void rcu_user_enter(void);
+void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
@@ -262,7 +262,7 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
} while (0)
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
-extern bool __rcu_is_watching(void);
+bool __rcu_is_watching(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
/*
@@ -289,8 +289,8 @@ void wait_rcu_gp(call_rcu_func_t crf);
* initialization.
*/
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
-extern void init_rcu_head_on_stack(struct rcu_head *head);
-extern void destroy_rcu_head_on_stack(struct rcu_head *head);
+void init_rcu_head_on_stack(struct rcu_head *head);
+void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
@@ -325,6 +325,7 @@ static inline void rcu_lock_release(struct lockdep_map *map)
extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
+extern struct lockdep_map rcu_callback_map;
extern int debug_lockdep_rcu_enabled(void);
/**
@@ -362,7 +363,7 @@ static inline int rcu_read_lock_held(void)
* rcu_read_lock_bh_held() is defined out of line to avoid #include-file
* hell.
*/
-extern int rcu_read_lock_bh_held(void);
+int rcu_read_lock_bh_held(void);
/**
* rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
@@ -448,7 +449,7 @@ static inline int rcu_read_lock_sched_held(void)
#ifdef CONFIG_PROVE_RCU
-extern int rcu_my_thread_group_empty(void);
+int rcu_my_thread_group_empty(void);
/**
* rcu_lockdep_assert - emit lockdep splat if specified condition not met
@@ -548,10 +549,48 @@ static inline void rcu_preempt_sleep_check(void)
smp_read_barrier_depends(); \
(_________p1); \
})
-#define __rcu_assign_pointer(p, v, space) \
+
+/**
+ * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
+ * @v: The value to statically initialize with.
+ */
+#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
+
+/**
+ * rcu_assign_pointer() - assign to RCU-protected pointer
+ * @p: pointer to assign to
+ * @v: value to assign (publish)
+ *
+ * Assigns the specified value to the specified RCU-protected
+ * pointer, ensuring that any concurrent RCU readers will see
+ * any prior initialization.
+ *
+ * Inserts memory barriers on architectures that require them
+ * (which is most of them), and also prevents the compiler from
+ * reordering the code that initializes the structure after the pointer
+ * assignment. More importantly, this call documents which pointers
+ * will be dereferenced by RCU read-side code.
+ *
+ * In some special cases, you may use RCU_INIT_POINTER() instead
+ * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
+ * to the fact that it does not constrain either the CPU or the compiler.
+ * That said, using RCU_INIT_POINTER() when you should have used
+ * rcu_assign_pointer() is a very bad thing that results in
+ * impossible-to-diagnose memory corruption. So please be careful.
+ * See the RCU_INIT_POINTER() comment header for details.
+ *
+ * Note that rcu_assign_pointer() evaluates each of its arguments only
+ * once, appearances notwithstanding. One of the "extra" evaluations
+ * is in typeof() and the other visible only to sparse (__CHECKER__),
+ * neither of which actually execute the argument. As with most cpp
+ * macros, this execute-arguments-only-once property is important, so
+ * please be careful when making changes to rcu_assign_pointer() and the
+ * other macros that it invokes.
+ */
+#define rcu_assign_pointer(p, v) \
do { \
smp_wmb(); \
- (p) = (typeof(*v) __force space *)(v); \
+ ACCESS_ONCE(p) = RCU_INITIALIZER(v); \
} while (0)
@@ -890,32 +929,6 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
}
/**
- * rcu_assign_pointer() - assign to RCU-protected pointer
- * @p: pointer to assign to
- * @v: value to assign (publish)
- *
- * Assigns the specified value to the specified RCU-protected
- * pointer, ensuring that any concurrent RCU readers will see
- * any prior initialization.
- *
- * Inserts memory barriers on architectures that require them
- * (which is most of them), and also prevents the compiler from
- * reordering the code that initializes the structure after the pointer
- * assignment. More importantly, this call documents which pointers
- * will be dereferenced by RCU read-side code.
- *
- * In some special cases, you may use RCU_INIT_POINTER() instead
- * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
- * to the fact that it does not constrain either the CPU or the compiler.
- * That said, using RCU_INIT_POINTER() when you should have used
- * rcu_assign_pointer() is a very bad thing that results in
- * impossible-to-diagnose memory corruption. So please be careful.
- * See the RCU_INIT_POINTER() comment header for details.
- */
-#define rcu_assign_pointer(p, v) \
- __rcu_assign_pointer((p), (v), __rcu)
-
-/**
* RCU_INIT_POINTER() - initialize an RCU protected pointer
*
* Initialize an RCU-protected pointer in special cases where readers
@@ -949,7 +962,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
*/
#define RCU_INIT_POINTER(p, v) \
do { \
- p = (typeof(*v) __force __rcu *)(v); \
+ p = RCU_INITIALIZER(v); \
} while (0)
/**
@@ -958,7 +971,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* GCC-style initialization for an RCU-protected pointer in a structure field.
*/
#define RCU_POINTER_INITIALIZER(p, v) \
- .p = (typeof(*v) __force __rcu *)(v)
+ .p = RCU_INITIALIZER(v)
/*
* Does the specified offset indicate that the corresponding rcu_head
@@ -1005,7 +1018,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
#ifdef CONFIG_RCU_NOCB_CPU
-extern bool rcu_is_nocb_cpu(int cpu);
+bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
@@ -1013,8 +1026,8 @@ static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
/* Only for use by adaptive-ticks code. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
-extern bool rcu_sys_is_idle(void);
-extern void rcu_sysidle_force_exit(void);
+bool rcu_sys_is_idle(void);
+void rcu_sysidle_force_exit(void);
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
static inline bool rcu_sys_is_idle(void)
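
The reworked rcu_assign_pointer()/RCU_INIT_POINTER() documentation above boils down to the usual publish/subscribe pattern; a minimal, hypothetical sketch:

/* Hypothetical illustration only. */
#include <linux/rcupdate.h>

struct cfg {
	int threshold;
};

static struct cfg __rcu *active_cfg;

static void publish_cfg(struct cfg *newc)
{
	newc->threshold = 10;
	rcu_assign_pointer(active_cfg, newc);	/* smp_wmb() + ACCESS_ONCE() store */
}

static int read_threshold(void)
{
	struct cfg *c;
	int t = 0;

	rcu_read_lock();
	c = rcu_dereference(active_cfg);
	if (c)
		t = c->threshold;
	rcu_read_unlock();

	return t;
}

RCU_INIT_POINTER(active_cfg, NULL) would be the cheaper choice when the pointer is being set to NULL or no readers can yet reach it, as the comment block above spells out.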
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 09ebcbe9fd78..6f01771b571c 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -125,7 +125,7 @@ static inline void exit_rcu(void)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
-extern void rcu_scheduler_starting(void);
+void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void)
{
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 4b9c81548742..72137ee8c603 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,9 +30,9 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
-extern void rcu_note_context_switch(int cpu);
-extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
-extern void rcu_cpu_stall_reset(void);
+void rcu_note_context_switch(int cpu);
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
+void rcu_cpu_stall_reset(void);
/*
* Note a virtualization-based context switch. This is simply a
@@ -44,9 +44,9 @@ static inline void rcu_virt_note_context_switch(int cpu)
rcu_note_context_switch(cpu);
}
-extern void synchronize_rcu_bh(void);
-extern void synchronize_sched_expedited(void);
-extern void synchronize_rcu_expedited(void);
+void synchronize_rcu_bh(void);
+void synchronize_sched_expedited(void);
+void synchronize_rcu_expedited(void);
void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
@@ -71,25 +71,25 @@ static inline void synchronize_rcu_bh_expedited(void)
synchronize_sched_expedited();
}
-extern void rcu_barrier(void);
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
+void rcu_barrier(void);
+void rcu_barrier_bh(void);
+void rcu_barrier_sched(void);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
-extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
-extern long rcu_batches_completed_sched(void);
+long rcu_batches_completed(void);
+long rcu_batches_completed_bh(void);
+long rcu_batches_completed_sched(void);
-extern void rcu_force_quiescent_state(void);
-extern void rcu_bh_force_quiescent_state(void);
-extern void rcu_sched_force_quiescent_state(void);
+void rcu_force_quiescent_state(void);
+void rcu_bh_force_quiescent_state(void);
+void rcu_sched_force_quiescent_state(void);
-extern void exit_rcu(void);
+void exit_rcu(void);
-extern void rcu_scheduler_starting(void);
+void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
-extern bool rcu_is_watching(void);
+bool rcu_is_watching(void);
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index de17134244f3..3aed8d737e1a 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -13,7 +13,7 @@
#define __LINUX_RT_MUTEX_H
#include <linux/linkage.h>
-#include <linux/plist.h>
+#include <linux/rbtree.h>
#include <linux/spinlock_types.h>
extern int max_lock_depth; /* for sysctl */
@@ -22,12 +22,14 @@ extern int max_lock_depth; /* for sysctl */
* The rt_mutex structure
*
* @wait_lock: spinlock to protect the structure
- * @wait_list: pilist head to enqueue waiters in priority order
+ * @waiters: rbtree root to enqueue waiters in priority order
+ * @waiters_leftmost: top waiter
* @owner: the mutex owner
*/
struct rt_mutex {
raw_spinlock_t wait_lock;
- struct plist_head wait_list;
+ struct rb_root waiters;
+ struct rb_node *waiters_leftmost;
struct task_struct *owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
int save_state;
@@ -66,7 +68,7 @@ struct hrtimer_sleeper;
#define __RT_MUTEX_INITIALIZER(mutexname) \
{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \
+ , .waiters = RB_ROOT \
, .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
@@ -98,12 +100,4 @@ extern int rt_mutex_trylock(struct rt_mutex *lock);
extern void rt_mutex_unlock(struct rt_mutex *lock);
-#ifdef CONFIG_RT_MUTEXES
-# define INIT_RT_MUTEXES(tsk) \
- .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters), \
- INIT_RT_MUTEX_DEBUG(tsk)
-#else
-# define INIT_RT_MUTEXES(tsk)
-#endif
-
#endif
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 9c9f0495d37c..5b9b84b20407 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -172,8 +172,7 @@ static inline void __raw_read_lock_irq(rwlock_t *lock)
static inline void __raw_read_lock_bh(rwlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
@@ -200,8 +199,7 @@ static inline void __raw_write_lock_irq(rwlock_t *lock)
static inline void __raw_write_lock_bh(rwlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
@@ -250,8 +248,7 @@ static inline void __raw_read_unlock_bh(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_read_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
@@ -275,8 +272,7 @@ static inline void __raw_write_unlock_bh(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_write_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
#endif /* __LINUX_RWLOCK_API_SMP_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 53f97eb8dbc7..ffccdad050b5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -16,6 +16,7 @@ struct sched_param {
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
+#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
@@ -56,6 +57,70 @@ struct sched_param {
#include <asm/processor.h>
+#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */
+
+/*
+ * Extended scheduling parameters data structure.
+ *
+ * This is needed because the original struct sched_param can not be
+ * altered without introducing ABI issues with legacy applications
+ * (e.g., in sched_getparam()).
+ *
+ * However, the possibility of specifying more than just a priority for
+ * the tasks may be useful for a wide variety of application fields, e.g.,
+ * multimedia, streaming, automation and control, and many others.
+ *
+ * This variant (sched_attr) is meant at describing a so-called
+ * sporadic time-constrained task. In such model a task is specified by:
+ * - the activation period or minimum instance inter-arrival time;
+ * - the maximum (or average, depending on the actual scheduling
+ * discipline) computation time of all instances, a.k.a. runtime;
+ * - the deadline (relative to the actual activation time) of each
+ * instance.
+ * Very briefly, a periodic (sporadic) task asks for the execution of
+ * some specific computation --which is typically called an instance--
+ * (at most) every period. Moreover, each instance typically lasts no more
+ * than the runtime and must be completed by time instant t equal to
+ * the instance activation time + the deadline.
+ *
+ * This is reflected by the actual fields of the sched_attr structure:
+ *
+ * @size size of the structure, for fwd/bwd compat.
+ *
+ * @sched_policy task's scheduling policy
+ * @sched_flags for customizing the scheduler behaviour
+ * @sched_nice task's nice value (SCHED_NORMAL/BATCH)
+ * @sched_priority task's static priority (SCHED_FIFO/RR)
+ * @sched_deadline representative of the task's deadline
+ * @sched_runtime representative of the task's runtime
+ * @sched_period representative of the task's period
+ *
+ * Given this task model, there are a multiplicity of scheduling algorithms
+ * and policies, that can be used to ensure all the tasks will make their
+ * timing constraints.
+ *
+ * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
+ * only user of this new interface. More information about the algorithm
+ * available in the scheduling class file or in Documentation/.
+ */
+struct sched_attr {
+ u32 size;
+
+ u32 sched_policy;
+ u64 sched_flags;
+
+ /* SCHED_NORMAL, SCHED_BATCH */
+ s32 sched_nice;
+
+ /* SCHED_FIFO, SCHED_RR */
+ u32 sched_priority;
+
+ /* SCHED_DEADLINE */
+ u64 sched_runtime;
+ u64 sched_deadline;
+ u64 sched_period;
+};
+
struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
@@ -168,7 +233,6 @@ extern char ___assert_task_state[1 - 2*!!(
#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
-#define task_is_dead(task) ((task)->exit_state != 0)
#define task_is_stopped_or_traced(task) \
((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
@@ -1029,6 +1093,51 @@ struct sched_rt_entity {
#endif
};
+struct sched_dl_entity {
+ struct rb_node rb_node;
+
+ /*
+ * Original scheduling parameters. Copied here from sched_attr
+ * during sched_setscheduler2(), they will remain the same until
+ * the next sched_setscheduler2().
+ */
+ u64 dl_runtime; /* maximum runtime for each instance */
+ u64 dl_deadline; /* relative deadline of each instance */
+ u64 dl_period; /* separation of two instances (period) */
+ u64 dl_bw; /* dl_runtime / dl_deadline */
+
+ /*
+ * Actual scheduling parameters. Initialized with the values above,
+ * they are continously updated during task execution. Note that
+ * the remaining runtime could be < 0 in case we are in overrun.
+ */
+ s64 runtime; /* remaining runtime for this instance */
+ u64 deadline; /* absolute deadline for this instance */
+ unsigned int flags; /* specifying the scheduler behaviour */
+
+ /*
+ * Some bool flags:
+ *
+ * @dl_throttled tells if we exhausted the runtime. If so, the
+ * task has to wait for a replenishment to be performed at the
+ * next firing of dl_timer.
+ *
+ * @dl_new tells if a new instance arrived. If so we must
+ * start executing it with full runtime and reset its absolute
+ * deadline;
+ *
+ * @dl_boosted tells if we are boosted due to DI. If so we are
+ * outside bandwidth enforcement mechanism (but only until we
+ * exit the critical section).
+ */
+ int dl_throttled, dl_new, dl_boosted;
+
+ /*
+ * Bandwidth enforcement timer. Each -deadline task has its
+ * own bandwidth to be enforced, thus we need one timer per task.
+ */
+ struct hrtimer dl_timer;
+};
struct rcu_node;
@@ -1065,6 +1174,7 @@ struct task_struct {
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
+ struct sched_dl_entity dl;
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */
@@ -1098,6 +1208,7 @@ struct task_struct {
struct list_head tasks;
#ifdef CONFIG_SMP
struct plist_node pushable_tasks;
+ struct rb_node pushable_dl_tasks;
#endif
struct mm_struct *mm, *active_mm;
@@ -1249,9 +1360,12 @@ struct task_struct {
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
- struct plist_head pi_waiters;
+ struct rb_root pi_waiters;
+ struct rb_node *pi_waiters_leftmost;
/* Deadlock detection and priority inheritance handling */
struct rt_mutex_waiter *pi_blocked_on;
+ /* Top pi_waiters task */
+ struct task_struct *pi_top_task;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
@@ -1880,7 +1994,9 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
* but then during bootup it turns out that sched_clock()
* is reliable after all:
*/
-extern int sched_clock_stable;
+extern int sched_clock_stable(void);
+extern void set_sched_clock_stable(void);
+extern void clear_sched_clock_stable(void);
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
@@ -1959,6 +2075,8 @@ extern int sched_setscheduler(struct task_struct *, int,
const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
const struct sched_param *);
+extern int sched_setattr(struct task_struct *,
+ const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
* is_idle_task - is the specified task an idle task?
@@ -2038,7 +2156,7 @@ extern void wake_up_new_task(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
-extern void sched_fork(unsigned long clone_flags, struct task_struct *p);
+extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
@@ -2627,6 +2745,21 @@ static inline bool __must_check current_clr_polling_and_test(void)
}
#endif
+static inline void current_clr_polling(void)
+{
+ __current_clr_polling();
+
+ /*
+ * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
+ * Once the bit is cleared, we'll get IPIs with every new
+ * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
+ * fold.
+ */
+ smp_mb(); /* paired with resched_task() */
+
+ preempt_fold_need_resched();
+}
+
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
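
To make the new extended-parameters interface concrete, here is a hedged sketch of filling a struct sched_attr for a SCHED_DEADLINE reservation and handing it to the new in-kernel sched_setattr(); the 10 ms / 100 ms numbers and the function name are purely illustrative.

/* Hypothetical in-kernel user of the new interface. */
#include <linux/sched.h>
#include <linux/time.h>

static int give_deadline_reservation(struct task_struct *p)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),		/* == SCHED_ATTR_SIZE_VER0 */
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10 * NSEC_PER_MSEC,	/* worst-case runtime per instance */
		.sched_deadline	= 100 * NSEC_PER_MSEC,	/* relative deadline */
		.sched_period	= 100 * NSEC_PER_MSEC,	/* minimum inter-arrival time */
	};

	return sched_setattr(p, &attr);
}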
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
new file mode 100644
index 000000000000..9d303b8847df
--- /dev/null
+++ b/include/linux/sched/deadline.h
@@ -0,0 +1,24 @@
+#ifndef _SCHED_DEADLINE_H
+#define _SCHED_DEADLINE_H
+
+/*
+ * SCHED_DEADLINE tasks has negative priorities, reflecting
+ * the fact that any of them has higher prio than RT and
+ * NORMAL/BATCH tasks.
+ */
+
+#define MAX_DL_PRIO 0
+
+static inline int dl_prio(int prio)
+{
+ if (unlikely(prio < MAX_DL_PRIO))
+ return 1;
+ return 0;
+}
+
+static inline int dl_task(struct task_struct *p)
+{
+ return dl_prio(p->prio);
+}
+
+#endif /* _SCHED_DEADLINE_H */
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 440434df3627..34e4ebea8fce 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -35,6 +35,7 @@ static inline int rt_task(struct task_struct *p)
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
@@ -45,6 +46,10 @@ static inline int rt_mutex_getprio(struct task_struct *p)
{
return p->normal_prio;
}
+static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
+{
+ return NULL;
+}
# define rt_mutex_adjust_pi(p) do { } while (0)
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 41467f8ff8ec..31e0193cb0c5 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -48,7 +48,6 @@ extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;
-extern unsigned int sysctl_numa_balancing_settle_count;
#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_migration_cost;
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index cf87a24c0f92..535f158977b9 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -117,15 +117,15 @@ repeat:
}
/**
- * read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep
+ * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
* @s: pointer to seqcount_t
* Returns: count to be passed to read_seqcount_retry
*
- * read_seqcount_begin_no_lockdep opens a read critical section of the given
+ * raw_read_seqcount_begin opens a read critical section of the given
* seqcount, but without any lockdep checking. Validity of the critical
* section is tested by checking read_seqcount_retry function.
*/
-static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
+static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
unsigned ret = __read_seqcount_begin(s);
smp_rmb();
@@ -144,7 +144,7 @@ static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
seqcount_lockdep_reader_access(s);
- return read_seqcount_begin_no_lockdep(s);
+ return raw_read_seqcount_begin(s);
}
/**
@@ -206,14 +206,26 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
}
+
+static inline void raw_write_seqcount_begin(seqcount_t *s)
+{
+ s->sequence++;
+ smp_wmb();
+}
+
+static inline void raw_write_seqcount_end(seqcount_t *s)
+{
+ smp_wmb();
+ s->sequence++;
+}
+
/*
* Sequence counter only version assumes that callers are using their
* own mutexing.
*/
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
- s->sequence++;
- smp_wmb();
+ raw_write_seqcount_begin(s);
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}
@@ -225,8 +237,7 @@ static inline void write_seqcount_begin(seqcount_t *s)
static inline void write_seqcount_end(seqcount_t *s)
{
seqcount_release(&s->dep_map, 1, _RET_IP_);
- smp_wmb();
- s->sequence++;
+ raw_write_seqcount_end(s);
}
/**
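
The newly factored-out raw_write_seqcount_begin()/raw_write_seqcount_end() are the lockdep-free building blocks; a hypothetical writer/reader pair using them together with the renamed raw_read_seqcount_begin():

/* Hypothetical illustration only. */
#include <linux/seqlock.h>

static seqcount_t stats_seq;	/* zero-initialized is fine for the raw variants */
static u64 stats_bytes;

static void stats_add(u64 delta)	/* writer provides its own mutual exclusion */
{
	raw_write_seqcount_begin(&stats_seq);
	stats_bytes += delta;
	raw_write_seqcount_end(&stats_seq);
}

static u64 stats_read(void)
{
	unsigned int seq;
	u64 val;

	do {
		seq = raw_read_seqcount_begin(&stats_seq);
		val = stats_bytes;
	} while (read_seqcount_retry(&stats_seq, seq));

	return val;
}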
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 75f34949d9ab..3f2867ff0ced 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -130,6 +130,16 @@ do { \
#define smp_mb__before_spinlock() smp_wmb()
#endif
+/*
+ * Place this after a lock-acquisition primitive to guarantee that
+ * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
+ * if the UNLOCK and LOCK are executed by the same CPU or if the
+ * UNLOCK and LOCK operate on the same lock variable.
+ */
+#ifndef smp_mb__after_unlock_lock
+#define smp_mb__after_unlock_lock() do { } while (0)
+#endif
+
/**
* raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
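
A short, hypothetical sketch of the documented use of smp_mb__after_unlock_lock(): upgrading an UNLOCK followed by a LOCK into a full memory barrier (RCU's tree locking is the intended user; the structure and locks below are illustrative only).

/* Hypothetical illustration only. */
#include <linux/spinlock.h>

struct slot {
	spinlock_t lock;
	int owner;
};

static void pass_baton(struct slot *a, struct slot *b)
{
	spin_unlock(&a->lock);		/* a->lock was taken by the caller */
	spin_lock(&b->lock);
	smp_mb__after_unlock_lock();	/* the unlock+lock pair now acts as a full barrier */
	b->owner = a->owner;
}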
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index bdb9993f0fda..42dfab89e740 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -131,8 +131,7 @@ static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
@@ -174,20 +173,17 @@ static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
do_raw_spin_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
return 0;
}
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index af1f47229e70..d0d188861ad6 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -24,11 +24,14 @@
* flags straight, to suppress compiler warnings of unused lock
* variables, and to add the proper checker annotations:
*/
+#define ___LOCK(lock) \
+ do { __acquire(lock); (void)(lock); } while (0)
+
#define __LOCK(lock) \
- do { preempt_disable(); __acquire(lock); (void)(lock); } while (0)
+ do { preempt_disable(); ___LOCK(lock); } while (0)
#define __LOCK_BH(lock) \
- do { local_bh_disable(); __LOCK(lock); } while (0)
+ do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
#define __LOCK_IRQ(lock) \
do { local_irq_disable(); __LOCK(lock); } while (0)
@@ -36,12 +39,15 @@
#define __LOCK_IRQSAVE(lock, flags) \
do { local_irq_save(flags); __LOCK(lock); } while (0)
+#define ___UNLOCK(lock) \
+ do { __release(lock); (void)(lock); } while (0)
+
#define __UNLOCK(lock) \
- do { preempt_enable(); __release(lock); (void)(lock); } while (0)
+ do { preempt_enable(); ___UNLOCK(lock); } while (0)
#define __UNLOCK_BH(lock) \
- do { preempt_enable_no_resched(); local_bh_enable(); \
- __release(lock); (void)(lock); } while (0)
+ do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \
+ ___UNLOCK(lock); } while (0)
#define __UNLOCK_IRQ(lock) \
do { local_irq_enable(); __UNLOCK(lock); } while (0)
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 94273bbe6050..40ed9e9a77e5 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -38,6 +38,7 @@ struct rlimit;
struct rlimit64;
struct rusage;
struct sched_param;
+struct sched_attr;
struct sel_arg_struct;
struct semaphore;
struct sembuf;
@@ -279,9 +280,14 @@ asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
struct sched_param __user *param);
asmlinkage long sys_sched_setparam(pid_t pid,
struct sched_param __user *param);
+asmlinkage long sys_sched_setattr(pid_t pid,
+ struct sched_attr __user *attr);
asmlinkage long sys_sched_getscheduler(pid_t pid);
asmlinkage long sys_sched_getparam(pid_t pid,
struct sched_param __user *param);
+asmlinkage long sys_sched_getattr(pid_t pid,
+ struct sched_attr __user *attr,
+ unsigned int size);
asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr);
asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 5128d33bbb39..0175d8663b6c 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -104,7 +104,7 @@ extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
extern void tick_clock_notify(void);
extern int tick_check_oneshot_change(int allow_nohz);
extern struct tick_sched *tick_get_tick_sched(int cpu);
-extern void tick_check_idle(int cpu);
+extern void tick_check_idle(void);
extern int tick_oneshot_mode_active(void);
# ifndef arch_needs_cpu
# define arch_needs_cpu(cpu) (0)
@@ -112,7 +112,7 @@ extern int tick_oneshot_mode_active(void);
# else
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(int cpu) { }
+static inline void tick_check_idle(void) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
# endif
@@ -121,7 +121,7 @@ static inline void tick_init(void) { }
static inline void tick_cancel_sched_timer(int cpu) { }
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(int cpu) { }
+static inline void tick_check_idle(void) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
@@ -165,7 +165,7 @@ extern cpumask_var_t tick_nohz_full_mask;
static inline bool tick_nohz_full_enabled(void)
{
- if (!static_key_false(&context_tracking_enabled))
+ if (!context_tracking_is_enabled())
return false;
return tick_nohz_full_running;
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 9d8cf056e661..ecd3319dac33 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -25,13 +25,16 @@ static inline void pagefault_disable(void)
static inline void pagefault_enable(void)
{
+#ifndef CONFIG_PREEMPT
/*
* make sure to issue those last loads/stores before enabling
* the pagefault handler again.
*/
barrier();
preempt_count_dec();
- preempt_check_resched();
+#else
+ preempt_enable();
+#endif
}
#ifndef ARCH_HAS_NOCACHE_UACCESS
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 319eae70fe84..e32251e00e62 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -26,16 +26,13 @@
#include <linux/errno.h>
#include <linux/rbtree.h>
+#include <linux/types.h>
struct vm_area_struct;
struct mm_struct;
struct inode;
struct notifier_block;
-#ifdef CONFIG_ARCH_SUPPORTS_UPROBES
-# include <asm/uprobes.h>
-#endif
-
#define UPROBE_HANDLER_REMOVE 1
#define UPROBE_HANDLER_MASK 1
@@ -60,6 +57,8 @@ struct uprobe_consumer {
};
#ifdef CONFIG_UPROBES
+#include <asm/uprobes.h>
+
enum uprobe_task_state {
UTASK_RUNNING,
UTASK_SSTEP,
@@ -72,35 +71,28 @@ enum uprobe_task_state {
*/
struct uprobe_task {
enum uprobe_task_state state;
- struct arch_uprobe_task autask;
- struct return_instance *return_instances;
- unsigned int depth;
- struct uprobe *active_uprobe;
+ union {
+ struct {
+ struct arch_uprobe_task autask;
+ unsigned long vaddr;
+ };
+ struct {
+ struct callback_head dup_xol_work;
+ unsigned long dup_xol_addr;
+ };
+ };
+
+ struct uprobe *active_uprobe;
unsigned long xol_vaddr;
- unsigned long vaddr;
-};
-/*
- * On a breakpoint hit, thread contests for a slot. It frees the
- * slot after singlestep. Currently a fixed number of slots are
- * allocated.
- */
-struct xol_area {
- wait_queue_head_t wq; /* if all slots are busy */
- atomic_t slot_count; /* number of in-use slots */
- unsigned long *bitmap; /* 0 = free slot */
- struct page *page;
-
- /*
- * We keep the vma's vm_start rather than a pointer to the vma
- * itself. The probed process or a naughty kernel module could make
- * the vma go away, and we must handle that reasonably gracefully.
- */
- unsigned long vaddr; /* Page(s) of instruction slots */
+ struct return_instance *return_instances;
+ unsigned int depth;
};
+struct xol_area;
+
struct uprobes_state {
struct xol_area *xol_area;
};
@@ -109,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign
extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
+extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
@@ -120,7 +113,6 @@ extern void uprobe_end_dup_mmap(void);
extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t);
extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
-extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
@@ -176,10 +168,6 @@ static inline bool uprobe_deny_signal(void)
{
return false;
}
-static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
-{
- return 0;
-}
static inline void uprobe_free_utask(struct task_struct *t)
{
}
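For orientation, the struct uprobe_task layout produced by the hunk above is roughly the following (reassembled from the added lines; the field-group comments are annotations, not taken from the tree):

struct uprobe_task {
	enum uprobe_task_state		state;

	union {
		struct {			/* single-stepping the original insn */
			struct arch_uprobe_task	autask;
			unsigned long		vaddr;
		};
		struct {			/* post-fork XOL setup work */
			struct callback_head	dup_xol_work;
			unsigned long		dup_xol_addr;
		};
	};

	struct uprobe			*active_uprobe;
	unsigned long			xol_vaddr;

	struct return_instance		*return_instances;
	unsigned int			depth;
};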
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index f5b72b364bda..c5165fd256f9 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -19,8 +19,8 @@ static inline bool vtime_accounting_enabled(void) { return true; }
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static inline bool vtime_accounting_enabled(void)
{
- if (static_key_false(&context_tracking_enabled)) {
- if (context_tracking_active())
+ if (context_tracking_is_enabled()) {
+ if (context_tracking_cpu_is_enabled())
return true;
}
diff --git a/include/linux/zorro.h b/include/linux/zorro.h
index dff42025649b..63fbba0740c2 100644
--- a/include/linux/zorro.h
+++ b/include/linux/zorro.h
@@ -11,107 +11,10 @@
#ifndef _LINUX_ZORRO_H
#define _LINUX_ZORRO_H
-#include <linux/device.h>
-
-
- /*
- * Each Zorro board has a 32-bit ID of the form
- *
- * mmmmmmmmmmmmmmmmppppppppeeeeeeee
- *
- * with
- *
- * mmmmmmmmmmmmmmmm 16-bit Manufacturer ID (assigned by CBM (sigh))
- * pppppppp 8-bit Product ID (assigned by manufacturer)
- * eeeeeeee 8-bit Extended Product ID (currently only used
- * for some GVP boards)
- */
-
-
-#define ZORRO_MANUF(id) ((id) >> 16)
-#define ZORRO_PROD(id) (((id) >> 8) & 0xff)
-#define ZORRO_EPC(id) ((id) & 0xff)
-
-#define ZORRO_ID(manuf, prod, epc) \
- ((ZORRO_MANUF_##manuf << 16) | ((prod) << 8) | (epc))
-
-typedef __u32 zorro_id;
-
-
-/* Include the ID list */
-#include <linux/zorro_ids.h>
-
- /*
- * GVP identifies most of its products through the 'extended product code'
- * (epc). The epc has to be ANDed with the GVP_PRODMASK before the
- * identification.
- */
-
-#define GVP_PRODMASK (0xf8)
-#define GVP_SCSICLKMASK (0x01)
-
-enum GVP_flags {
- GVP_IO = 0x01,
- GVP_ACCEL = 0x02,
- GVP_SCSI = 0x04,
- GVP_24BITDMA = 0x08,
- GVP_25BITDMA = 0x10,
- GVP_NOBANK = 0x20,
- GVP_14MHZ = 0x40,
-};
-
-
-struct Node {
- struct Node *ln_Succ; /* Pointer to next (successor) */
- struct Node *ln_Pred; /* Pointer to previous (predecessor) */
- __u8 ln_Type;
- __s8 ln_Pri; /* Priority, for sorting */
- __s8 *ln_Name; /* ID string, null terminated */
-} __attribute__ ((packed));
-
-struct ExpansionRom {
- /* -First 16 bytes of the expansion ROM */
- __u8 er_Type; /* Board type, size and flags */
- __u8 er_Product; /* Product number, assigned by manufacturer */
- __u8 er_Flags; /* Flags */
- __u8 er_Reserved03; /* Must be zero ($ff inverted) */
- __u16 er_Manufacturer; /* Unique ID, ASSIGNED BY COMMODORE-AMIGA! */
- __u32 er_SerialNumber; /* Available for use by manufacturer */
- __u16 er_InitDiagVec; /* Offset to optional "DiagArea" structure */
- __u8 er_Reserved0c;
- __u8 er_Reserved0d;
- __u8 er_Reserved0e;
- __u8 er_Reserved0f;
-} __attribute__ ((packed));
-
-/* er_Type board type bits */
-#define ERT_TYPEMASK 0xc0
-#define ERT_ZORROII 0xc0
-#define ERT_ZORROIII 0x80
-
-/* other bits defined in er_Type */
-#define ERTB_MEMLIST 5 /* Link RAM into free memory list */
-#define ERTF_MEMLIST (1<<5)
-
-struct ConfigDev {
- struct Node cd_Node;
- __u8 cd_Flags; /* (read/write) */
- __u8 cd_Pad; /* reserved */
- struct ExpansionRom cd_Rom; /* copy of board's expansion ROM */
- void *cd_BoardAddr; /* where in memory the board was placed */
- __u32 cd_BoardSize; /* size of board in bytes */
- __u16 cd_SlotAddr; /* which slot number (PRIVATE) */
- __u16 cd_SlotSize; /* number of slots (PRIVATE) */
- void *cd_Driver; /* pointer to node of driver */
- struct ConfigDev *cd_NextCD; /* linked list of drivers to config */
- __u32 cd_Unused[4]; /* for whatever the driver wants */
-} __attribute__ ((packed));
-
-#define ZORRO_NUM_AUTO 16
-
-#ifdef __KERNEL__
+#include <uapi/linux/zorro.h>
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/mod_devicetable.h>
@@ -175,7 +78,23 @@ static inline struct zorro_driver *zorro_dev_driver(const struct zorro_dev *z)
extern unsigned int zorro_num_autocon; /* # of autoconfig devices found */
-extern struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
+extern struct zorro_dev *zorro_autocon;
+
+
+ /*
+ * Minimal information about a Zorro device, passed from bootinfo
+ * Only available temporarily, i.e. until initmem has been freed!
+ */
+
+struct zorro_dev_init {
+ struct ExpansionRom rom;
+ u16 slotaddr;
+ u16 slotsize;
+ u32 boardaddr;
+ u32 boardsize;
+};
+
+extern struct zorro_dev_init zorro_autocon_init[ZORRO_NUM_AUTO] __initdata;
/*
@@ -229,6 +148,4 @@ extern DECLARE_BITMAP(zorro_unused_z2ram, 128);
#define Z2RAM_CHUNKSHIFT (16)
-#endif /* __KERNEL__ */
-
#endif /* _LINUX_ZORRO_H */
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 829627d7b846..1d67fb6b23a0 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -42,27 +42,10 @@ static inline bool net_busy_loop_on(void)
return sysctl_net_busy_poll;
}
-/* a wrapper to make debug_smp_processor_id() happy
- * we can use sched_clock() because we don't care much about precision
- * we only care that the average is bounded
- */
-#ifdef CONFIG_DEBUG_PREEMPT
-static inline u64 busy_loop_us_clock(void)
-{
- u64 rc;
-
- preempt_disable_notrace();
- rc = sched_clock();
- preempt_enable_no_resched_notrace();
-
- return rc >> 10;
-}
-#else /* CONFIG_DEBUG_PREEMPT */
static inline u64 busy_loop_us_clock(void)
{
- return sched_clock() >> 10;
+ return local_clock() >> 10;
}
-#endif /* CONFIG_DEBUG_PREEMPT */
static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
{
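Net effect of the busy_poll.h hunk: the DEBUG_PREEMPT-only wrapper around sched_clock() is dropped, since local_clock() is already safe to call from preemptible context. The resolved helper is simply (sketch with an added comment):

static inline u64 busy_loop_us_clock(void)
{
	/* local_clock() handles the preemption/CPU-switch concerns internally */
	return local_clock() >> 10;
}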
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 76d54270f2e2..65bb13035598 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -165,7 +165,6 @@ struct inet6_dev {
struct net_device *dev;
struct list_head addr_list;
- int valid_ll_addr_cnt;
struct ifmcaddr6 *mc_list;
struct ifmcaddr6 *mc_tomb;
diff --git a/include/trace/events/ras.h b/include/trace/events/ras.h
index 88b878383797..1c875ad1ee5f 100644
--- a/include/trace/events/ras.h
+++ b/include/trace/events/ras.h
@@ -5,7 +5,7 @@
#define _TRACE_AER_H
#include <linux/tracepoint.h>
-#include <linux/edac.h>
+#include <linux/aer.h>
/*
@@ -63,10 +63,10 @@ TRACE_EVENT(aer_event,
TP_printk("%s PCIe Bus Error: severity=%s, %s\n",
__get_str(dev_name),
- __entry->severity == HW_EVENT_ERR_CORRECTED ? "Corrected" :
- __entry->severity == HW_EVENT_ERR_FATAL ?
- "Fatal" : "Uncorrected",
- __entry->severity == HW_EVENT_ERR_CORRECTED ?
+ __entry->severity == AER_CORRECTABLE ? "Corrected" :
+ __entry->severity == AER_FATAL ?
+ "Fatal" : "Uncorrected, non-fatal",
+ __entry->severity == AER_CORRECTABLE ?
__print_flags(__entry->status, "|", aer_correctable_errors) :
__print_flags(__entry->status, "|", aer_uncorrectable_errors))
);
diff --git a/include/uapi/asm-generic/statfs.h b/include/uapi/asm-generic/statfs.h
index 0999647fca13..cb89cc730f0b 100644
--- a/include/uapi/asm-generic/statfs.h
+++ b/include/uapi/asm-generic/statfs.h
@@ -13,7 +13,7 @@
*/
#ifndef __statfs_word
#if __BITS_PER_LONG == 64
-#define __statfs_word long
+#define __statfs_word __kernel_long_t
#else
#define __statfs_word __u32
#endif
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 33d2b8fe166d..3ce25b5d75a9 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -426,3 +426,5 @@ header-y += x25.h
header-y += xattr.h
header-y += xfrm.h
header-y += hw_breakpoint.h
+header-y += zorro.h
+header-y += zorro_ids.h
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 104838f65bc1..d6629d49a243 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -18,6 +18,7 @@
*/
#define KEXEC_ARCH_DEFAULT ( 0 << 16)
#define KEXEC_ARCH_386 ( 3 << 16)
+#define KEXEC_ARCH_68K ( 4 << 16)
#define KEXEC_ARCH_X86_64 (62 << 16)
#define KEXEC_ARCH_PPC (20 << 16)
#define KEXEC_ARCH_PPC64 (21 << 16)
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 959d454f76a1..e244ed412745 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -725,6 +725,7 @@ enum perf_callchain_context {
#define PERF_FLAG_FD_NO_GROUP (1U << 0)
#define PERF_FLAG_FD_OUTPUT (1U << 1)
#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */
+#define PERF_FLAG_FD_CLOEXEC (1U << 3) /* O_CLOEXEC */
union perf_mem_data_src {
__u64 val;
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 5a0f945927ac..34f9d7387d13 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -39,8 +39,14 @@
#define SCHED_BATCH 3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE 5
+#define SCHED_DEADLINE 6
+
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK 0x40000000
+/*
+ * For the sched_{set,get}attr() calls
+ */
+#define SCHED_FLAG_RESET_ON_FORK 0x01
#endif /* _UAPI_LINUX_SCHED_H */
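A user-space sketch of selecting the new SCHED_DEADLINE policy via sched_setattr(2), which this merge introduces alongside the policy. struct sched_attr is not exported by this hunk, so the definition below follows the sched_setattr(2) interface and is declared locally; SYS_sched_setattr must be provided by the C library or defined by hand, and the timing values are illustrative only.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;		/* SCHED_NORMAL, SCHED_BATCH */
	uint32_t sched_priority;	/* SCHED_FIFO, SCHED_RR */
	uint64_t sched_runtime;		/* SCHED_DEADLINE, nanoseconds */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= 6,			/* SCHED_DEADLINE */
		.sched_flags	= 0x01,			/* SCHED_FLAG_RESET_ON_FORK */
		.sched_runtime	= 10 * 1000 * 1000,	/* 10 ms */
		.sched_deadline	= 30 * 1000 * 1000,	/* 30 ms */
		.sched_period	= 30 * 1000 * 1000,	/* 30 ms */
	};

	/* pid 0 means the calling thread; requires appropriate privileges */
	if (syscall(SYS_sched_setattr, 0, &attr, 0) != 0)
		perror("sched_setattr");
	return 0;
}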
diff --git a/include/uapi/linux/zorro.h b/include/uapi/linux/zorro.h
new file mode 100644
index 000000000000..59d021b242ed
--- /dev/null
+++ b/include/uapi/linux/zorro.h
@@ -0,0 +1,113 @@
+/*
+ * linux/zorro.h -- Amiga AutoConfig (Zorro) Bus Definitions
+ *
+ * Copyright (C) 1995--2003 Geert Uytterhoeven
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _UAPI_LINUX_ZORRO_H
+#define _UAPI_LINUX_ZORRO_H
+
+#include <linux/types.h>
+
+
+ /*
+ * Each Zorro board has a 32-bit ID of the form
+ *
+ * mmmmmmmmmmmmmmmmppppppppeeeeeeee
+ *
+ * with
+ *
+ * mmmmmmmmmmmmmmmm 16-bit Manufacturer ID (assigned by CBM (sigh))
+ * pppppppp 8-bit Product ID (assigned by manufacturer)
+ * eeeeeeee 8-bit Extended Product ID (currently only used
+ * for some GVP boards)
+ */
+
+
+#define ZORRO_MANUF(id) ((id) >> 16)
+#define ZORRO_PROD(id) (((id) >> 8) & 0xff)
+#define ZORRO_EPC(id) ((id) & 0xff)
+
+#define ZORRO_ID(manuf, prod, epc) \
+ ((ZORRO_MANUF_##manuf << 16) | ((prod) << 8) | (epc))
+
+typedef __u32 zorro_id;
+
+
+/* Include the ID list */
+#include <linux/zorro_ids.h>
+
+
+ /*
+ * GVP identifies most of its products through the 'extended product code'
+ * (epc). The epc has to be ANDed with the GVP_PRODMASK before the
+ * identification.
+ */
+
+#define GVP_PRODMASK (0xf8)
+#define GVP_SCSICLKMASK (0x01)
+
+enum GVP_flags {
+ GVP_IO = 0x01,
+ GVP_ACCEL = 0x02,
+ GVP_SCSI = 0x04,
+ GVP_24BITDMA = 0x08,
+ GVP_25BITDMA = 0x10,
+ GVP_NOBANK = 0x20,
+ GVP_14MHZ = 0x40,
+};
+
+
+struct Node {
+ __be32 ln_Succ; /* Pointer to next (successor) */
+ __be32 ln_Pred; /* Pointer to previous (predecessor) */
+ __u8 ln_Type;
+ __s8 ln_Pri; /* Priority, for sorting */
+ __be32 ln_Name; /* ID string, null terminated */
+} __packed;
+
+struct ExpansionRom {
+ /* -First 16 bytes of the expansion ROM */
+ __u8 er_Type; /* Board type, size and flags */
+ __u8 er_Product; /* Product number, assigned by manufacturer */
+ __u8 er_Flags; /* Flags */
+ __u8 er_Reserved03; /* Must be zero ($ff inverted) */
+ __be16 er_Manufacturer; /* Unique ID, ASSIGNED BY COMMODORE-AMIGA! */
+ __be32 er_SerialNumber; /* Available for use by manufacturer */
+ __be16 er_InitDiagVec; /* Offset to optional "DiagArea" structure */
+ __u8 er_Reserved0c;
+ __u8 er_Reserved0d;
+ __u8 er_Reserved0e;
+ __u8 er_Reserved0f;
+} __packed;
+
+/* er_Type board type bits */
+#define ERT_TYPEMASK 0xc0
+#define ERT_ZORROII 0xc0
+#define ERT_ZORROIII 0x80
+
+/* other bits defined in er_Type */
+#define ERTB_MEMLIST 5 /* Link RAM into free memory list */
+#define ERTF_MEMLIST (1<<5)
+
+struct ConfigDev {
+ struct Node cd_Node;
+ __u8 cd_Flags; /* (read/write) */
+ __u8 cd_Pad; /* reserved */
+ struct ExpansionRom cd_Rom; /* copy of board's expansion ROM */
+ __be32 cd_BoardAddr; /* where in memory the board was placed */
+ __be32 cd_BoardSize; /* size of board in bytes */
+ __be16 cd_SlotAddr; /* which slot number (PRIVATE) */
+ __be16 cd_SlotSize; /* number of slots (PRIVATE) */
+ __be32 cd_Driver; /* pointer to node of driver */
+ __be32 cd_NextCD; /* linked list of drivers to config */
+ __be32 cd_Unused[4]; /* for whatever the driver wants */
+} __packed;
+
+#define ZORRO_NUM_AUTO 16
+
+#endif /* _UAPI_LINUX_ZORRO_H */
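As a quick illustration of the ID layout documented in the new header, the three macros decompose a board ID like this (hypothetical ID value, not tied to any real board):

#include <stdio.h>

/* same bit layout as the ZORRO_MANUF/PROD/EPC macros above */
#define ZORRO_MANUF(id)	((id) >> 16)
#define ZORRO_PROD(id)	(((id) >> 8) & 0xff)
#define ZORRO_EPC(id)	((id) & 0xff)

int main(void)
{
	unsigned int id = 0x07db0100;	/* hypothetical: manuf 0x07db, prod 0x01, epc 0x00 */

	printf("manuf=0x%04x prod=0x%02x epc=0x%02x\n",
	       ZORRO_MANUF(id), ZORRO_PROD(id), ZORRO_EPC(id));
	return 0;
}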
diff --git a/include/linux/zorro_ids.h b/include/uapi/linux/zorro_ids.h
index 74bc53bcfdcf..74bc53bcfdcf 100644
--- a/include/linux/zorro_ids.h
+++ b/include/uapi/linux/zorro_ids.h
diff --git a/include/xen/interface/callback.h b/include/xen/interface/callback.h
index 8c5fa0e20155..dc3193f4b581 100644
--- a/include/xen/interface/callback.h
+++ b/include/xen/interface/callback.h
@@ -36,7 +36,7 @@
* @extra_args == Operation-specific extra arguments (NULL if none).
*/
-/* ia64, x86: Callback for event delivery. */
+/* x86: Callback for event delivery. */
#define CALLBACKTYPE_event 0
/* x86: Failsafe callback when guest state cannot be restored by Xen. */
diff --git a/include/xen/interface/io/protocols.h b/include/xen/interface/io/protocols.h
index 056744b4b05e..545a14ba0bb3 100644
--- a/include/xen/interface/io/protocols.h
+++ b/include/xen/interface/io/protocols.h
@@ -3,7 +3,6 @@
#define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
#define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
-#define XEN_IO_PROTO_ABI_IA64 "ia64-abi"
#define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi"
#define XEN_IO_PROTO_ABI_ARM "arm-abi"
@@ -11,8 +10,6 @@
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
#elif defined(__x86_64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
-#elif defined(__ia64__)
-# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
#elif defined(__powerpc64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
#elif defined(__arm__) || defined(__aarch64__)