From b1a0fbfdde65dffd83c84c006f84fa12041907c5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 4 Dec 2013 10:12:40 -0500 Subject: percpu: fix spurious sparse warnings from DEFINE_PER_CPU() When CONFIG_DEBUG_FORCE_WEAK_PER_CPU or CONFIG_ARCH_NEEDS_WEAK_PER_CPU is set, DEFINE_PER_CPU() explodes into a cryptic series of definitions to still allow using "static" for percpu variables while keeping all per-cpu symbols unique in the kernel image, which is required for weak symbols. This ultimately converts the actual symbol to global whether DEFINE_PER_CPU() is prefixed with static or not. Unfortunately, the macro forgot to add an explicit extern declaration of the actual symbol, ending up defining a global symbol without a preceding declaration for static definitions, which naturally don't have a matching DECLARE_PER_CPU(). The only ill effect is the triggering of the following warnings. fs/inode.c:74:8: warning: symbol 'nr_inodes' was not declared. Should it be static? fs/inode.c:75:8: warning: symbol 'nr_unused' was not declared. Should it be static? Fix it by adding an extern declaration in the DEFINE_PER_CPU() macro. Signed-off-by: Tejun Heo Reported-by: Wanlong Gao Tested-by: Wanlong Gao --- include/linux/percpu-defs.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 57e890abe1f0..a5fc7d01aad6 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -69,6 +69,7 @@ __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ + extern __PCPU_ATTRS(sec) __typeof__(type) name; \ __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ __typeof__(type) name #else -- cgit v1.2.3 From 11e6a09fba446ce799a9de51d2d99586024bbdaa Mon Sep 17 00:00:00 2001 From: Jakob Bornecrantz Date: Sun, 8 Dec 2013 23:39:04 -0800 Subject: drm/vmwgfx: Add max surface memory param Userspace uses this to work around overcommit issues by flushing the command stream early. Signed-off-by: Jakob Bornecrantz Reviewed-by: Thomas Hellstrom --- include/uapi/drm/vmwgfx_drm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index bcb0912afe7a..f854ca4a1372 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -75,6 +75,7 @@ #define DRM_VMW_PARAM_FIFO_CAPS 4 #define DRM_VMW_PARAM_MAX_FB_SIZE 5 #define DRM_VMW_PARAM_FIFO_HW_VERSION 6 +#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 /** * struct drm_vmw_getparam_arg -- cgit v1.2.3 From ba1f14fbe70965ae0fb1655a5275a62723f65b77 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 28 Nov 2013 14:26:41 +0100 Subject: sched: Remove PREEMPT_NEED_RESCHED from generic code While hunting a preemption issue with Alexander, Ben noticed that the currently generic PREEMPT_NEED_RESCHED stuff is horribly broken for load-store architectures. We currently rely on the IPI to fold TIF_NEED_RESCHED into PREEMPT_NEED_RESCHED, but when this IPI lands while we already have a load for the preempt-count but before the store, the store will erase the PREEMPT_NEED_RESCHED change. The current preempt-count only works on load-store archs because interrupts are assumed to be completely balanced wrt their preempt_count fiddling; the previous preempt_count load will match the preempt_count state after the interrupt and therefore nothing gets lost.
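To make the failure mode concrete, here is a minimal sketch of the lost update (illustrative pseudo-assembly, not taken from the patch; "ti" stands for the thread_info base):

	ldr	r0, [ti, #PREEMPT]	/* load preempt_count */
		<resched IPI lands here: set_preempt_need_resched()
		 rewrites the PREEMPT_NEED_RESCHED bit in memory>
	sub	r0, r0, #1		/* decrement in register */
	str	r0, [ti, #PREEMPT]	/* store of the stale value erases
					   the folded NEED_RESCHED change */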
This patch removes the PREEMPT_NEED_RESCHED usage from generic code and pushes it into x86 arch code; the generic code goes back to relying on TIF_NEED_RESCHED. Boot tested on x86_64 and compile tested on ppc64. Reported-by: Benjamin Herrenschmidt Reported-and-Tested-by: Alexander Graf Signed-off-by: Peter Zijlstra Cc: Linus Torvalds Link: http://lkml.kernel.org/r/20131128132641.GP10022@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- include/asm-generic/preempt.h | 35 +++++++++++------------------------ include/linux/sched.h | 2 -- 2 files changed, 11 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h index ddf2b420ac8f..1cd3f5d767a8 100644 --- a/include/asm-generic/preempt.h +++ b/include/asm-generic/preempt.h @@ -3,13 +3,11 @@ #include -/* - * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users - * that think a non-zero value indicates we cannot preempt. - */ +#define PREEMPT_ENABLED (0) + static __always_inline int preempt_count(void) { - return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED; + return current_thread_info()->preempt_count; } static __always_inline int *preempt_count_ptr(void) @@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void) return ¤t_thread_info()->preempt_count; } -/* - * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the - * alternative is loosing a reschedule. Better schedule too often -- also this - * should be a very rare operation. - */ static __always_inline void preempt_count_set(int pc) { *preempt_count_ptr() = pc; @@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc) task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ } while (0) -/* - * We fold the NEED_RESCHED bit into the preempt count such that - * preempt_enable() can decrement and test for needing to reschedule with a - * single instruction. - * - * We invert the actual bit, so that when the decrement hits 0 we know we both - * need to resched (the bit is cleared) and can resched (no preempt count). - */ - static __always_inline void set_preempt_need_resched(void) { - *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED; } static __always_inline void clear_preempt_need_resched(void) { - *preempt_count_ptr() |= PREEMPT_NEED_RESCHED; } static __always_inline bool test_preempt_need_resched(void) { - return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED); + return false; } /* @@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val) static __always_inline bool __preempt_count_dec_and_test(void) { - return !--*preempt_count_ptr(); + /* + * Because of load-store architectures cannot do per-cpu atomic + * operations; we cannot use PREEMPT_NEED_RESCHED because it might get + * lost. 
+ */ + return !--*preempt_count_ptr() && tif_need_resched(); } /* @@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void) */ static __always_inline bool should_resched(void) { - return unlikely(!*preempt_count_ptr()); + return unlikely(!preempt_count() && tif_need_resched()); } #ifdef CONFIG_PREEMPT diff --git a/include/linux/sched.h b/include/linux/sched.h index 768b037dfacb..96d674ba3876 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -440,8 +440,6 @@ struct task_cputime { .sum_exec_runtime = 0, \ } -#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED) - #ifdef CONFIG_PREEMPT_COUNT #define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) #else -- cgit v1.2.3 From be5e610c0fd6ef772cafb9e0bd4128134804aef3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 18 Nov 2013 18:27:06 +0100 Subject: math64: Add mul_u64_u32_shr() Introduce mul_u64_u32_shr() as proposed by Andy a while back; it allows using 64x64->128 muls on 64bit archs and recent GCC which defines __SIZEOF_INT128__ and __int128. (This new method will be used by the scheduler.) Signed-off-by: Peter Zijlstra Cc: fweisbec@gmail.com Cc: Andy Lutomirski Cc: Linus Torvalds Link: http://lkml.kernel.org/n/tip-hxjoeuzmrcaumR0uZwjpe2pv@git.kernel.org Signed-off-by: Ingo Molnar --- include/linux/math64.h | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'include') diff --git a/include/linux/math64.h b/include/linux/math64.h index 69ed5f5e9f6e..c45c089bfdac 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) return ret; } +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) + +#ifndef mul_u64_u32_shr +static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) +{ + return (u64)(((unsigned __int128)a * mul) >> shift); +} +#endif /* mul_u64_u32_shr */ + +#else + +#ifndef mul_u64_u32_shr +static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) +{ + u32 ah, al; + u64 ret; + + al = a; + ah = a >> 32; + + ret = ((u64)al * mul) >> shift; + if (ah) + ret += ((u64)ah * mul) << (32 - shift); + + return ret; +} +#endif /* mul_u64_u32_shr */ + +#endif + #endif /* _LINUX_MATH64_H */ -- cgit v1.2.3 From 9dbdb155532395ba000c5d5d187658b0e17e529f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 18 Nov 2013 18:27:06 +0100 Subject: sched/fair: Rework sched_fair time accounting Christian suffers from a bad BIOS that wrecks his i5's TSC sync. This results in him occasionally seeing time going backwards - which crashes the scheduler ... Most of our time accounting can actually handle that except the most common one: the tick time update of sched_fair. There is a further problem with that code; previously we assumed that because we get a tick every TICK_NSEC our time delta could never exceed 32bits and the math was simpler. However, ever since Frederic managed to get NO_HZ_FULL merged, this is no longer the case, since now a task can run for a long time indeed without getting a tick. It only takes about ~4.2 seconds to overflow our u32 in nanoseconds. This means we not only need to deal better with time going backwards, but also need to be able to deal with large deltas. This patch reworks the entire code and uses mul_u64_u32_shr() as proposed by Andy a long while ago.
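For reference, the overflow bound quoted above is just: 2^32 ns = 4,294,967,296 ns ~= 4.295 s, so any tickless stretch longer than that no longer fits a u32 nanosecond delta.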
We express our virtual time scale factor in a u32 multiplier and shift right, and the 32bit mul_u64_u32_shr() implementation reduces to a single 32x32->64 multiply if the time delta is still short (common case). For 64bit a 64x64->128 multiply can be used if ARCH_SUPPORTS_INT128. Reported-and-Tested-by: Christian Engelmayer Signed-off-by: Peter Zijlstra Cc: fweisbec@gmail.com Cc: Paul Turner Cc: Stanislaw Gruszka Cc: Andy Lutomirski Cc: Linus Torvalds Cc: Andrew Morton Link: http://lkml.kernel.org/r/20131118172706.GI3866@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- include/linux/sched.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 96d674ba3876..53f97eb8dbc7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -930,7 +930,8 @@ struct pipe_inode_info; struct uts_namespace; struct load_weight { - unsigned long weight, inv_weight; + unsigned long weight; + u32 inv_weight; }; struct sched_avg { -- cgit v1.2.3 From a51d5229d10dd3a337b674ce8603437d2996c5c3 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Sat, 23 Nov 2013 10:35:58 -0800 Subject: target: Remove write-only stats fields and lock from struct se_node_acl Commit 04f3b31bff72 ("iscsi-target: Convert iscsi_session statistics to atomic_long_t") removed the updating of these fields in iscsi (the only fabric driver that ever touched these counters), and the core has no way to report or otherwise use the values. Remove the last remnants of these counters. Signed-off-by: Roland Dreier Signed-off-by: Nicholas Bellinger --- include/target/target_core_base.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include') diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 45412a6afa69..9f1dda659c5a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -517,10 +517,6 @@ struct se_node_acl { u32 acl_index; #define MAX_ACL_TAG_SIZE 64 char acl_tag[MAX_ACL_TAG_SIZE]; - u64 num_cmds; - u64 read_bytes; - u64 write_bytes; - spinlock_t stats_lock; /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ atomic_t acl_pr_ref_count; struct se_dev_entry **device_list; -- cgit v1.2.3 From 380108d891acf8db5cf0d477176c7ed2b62b7928 Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Tue, 3 Dec 2013 15:40:37 +0000 Subject: xen/block: Correctly define structures in public headers on ARM32 and ARM64 On ARM (32 bits and 64 bits), the double-word is 8-bytes aligned. This will result in different structure layouts between the Xen and Linux repositories. As Linux is using the __packed__ attribute, it must have a 4-byte padding before each "id" field. This change breaks guest block support with older kernels. IMHO, it's acceptable because Xen on ARM is still on Tech Preview and the hypercall ABI is not yet frozen. Only one architecture (x86_32) doesn't have a 64-bit ABI for the block interface. Don't add padding if Linux is compiled for this architecture. Signed-off-by: Julien Grall Acked-by: Roger Pau Monne Acked-by: David Vrabel Cc: Boris Ostrovsky Cc: Ian Campbell Acked-by: Stefano Stabellini [I had asked for confirmation that it did not break x86 and Ian went beyond the call of duty to confirm it. Also an internal regression bucket with 32/64 dom0 with 32/64 domU (PV and HVM) confirmed no regressions. ABI changes are a drag..]
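To make the alignment point concrete, a minimal sketch (hypothetical struct name, reconstructed from the offsetof() comments in the hunks below; the real request starts with a uint8_t operation code, so under __packed__ only the explicit pad brings "id" to the ABI offset of 8):

	struct example_blkif_request {			/* hypothetical name */
		uint8_t operation;			/* offset 0 */
		struct {
			uint8_t      nr_segments;	/* offset 1 */
			blkif_vdev_t handle;		/* offset 2 (uint16_t) */
	#ifndef CONFIG_X86_32
			uint32_t     _pad1;		/* offset 4 */
	#endif
			uint64_t     id;		/* offset 8 (4 on x86_32) */
		} rw;
	} __attribute__((__packed__));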
Signed-off-by: Konrad Rzeszutek Wilk --- include/xen/interface/io/blkif.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h index 65e12099ef89..ae665ac59c36 100644 --- a/include/xen/interface/io/blkif.h +++ b/include/xen/interface/io/blkif.h @@ -146,7 +146,7 @@ struct blkif_request_segment_aligned { struct blkif_request_rw { uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ #endif uint64_t id; /* private guest value, echoed in resp */ @@ -163,7 +163,7 @@ struct blkif_request_discard { uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */ #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ blkif_vdev_t _pad1; /* only for read/write requests */ -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ @@ -175,7 +175,7 @@ struct blkif_request_discard { struct blkif_request_other { uint8_t _pad1; blkif_vdev_t _pad2; /* only for read/write requests */ -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ @@ -184,7 +184,7 @@ struct blkif_request_other { struct blkif_request_indirect { uint8_t indirect_op; uint16_t nr_segments; -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ #endif uint64_t id; @@ -192,7 +192,7 @@ struct blkif_request_indirect { blkif_vdev_t handle; uint16_t _pad2; grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad3; /* make it 64 byte aligned */ #else uint64_t _pad3; /* make it 64 byte aligned */ -- cgit v1.2.3 From 2a4d81547b88b4ff566d4b4f9c0e36285ed07634 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Sun, 15 Dec 2013 21:00:48 -0800 Subject: Input: define KEY_WWAN for Wireless WAN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some devices with support for mobile networks may have buttons for enabling/disabling such a connection. An example is the Linksys 54G3G router. We already have KEY_BLUETOOTH, KEY_WLAN and KEY_UWB so it makes sense to add KEY_WWAN as well. As we already have KEY_WIMAX, use its value for KEY_WWAN and make it an alias. Signed-off-by: Rafał Miłecki Signed-off-by: Dmitry Torokhov --- include/uapi/linux/input.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index ecc88592ecbe..bd24470d24a2 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h @@ -464,7 +464,8 @@ struct input_keymap_entry { #define KEY_BRIGHTNESS_ZERO 244 /* brightness off, use ambient */ #define KEY_DISPLAY_OFF 245 /* display device to off state */ -#define KEY_WIMAX 246 +#define KEY_WWAN 246 /* Wireless WAN (LTE, UMTS, GSM, etc.)
*/ +#define KEY_WIMAX KEY_WWAN #define KEY_RFKILL 247 /* Key that controls all radios */ #define KEY_MICMUTE 248 /* Mute / unmute the microphone */ -- cgit v1.2.3 From 309243ec14fde1149e1c66f19746e239e86caf39 Mon Sep 17 00:00:00 2001 From: Yann Droneaud Date: Wed, 11 Dec 2013 23:01:44 +0100 Subject: IB/core: const'ify inbuf in struct ib_udata Userspace input buffer is not modified by the kernel, so it can be 'const'. This is also a prerequisite to remove the implicit cast from INIT_UDATA(). Link: http://marc.info/?i=cover.1386798254.git.ydroneaud@opteya.com> Signed-off-by: Yann Droneaud Signed-off-by: Roland Dreier --- include/rdma/ib_verbs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 979874c627ee..61e1935c91b1 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -978,7 +978,7 @@ struct ib_uobject { }; struct ib_udata { - void __user *inbuf; + const void __user *inbuf; void __user *outbuf; size_t inlen; size_t outlen; -- cgit v1.2.3 From f78dea064c5f7de07de4912a6e5136dbc443d614 Mon Sep 17 00:00:00 2001 From: Marc Carino Date: Mon, 16 Dec 2013 18:15:53 -0800 Subject: libata: implement ATA_HORKAGE_NO_NCQ_TRIM and apply it to Micron M500 SSDs Certain drives cannot handle queued TRIM commands properly, even though support is indicated in the IDENTIFY DEVICE buffer. This patch allows for disabling the commands for the affected drives and applies it to the Micron/Crucial M500 SSDs which exhibit incorrect protocol behavior when issued queued TRIM commands, which could lead to silent data corruption. tj: Merged two unnecessarily split patches and made minor edits including shortening horkage name. Signed-off-by: Marc Carino Signed-off-by: Tejun Heo Link: http://lkml.kernel.org/g/1387246554-7311-1-git-send-email-marc.ceeeee@gmail.com Cc: stable@vger.kernel.org # 3.12+ --- include/linux/libata.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/libata.h b/include/linux/libata.h index 0e23c26485f4..9b503376738f 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -418,6 +418,7 @@ enum { ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */ + ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ -- cgit v1.2.3 From 189b84fb54490ae24111124346a8e63f8e019385 Mon Sep 17 00:00:00 2001 From: Vince Weaver Date: Fri, 13 Dec 2013 15:52:25 -0500 Subject: perf: Document the new transaction sample type Commit fdfbbd07e91f8fe3871 ("perf: Add generic transaction flags") added support for PERF_SAMPLE_TRANSACTION but forgot to add documentation for the sample type to include/uapi/linux/perf_event.h Signed-off-by: Vince Weaver Signed-off-by: Peter Zijlstra Cc: Andi Kleen Link: http://lkml.kernel.org/r/alpine.DEB.2.02.1312131548450.10372@pianoman.cluster.toy Signed-off-by: Ingo Molnar --- include/uapi/linux/perf_event.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index e1802d6153ae..959d454f76a1 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -679,6 +679,7 @@ enum perf_event_type { * * { u64 weight; } && PERF_SAMPLE_WEIGHT * { u64 data_src; } && PERF_SAMPLE_DATA_SRC + * { u64 transaction; } &&
PERF_SAMPLE_TRANSACTION * }; */ PERF_RECORD_SAMPLE = 9, -- cgit v1.2.3 From 85328240c625f322af9f69c7b60e619717101d77 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 26 Nov 2013 06:33:52 +0000 Subject: net: allow netdev_all_upper_get_next_dev_rcu with rtnl lock held It is useful to be able to walk all upper devices when bringing a device online where the RTNL lock is held. In this case it is safe to walk the all_adj_list because the RTNL lock is used to protect the write side as well. This patch adds a check to see if the rtnl lock is held before throwing a warning in netdev_all_upper_get_next_dev_rcu(). Also, because we now have a call site for lockdep_rtnl_is_held() outside CONFIG_PROVE_LOCKING, an inline definition returning 1 is needed. This is similar to rcu_read_lock_is_held(). Fixes: 2a47fa45d4df ("ixgbe: enable l2 forwarding acceleration for macvlans") CC: Veaceslav Falico Reported-by: Yuanhan Liu Signed-off-by: John Fastabend Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- include/linux/rtnetlink.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 939428ad25ac..8e3e66ac0a52 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -24,6 +24,11 @@ extern int rtnl_trylock(void); extern int rtnl_is_locked(void); #ifdef CONFIG_PROVE_LOCKING extern int lockdep_rtnl_is_held(void); +#else +static inline int lockdep_rtnl_is_held(void) +{ + return 1; +} #endif /* #ifdef CONFIG_PROVE_LOCKING */ /** -- cgit v1.2.3 From 0e3da5bb8da45890b1dc413404e0f978ab71173e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20Ter=C3=A4s?= Date: Mon, 16 Dec 2013 11:02:09 +0200 Subject: ip_gre: fix msg_name parsing for recvfrom/recvmsg MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ipgre_header_parse() needs to parse the tunnel's ip header and it uses mac_header to locate the iphdr. This got broken when gre tunneling was refactored as mac_header is no longer updated to point to iphdr. Introduce skb_pop_mac_header() helper to do the mac_header assignment and use it in ipgre_rcv() to fix msg_name parsing. Bug introduced in commit c54419321455 (GRE: Refactor GRE tunneling code.) Cc: Pravin B Shelar Signed-off-by: Timo Teräs Signed-off-by: David S. Miller --- include/linux/skbuff.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 215b5ea1cb30..6aae83890520 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1638,6 +1638,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) skb->mac_header += offset; } +static inline void skb_pop_mac_header(struct sk_buff *skb) +{ + skb->mac_header = skb->network_header; +} + static inline void skb_probe_transport_header(struct sk_buff *skb, const int offset_hint) { -- cgit v1.2.3 From c97102ba96324da330078ad8619ba4dfe840dbe3 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 18 Dec 2013 17:08:31 -0800 Subject: kexec: migrate to reboot cpu Commit 1b3a5d02ee07 ("reboot: move arch/x86 reboot= handling to generic kernel") moved reboot= handling to generic code. In the process it also removed the code in native_machine_shutdown() which moved the reboot process to reboot_cpu/cpu0. I guess that thought must have been that all reboot paths are calling migrate_to_reboot_cpu(), so we don't need this special handling.
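For reference, a simplified sketch of the normal reboot path after that commit (illustrative, reconstructed from the description above; not the complete function):

	void kernel_restart(char *cmd)
	{
		kernel_restart_prepare(cmd);
		migrate_to_reboot_cpu();	/* generic code moves us to the reboot cpu */
		...
		machine_restart(cmd);
	}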
But the kexec reboot path (kernel_kexec()) is not calling migrate_to_reboot_cpu(), so the above change broke kexec. Now reboot can happen on a non-boot cpu, and when INIT is sent in the second kernel to bring up the BP, it brings down the machine. So start calling migrate_to_reboot_cpu() in the kexec reboot path to avoid this problem. Bisected by WANG Chao. Reported-by: Matthew Whitehead Reported-by: Dave Young Signed-off-by: Vivek Goyal Tested-by: Baoquan He Tested-by: WANG Chao Acked-by: H. Peter Anvin Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/reboot.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 8e00f9f6f963..9e7db9e73cc1 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *); * Architecture-specific implementations of sys_reboot commands. */ +extern void migrate_to_reboot_cpu(void); extern void machine_restart(char *cmd); extern void machine_halt(void); extern void machine_power_off(void); -- cgit v1.2.3 From de466bd628e8d663fdf3f791bc8db318ee85c714 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 18 Dec 2013 17:08:42 -0800 Subject: mm: numa: avoid unnecessary disruption of NUMA hinting during migration do_huge_pmd_numa_page() handles the case where there is parallel THP migration. However, by the time it is checked the NUMA hinting information has already been disrupted. This patch adds an earlier check with some helpers. Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Alex Thorlton Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index f5096b58b20d..b7717d74da7f 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -90,10 +90,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_NUMA_BALANCING +extern bool pmd_trans_migrating(pmd_t pmd); +extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd); extern int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node); extern bool migrate_ratelimited(int node); #else +static inline bool pmd_trans_migrating(pmd_t pmd) +{ + return false; +} +static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd) +{ +} static inline int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node) { -- cgit v1.2.3 From 20841405940e7be0617612d521e206e4b6b325db Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Wed, 18 Dec 2013 17:08:44 -0800 Subject: mm: fix TLB flush race between migration, and change_protection_range There are a few subtle races between change_protection_range (used by mprotect and change_prot_numa) on one side, and NUMA page migration and compaction on the other side. The basic race is that there is a time window between when the PTE gets made non-present (PROT_NONE or NUMA), and the TLB is flushed. During that time, a CPU may continue writing to the page. This is fine most of the time; however, compaction or the NUMA migration code may come in, and migrate the page away. When that happens, the CPU may continue writing, through the cached translation, to what is no longer the current memory location of the process. This only affects x86, which has a somewhat optimistic pte_accessible.
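A sketch of the x86 side of the fix (the arch hunk is outside this include/-only extract, so treat the exact flag names as an assumption reconstructed from the description):

	static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
	{
		if (pte_flags(a) & _PAGE_PRESENT)
			return true;

		if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
				mm_tlb_flush_pending(mm))
			return true;

		return false;
	}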
All other architectures appear to be safe, and will either always flush, or flush whenever there is a valid mapping, even with no permissions (SPARC). The basic race looks like this (originally drawn as a three-column diagram across CPUs A, B and C; shown here as a single time-ordered trace):

	load TLB entry
	make entry PTE/PMD_NUMA
	fault on entry
	read/write old page
	start migrating page
	change PTE/PMD to new page
	read/write old page [*]
	flush TLB
	reload TLB from new entry
	read/write new page
	lose data

[*] the old page may belong to a new user at this point! The obvious fix is to flush remote TLB entries, by making sure that pte_accessible is aware of the fact that PROT_NONE and PROT_NUMA memory may still be accessible if there is a TLB flush pending for the mm. This should fix both NUMA migration and compaction. [mgorman@suse.de: fix build] Signed-off-by: Rik van Riel Signed-off-by: Mel Gorman Cc: Alex Thorlton Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/pgtable.h | 2 +- include/linux/mm_types.h | 44 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index f330d28e4d0e..b12079afbd5f 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) #endif #ifndef pte_accessible -# define pte_accessible(pte) ((void)(pte),1) +# define pte_accessible(mm, pte) ((void)(pte), 1) #endif #ifndef flush_tlb_fix_spurious_fault diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index bd299418a934..e5c49c30460f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -442,6 +442,14 @@ struct mm_struct { /* numa_scan_seq prevents two threads setting pte_numa */ int numa_scan_seq; +#endif +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) + /* + * An operation with batched TLB flushing is going on. Anything that + * can move process memory needs to flush the TLB when moving a + * PROT_NONE or PROT_NUMA mapped page. + */ + bool tlb_flush_pending; #endif struct uprobes_state uprobes_state; }; @@ -459,4 +467,40 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm) return mm->cpu_vm_mask_var; } +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) +/* + * Memory barriers to keep this state in sync are graciously provided by + * the page table locks, outside of which no page table modifications happen. + * The barriers below prevent the compiler from re-ordering the instructions + * around the memory barriers that are already present in the code. + */ +static inline bool mm_tlb_flush_pending(struct mm_struct *mm) +{ + barrier(); + return mm->tlb_flush_pending; +} +static inline void set_tlb_flush_pending(struct mm_struct *mm) +{ + mm->tlb_flush_pending = true; + barrier(); +} +/* Clearing is done after a TLB flush, which also provides a barrier.
*/ +static inline void clear_tlb_flush_pending(struct mm_struct *mm) +{ + barrier(); + mm->tlb_flush_pending = false; +} +#else +static inline bool mm_tlb_flush_pending(struct mm_struct *mm) +{ + return false; +} +static inline void set_tlb_flush_pending(struct mm_struct *mm) +{ +} +static inline void clear_tlb_flush_pending(struct mm_struct *mm) +{ +} +#endif + #endif /* _LINUX_MM_TYPES_H */ -- cgit v1.2.3 From af2c1401e6f9177483be4fad876d0073669df9df Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 18 Dec 2013 17:08:45 -0800 Subject: mm: numa: guarantee that tlb_flush_pending updates are visible before page table updates According to documentation on barriers, stores issued before a LOCK can complete after the lock implying that it's possible tlb_flush_pending can be visible after a page table update. As per revised documentation, this patch adds a smp_mb__before_spinlock to guarantee the correct ordering. Signed-off-by: Mel Gorman Acked-by: Paul E. McKenney Reviewed-by: Rik van Riel Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_types.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index e5c49c30460f..ad0616f2fe2c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -482,7 +482,12 @@ static inline bool mm_tlb_flush_pending(struct mm_struct *mm) static inline void set_tlb_flush_pending(struct mm_struct *mm) { mm->tlb_flush_pending = true; - barrier(); + + /* + * Guarantee that the tlb_flush_pending store does not leak into the + * critical section updating the page tables + */ + smp_mb__before_spinlock(); } /* Clearing is done after a TLB flush, which also provides a barrier. */ static inline void clear_tlb_flush_pending(struct mm_struct *mm) -- cgit v1.2.3 From 95cadace8f3959282e76ebf8b382bd0930807d2c Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 12 Dec 2013 12:24:11 -0800 Subject: target/file: Update hw_max_sectors based on current block_size This patch allows FILEIO to update hw_max_sectors based on the current max_bytes_per_io. This is required because vfs_[writev,readv]() can accept a maximum of 2048 iovecs per call, so the enforced hw_max_sectors really needs to be calculated based on block_size. This addresses a >= v3.5 bug where block_size=512 was rejecting > 1M sized I/O requests, because FD_MAX_SECTORS was hardcoded to 2048 for the block_size=4096 case. (v2: Use max_bytes_per_io instead of ->update_hw_max_sectors) Reported-by: Henrik Goldman Cc: #3.5+ Signed-off-by: Nicholas Bellinger --- include/target/target_core_base.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 9f1dda659c5a..321301c0a643 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -620,6 +620,7 @@ struct se_dev_attrib { u32 unmap_granularity; u32 unmap_granularity_alignment; u32 max_write_same_len; + u32 max_bytes_per_io; struct se_device *da_dev; struct config_group da_group; }; -- cgit v1.2.3 From ee53664bda169f519ce3c6a22d378f0b946c8178 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 20 Dec 2013 15:10:03 +0200 Subject: mm: Fix NULL pointer dereference in madvise(MADV_WILLNEED) support Sasha Levin found a NULL pointer dereference that is due to a missing page table lock, which in turn is due to the pmd entry in question being a transparent huge-table entry. 
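The failing pattern, as an illustrative sketch of the MADV_WILLNEED pmd walker (simplified, not the exact upstream code):

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;	/* expected to skip THP entries... */
	/* ...but a THP pmd with the NUMA bit set slips through, and the
	 * walk then dereferences it as if it were a page-table page: */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);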
The code - introduced in commit 1998cc048901 ("mm: make madvise(MADV_WILLNEED) support swap file prefetch") - correctly checks for this situation using pmd_none_or_trans_huge_or_clear_bad(), but it turns out that that function doesn't work correctly. pmd_none_or_trans_huge_or_clear_bad() expected that pmd_bad() would trigger if the transparent hugepage bit was set, but it doesn't do that if pmd_numa() is also set. Note that the NUMA bit only gets set on real NUMA machines, so people trying to reproduce this on most normal development systems would never actually trigger this. Fix it by removing the very subtle (and subtly incorrect) expectation, and instead just checking pmd_trans_huge() explicitly. Reported-by: Sasha Levin Acked-by: Andrea Arcangeli [ Additionally remove the now stale test for pmd_trans_huge() inside the pmd_bad() case - Linus ] Signed-off-by: Linus Torvalds --- include/asm-generic/pgtable.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index b12079afbd5f..db0923458940 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) #ifdef CONFIG_TRANSPARENT_HUGEPAGE barrier(); #endif - if (pmd_none(pmdval)) + if (pmd_none(pmdval) || pmd_trans_huge(pmdval)) return 1; if (unlikely(pmd_bad(pmdval))) { - if (!pmd_trans_huge(pmdval)) - pmd_clear_bad(pmd); + pmd_clear_bad(pmd); return 1; } return 0; -- cgit v1.2.3 From 597d795a2a786d22dd872332428e2b9439ede639 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 20 Dec 2013 13:35:58 +0200 Subject: mm: do not allocate page->ptl dynamically, if spinlock_t fits to long In struct page we have enough space to fit long-size page->ptl there, but we use dynamically-allocated page->ptl if sizeof(spinlock_t) is larger than sizeof(int). It hurts 64-bit architectures with CONFIG_GENERIC_LOCKBREAK, where sizeof(spinlock_t) == 8, but it easily fits into struct page. Signed-off-by: Kirill A.
Shutemov Acked-by: Hugh Dickins Signed-off-by: Linus Torvalds --- include/linux/lockref.h | 2 +- include/linux/mm.h | 6 +++--- include/linux/mm_types.h | 3 ++- 3 files changed, 6 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/lockref.h b/include/linux/lockref.h index c8929c3832db..4bfde0e99ed5 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h @@ -19,7 +19,7 @@ #define USE_CMPXCHG_LOCKREF \ (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \ - IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS) + IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4) struct lockref { union { diff --git a/include/linux/mm.h b/include/linux/mm.h index 1cedd000cf29..35527173cf50 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1317,7 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ #if USE_SPLIT_PTE_PTLOCKS -#if BLOATED_SPINLOCKS +#if ALLOC_SPLIT_PTLOCKS extern bool ptlock_alloc(struct page *page); extern void ptlock_free(struct page *page); @@ -1325,7 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page) { return page->ptl; } -#else /* BLOATED_SPINLOCKS */ +#else /* ALLOC_SPLIT_PTLOCKS */ static inline bool ptlock_alloc(struct page *page) { return true; @@ -1339,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page) { return &page->ptl; } -#endif /* BLOATED_SPINLOCKS */ +#endif /* ALLOC_SPLIT_PTLOCKS */ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) { diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index ad0616f2fe2c..290901a8c1de 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -26,6 +26,7 @@ struct address_space; #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) +#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) /* * Each physical page in the system has a struct page associated with @@ -155,7 +156,7 @@ struct page { * system if PG_buddy is set. */ #if USE_SPLIT_PTE_PTLOCKS -#if BLOATED_SPINLOCKS +#if ALLOC_SPLIT_PTLOCKS spinlock_t *ptl; #else spinlock_t ptl; -- cgit v1.2.3 From df36ac1bc2a166eef90785d584e4cfed6f52bd32 Mon Sep 17 00:00:00 2001 From: "Luck, Tony" Date: Wed, 18 Dec 2013 15:17:10 -0800 Subject: pstore: Don't allow high traffic options on fragile devices Some pstore backing devices use on board flash as persistent storage. These have limited numbers of write cycles so it is a poor idea to use them from high frequency operations. 
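A backend opts out by setting the new flag at registration time; a minimal sketch (hypothetical backend, not one of the drivers touched by this patch):

	static struct pstore_info example_psinfo = {
		.owner	= THIS_MODULE,
		.name	= "example",
		.flags	= PSTORE_FLAGS_FRAGILE,	/* limited write cycles: let
						   pstore skip high-traffic
						   front ends */
		.open	= example_open,
		.read	= example_read,
		.write	= example_write,
	};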
Signed-off-by: Tony Luck Signed-off-by: Linus Torvalds --- include/linux/pstore.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/linux/pstore.h b/include/linux/pstore.h index abd437d0a8a7..ece0c6bbfcc5 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -51,6 +51,7 @@ struct pstore_info { char *buf; size_t bufsize; struct mutex read_mutex; /* serialize open/read/close */ + int flags; int (*open)(struct pstore_info *psi); int (*close)(struct pstore_info *psi); ssize_t (*read)(u64 *id, enum pstore_type_id *type, @@ -70,6 +71,8 @@ struct pstore_info { void *data; }; +#define PSTORE_FLAGS_FRAGILE 1 + #ifdef CONFIG_PSTORE extern int pstore_register(struct pstore_info *); extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); -- cgit v1.2.3 From 8e321fefb0e60bae4e2a28d20fc4fa30758d27c6 Mon Sep 17 00:00:00 2001 From: Benjamin LaHaise Date: Sat, 21 Dec 2013 17:56:08 -0500 Subject: aio/migratepages: make aio migrate pages sane The arbitrary restriction on page counts offered by the core migrate_page_move_mapping() code results in rather suspicious looking fiddling with page reference counts in the aio_migratepage() operation. To fix this, make migrate_page_move_mapping() take an extra_count parameter that allows aio to tell the code about its own reference count on the page being migrated. While cleaning up aio_migratepage(), make it validate that the old page being passed in is actually what aio_migratepage() expects to prevent misbehaviour in the case of races. Signed-off-by: Benjamin LaHaise --- include/linux/migrate.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index b7717d74da7f..f015c059e159 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page); extern int migrate_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page, - struct buffer_head *head, enum migrate_mode mode); + struct buffer_head *head, enum migrate_mode mode, + int extra_count); #else static inline void putback_lru_pages(struct list_head *l) {} -- cgit v1.2.3 From d00adcc8ae9e22eca9d8af5f66c59ad9a74c90ec Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 23 Dec 2013 09:31:58 -0500 Subject: drm/radeon: 0x9649 is SUMO2 not SUMO Fixes rendering corruption due to incorrect gfx configuration. 
bug: https://bugs.freedesktop.org/show_bug.cgi?id=63599 Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- include/drm/drm_pciids.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 87578c109e48..49376aec2fbb 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -600,7 +600,7 @@ {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ - {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ -- cgit v1.2.3 From 439a1cfffe2c1a06e5a6394ccd5d18a8e89b15d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= Date: Sun, 22 Dec 2013 02:18:01 +0100 Subject: drm/radeon: expose render backend mask to the userspace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This will allow userspace to correctly program the PA_SC_RASTER_CONFIG register, so it can be considered a fix. Signed-off-by: Marek Olšák Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- include/uapi/drm/radeon_drm.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 2f3f7ea8c77b..fe421e8a431b 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -983,6 +983,8 @@ struct drm_radeon_cs { #define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17 /* CIK macrotile mode array */ #define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18 +/* query the number of render backends */ +#define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19 struct drm_radeon_info { -- cgit v1.2.3 From f60900f2609e893c7f8d0bccc7ada4947dac4cd5 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 23 Dec 2013 18:49:30 +0100 Subject: auxvec.h: account for AT_HWCAP2 in AT_VECTOR_SIZE_BASE Commit 2171364d1a92 ("powerpc: Add HWCAP2 aux entry") introduced a new AT_ auxv entry type AT_HWCAP2 but failed to update AT_VECTOR_SIZE_BASE accordingly. 
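For context, AT_VECTOR_SIZE_BASE feeds the auxv sizing via linux/binfmts.h (quoted for reference, unchanged by this patch):

	#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

so undercounting the AT_* entries by one lets the writer overrun mm->saved_auxv[] once an architecture actually supplies AT_HWCAP2.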
Signed-off-by: Ard Biesheuvel Fixes: 2171364d1a92 (powerpc: Add HWCAP2 aux entry) Cc: stable@vger.kernel.org Acked-by: Michael Neuling Cc: Nishanth Aravamudan Cc: Benjamin Herrenschmidt Signed-off-by: Linus Torvalds --- include/linux/auxvec.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h index 669fef5c745a..3e0fbe441763 100644 --- a/include/linux/auxvec.h +++ b/include/linux/auxvec.h @@ -3,6 +3,6 @@ #include -#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */ +#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */ /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ #endif /* _LINUX_AUXVEC_H */ -- cgit v1.2.3 From 73409f3b0ff0ae7bc1f647936b23e6d5d5dcbe28 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 27 Dec 2013 13:04:33 -0500 Subject: net: Add some clarification to skb_tx_timestamp() comment. We've seen so many instances of people invoking skb_tx_timestamp() after the device already has been given the packet, that it's worth being a little bit more verbose and explicit in this comment. Signed-off-by: David S. Miller --- include/linux/skbuff.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6aae83890520..6f69b3f914fb 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2531,6 +2531,10 @@ static inline void sw_tx_timestamp(struct sk_buff *skb) * Ethernet MAC Drivers should call this function in their hard_xmit() * function immediately before giving the sk_buff to the MAC hardware. * + * Specifically, one should make absolutely sure that this function is + * called before TX completion of this packet can trigger. Otherwise + * the packet could potentially already be freed. + * * @skb: A socket buffer. */ static inline void skb_tx_timestamp(struct sk_buff *skb) -- cgit v1.2.3 From f244d8b623dae7a7bc695b0336f67729b95a9736 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 31 Dec 2013 13:39:42 +0100 Subject: ACPIPHP / radeon / nouveau: Fix VGA switcheroo problem related to hotplug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The changes in the ACPI-based PCI hotplug (ACPIPHP) subsystem made during the 3.12 development cycle uncovered a problem with VGA switcheroo that on some systems, when the device-specific method (ATPX in the radeon case, _DSM in the nouveau case) is used to turn off the discrete graphics, the BIOS generates ACPI hotplug events for that device and those events cause ACPIPHP to attempt to remove the device from the system (they are events for a device that was present previously and is not present any more, so that's what should be done according to the spec). Then, the system stops functioning correctly. Since the hotplug events in question were simply silently ignored previously, the least intrusive way to address that problem is to make ACPIPHP ignore them again. For this purpose, introduce a new ACPI device flag, no_hotplug, and modify ACPIPHP to ignore hotplug events for PCI devices whose ACPI companions have that flag set. Next, make the radeon and nouveau switcheroo detection code set the no_hotplug flag for the discrete graphics' ACPI companion. 
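On the driver side this reduces to a single call against the GPU's ACPI handle; a minimal sketch (the radeon/nouveau hunks are outside this include/-only extract):

	/* in the switcheroo detection path, once the dGPU handle is known */
	acpi_bus_no_hotplug(dhandle);	/* ACPIPHP will now ignore hotplug
					   events for this device */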
Fixes: bbd34fcdd1b2 (ACPI / hotplug / PCI: Register all devices under the given bridge) References: https://bugzilla.kernel.org/show_bug.cgi?id=61891 References: https://bugzilla.kernel.org/show_bug.cgi?id=64891 Reported-and-tested-by: Mike Lothian Reported-and-tested-by: Reported-and-tested-by: Joaquín Aramendía Cc: Alex Deucher Cc: Dave Airlie Cc: Takashi Iwai Signed-off-by: Rafael J. Wysocki Cc: 3.12+ # 3.12+ --- include/acpi/acpi_bus.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 7b2de026a4f3..ca49ebd231ca 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -168,7 +168,8 @@ struct acpi_device_flags { u32 ejectable:1; u32 power_manageable:1; u32 match_driver:1; - u32 reserved:27; + u32 no_hotplug:1; + u32 reserved:26; }; /* File System */ @@ -343,6 +344,7 @@ extern struct kobject *acpi_kobj; extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); void acpi_bus_private_data_handler(acpi_handle, void *); int acpi_bus_get_private_data(acpi_handle, void **); +void acpi_bus_no_hotplug(acpi_handle handle); extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); extern int register_acpi_notifier(struct notifier_block *); extern int unregister_acpi_notifier(struct notifier_block *); -- cgit v1.2.3 From 2205369a314e12fcec4781cc73ac9c08fc2b47de Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Dec 2013 16:23:35 -0500 Subject: vlan: Fix header ops passthru when doing TX VLAN offload. When the vlan code detects that the real device can do TX VLAN offloads in hardware, it tries to arrange for the real device's header_ops to be invoked directly. But it does so illegally, by simply hooking the real device's header_ops up to the VLAN device. This doesn't work because we will end up invoking a set of header_ops routines which expect a device type which matches the real device, but will see a VLAN device instead. Fix this by providing a pass-thru set of header_ops which will arrange to pass the proper real device instead. To facilitate this add a dev_rebuild_header(). There are implementations which provide a ->cache and ->create but not a ->rebuild (f.e. PLIP). So we need a helper function just like dev_hard_header() to avoid crashes. Use this helper in the one existing place where the header_ops->rebuild was being invoked, the neighbour code. With lots of help from Florian Westphal. Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d9a550bf3e8e..7514b9c37a39 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1912,6 +1912,15 @@ static inline int dev_parse_header(const struct sk_buff *skb, return dev->header_ops->parse(skb, haddr); } +static inline int dev_rebuild_header(struct sk_buff *skb) +{ + const struct net_device *dev = skb->dev; + + if (!dev->header_ops || !dev->header_ops->rebuild) + return 0; + return dev->header_ops->rebuild(skb); +} + typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); int register_gifconf(unsigned int family, gifconf_func_t *gifconf); static inline int unregister_gifconf(unsigned int family) -- cgit v1.2.3 From 7e0309631ecf0cd16edba72ff74747fa1b96ead3 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 1 Jan 2014 23:04:25 +0100 Subject: net: llc: fix order of evaluation in llc_conn_ac_inc_vr_by_1 Function llc_conn_ac_inc_vr_by_1() evaluates via macro PDU_GET_NEXT_Vr() into ... llc_sk(sk)->vR = ++llc_sk(sk)->vR & 0xffffffffffffff7f ... but the order in which the side effects take place is undefined because there is no intervening sequence point. As llc_sk(sk)->vR is written in llc_sk(sk)->vR (assignment left-hand side) and written in ++llc_sk(sk)->vR & 0xffffffffffffff7f this might possibly yield undefined behavior. The final value of llc_sk(sk)->vR is ambiguous, because, depending on the order of expression evaluation, the increment may occur before, after, or interleaved with the assignment. In C, evaluating such an expression yields undefined behavior. Since we're doing the increment via PDU_GET_NEXT_Vr() macro and the only place it is being used is from llc_conn_ac_inc_vr_by_1(), in order to increment vR by 1 with a follow-up optimized modulo, rewrite the expression into ((vR + 1) & CONST) in order to fix this. Signed-off-by: Daniel Borkmann Cc: Arnaldo Carvalho de Melo Cc: Stephen Hemminger Signed-off-by: David S. Miller --- include/net/llc_pdu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index 31e2de7d57c5..c0f0a13ed818 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h @@ -142,7 +142,7 @@ #define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0) #define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1) -#define PDU_GET_NEXT_Vr(sn) (++sn & ~LLC_2_SEQ_NBR_MODULO) +#define PDU_GET_NEXT_Vr(sn) (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO) /* FRMR information field macros */ -- cgit v1.2.3 From 619a60ee04be33238721a15c1f9704a2a515a33e Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Thu, 2 Jan 2014 14:39:44 -0500 Subject: sctp: Remove outqueue empty state The SCTP outqueue structure maintains a list of data chunks that are pending transmission, the list of chunks that are pending a retransmission and the length of data in flight. It also tries to keep the empty state so that it can perform the shutdown sequence or notify the user. The problem is that the empty state is inconsistently tracked. It is possible to completely drain the queue without sending anything when using PR-SCTP. In this case, the empty state will not be correctly set, as reported by Jamal Hadi Salim. This can cause an association to be permanently stuck in the SHUTDOWN_PENDING state. Additionally, SCTP is incredibly inefficient when setting the empty state.
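The direct check the patch moves to can be written straight against fields the outqueue already carries; a minimal sketch (the actual helper lives in net/sctp, outside this include/-only extract):

	static inline int sctp_outq_is_empty(const struct sctp_outq *q)
	{
		return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
		       list_empty(&q->retransmit);
	}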
Even though all the data is available in the outqueue structure, we ignore it and walk a list of transports. In the end, we can completely remove the extra empty state and figure out if the queue is empty by looking at 3 things: the length of pending data, the length of in-flight data, and the existence of retransmit data. All of these are already in the structure. Reported-by: Jamal Hadi Salim Signed-off-by: Vlad Yasevich Acked-by: Neil Horman Tested-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include') diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 67b5d0068273..0a248b323d87 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1046,9 +1046,6 @@ struct sctp_outq { /* Corked? */ char cork; - - /* Is this structure empty? */ - char empty; }; void sctp_outq_init(struct sctp_association *, struct sctp_outq *); -- cgit v1.2.3 From 7a7ffbabf99445704be01bff5d7e360da908cf8e Mon Sep 17 00:00:00 2001 From: Wei-Chun Chao Date: Thu, 26 Dec 2013 13:10:22 -0800 Subject: ipv4: fix tunneled VM traffic over hw VXLAN/GRE GSO NIC VM to VM GSO traffic is broken if it goes through a VXLAN or GRE tunnel and the physical NIC on the host supports hardware VXLAN/GRE GSO offload (e.g. bnx2x and next-gen mlx4). Two issues - (VXLAN) VM traffic has SKB_GSO_DODGY and SKB_GSO_UDP_TUNNEL with SKB_GSO_TCP/UDP set depending on the inner protocol. GSO header integrity check fails in udp4_ufo_fragment if the inner protocol is TCP. Also gso_segs is calculated incorrectly using skb->len that includes the tunnel header. Fix: the robust check should only be applied to the inner packet. (VXLAN & GRE) Once the GSO header integrity check passes, NULL segs is returned and the original skb is sent to hardware. However the tunnel header is already pulled. Fix: the tunnel header needs to be restored so that hardware can perform GSO properly on the original packet. Signed-off-by: Wei-Chun Chao Signed-off-by: David S. Miller --- include/linux/netdevice.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7514b9c37a39..5faaadb0c74f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3017,6 +3017,19 @@ static inline void netif_set_gso_max_size(struct net_device *dev, dev->gso_max_size = size; } +static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, + int pulled_hlen, u16 mac_offset, + int mac_len) +{ + skb->protocol = protocol; + skb->encapsulation = 1; + skb_push(skb, pulled_hlen); + skb_reset_transport_header(skb); + skb->mac_header = mac_offset; + skb->network_header = skb->mac_header + mac_len; + skb->mac_len = mac_len; +} + static inline bool netif_is_macvlan(struct net_device *dev) { return dev->priv_flags & IFF_MACVLAN; -- cgit v1.2.3 From f663dd9aaf9ed124f25f0f8452edf238f087ad50 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 10 Jan 2014 16:18:26 +0800 Subject: net: core: explicitly select a txq before doing l2 forwarding Currently, the tx queue is selected implicitly in ndo_dfwd_start_xmit(). This will cause several issues: - NETIF_F_LLTX was removed for macvlan, so txq locking was done for macvlan instead of the lower device, which misses the necessary txq synchronization for the lower device such as txq stopping or freezing required by the dev watchdog or control path. - dev_hard_start_xmit() was called with a NULL txq, which bypasses the net device watchdog.
- dev_hard_start_xmit() does not check txq everywhere, which will lead to a crash when tso is disabled for the lower device. Fix this by explicitly introducing a new param for .ndo_select_queue() for just selecting queues in the case of l2 forwarding offload. netdev_pick_tx() was also extended to accept this parameter and dev_queue_xmit_accel() was used to do l2 forwarding transmission. With these fixes, NETIF_F_LLTX can be preserved for macvlan and there's no need to check txq against NULL in dev_hard_start_xmit(). Also there's no need to keep a dedicated ndo_dfwd_start_xmit() and we can just reuse the code of dev_queue_xmit() to do the transmission. In the future, it will also be required for macvtap l2 forwarding support since it provides a necessary synchronization method. Cc: John Fastabend Cc: Neil Horman Cc: e1000-devel@lists.sourceforge.net Signed-off-by: Jason Wang Acked-by: Neil Horman Acked-by: John Fastabend Signed-off-by: David S. Miller --- include/linux/netdevice.h | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5faaadb0c74f..ce2a1f5f9a1e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -769,7 +769,8 @@ struct netdev_phys_port_id { * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) * Required can not be NULL. * - * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); + * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, * void *accel_priv); * Called to decide which queue to when device supports multiple * transmit queues. * @@ -990,7 +991,8 @@ struct net_device_ops { netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, struct net_device *dev); u16 (*ndo_select_queue)(struct net_device *dev, - struct sk_buff *skb); + struct sk_buff *skb, + void *accel_priv); void (*ndo_change_rx_flags)(struct net_device *dev, int flags); void (*ndo_set_rx_mode)(struct net_device *dev); @@ -1529,7 +1531,8 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, } struct netdev_queue *netdev_pick_tx(struct net_device *dev, - struct sk_buff *skb); + struct sk_buff *skb, + void *accel_priv); u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); /* @@ -1819,6 +1822,7 @@ int dev_close(struct net_device *dev); void dev_disable_lro(struct net_device *dev); int dev_loopback_xmit(struct sk_buff *newskb); int dev_queue_xmit(struct sk_buff *skb); +int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); int register_netdevice(struct net_device *dev); void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); void unregister_netdevice_many(struct list_head *head); @@ -2426,7 +2430,7 @@ int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_port_id *ppid); int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, - struct netdev_queue *txq, void *accel_priv); + struct netdev_queue *txq); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); extern int netdev_budget; -- cgit v1.2.3