From ac37e2515c1a89c477459a2020b6bfdedabdb91b Mon Sep 17 00:00:00 2001 From: huaibin Wang Date: Wed, 11 Feb 2015 18:10:36 +0100 Subject: xfrm: release dst_orig in case of error in xfrm_lookup() dst_orig should be released on error. Function like __xfrm_route_forward() expects that behavior. Since a recent commit, xfrm_lookup() may also be called by xfrm_lookup_route(), which expects the opposite. Let's introduce a new flag (XFRM_LOOKUP_KEEP_DST_REF) to tell what should be done in case of error. Fixes: f92ee61982d("xfrm: Generate blackhole routes only from route lookup functions") Signed-off-by: huaibin Wang Signed-off-by: Nicolas Dichtel Signed-off-by: Steffen Klassert --- include/net/dst.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/net/dst.h b/include/net/dst.h index a8ae4e760778..0fb99a26e973 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -481,6 +481,7 @@ void dst_init(void); enum { XFRM_LOOKUP_ICMP = 1 << 0, XFRM_LOOKUP_QUEUE = 1 << 1, + XFRM_LOOKUP_KEEP_DST_REF = 1 << 2, }; struct flowi; -- cgit v1.2.3 From 4b8164b91d9fdff4dbac0a742d076bdff7fda21b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 31 Jan 2015 20:08:47 -0500 Subject: new helper: dup_iter() Copy iter and kmemdup the underlying array for the copy. Returns a pointer to result of kmemdup() to be kfree()'d later. Signed-off-by: Al Viro --- include/linux/uio.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/uio.h b/include/linux/uio.h index 07a022641996..71880299ed48 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -98,6 +98,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start); int iov_iter_npages(const struct iov_iter *i, int maxpages); +const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); + static inline size_t iov_iter_count(struct iov_iter *i) { return i->count; -- cgit v1.2.3 From 846cd66788b11105a62785078360c8854aa98310 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 18 Feb 2015 11:38:06 +0100 Subject: net: Initialize all members in skb_gro_remcsum_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit skb_gro_remcsum_init() initializes the gro_remcsum.delta member only, leading to compiler warnings about a possibly uninitialized gro_remcsum.offset member: drivers/net/vxlan.c: In function ‘vxlan_gro_receive’: drivers/net/vxlan.c:602: warning: ‘grc.offset’ may be used uninitialized in this function net/ipv4/fou.c: In function ‘gue_gro_receive’: net/ipv4/fou.c:262: warning: ‘grc.offset’ may be used uninitialized in this function While these are harmless for now: - skb_gro_remcsum_process() sets offset before changing delta, - skb_gro_remcsum_cleanup() checks if delta is non-zero before accessing offset, it's safer to let the initialization function initialize all members. Signed-off-by: Geert Uytterhoeven Acked-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5897b4ea5a3f..429d1790a27e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2342,6 +2342,7 @@ struct gro_remcsum { static inline void skb_gro_remcsum_init(struct gro_remcsum *grc) { + grc->offset = 0; grc->delta = 0; } -- cgit v1.2.3 From 278f7b4fffce9ad267406cf8800df271d14f4a16 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 19 Feb 2015 12:13:13 +0300 Subject: caif: fix a signedness bug in cfpkt_iterate() The cfpkt_iterate() function can return -EPROTO on error, but the function is a u16 so the negative value gets truncated to a positive unsigned short. This causes a static checker warning. The only caller which might care is cffrml_receive(), when it's checking the frame checksum. I modified cffrml_receive() so that it never says -EPROTO is a valid checksum. Also this isn't ever going to be inlined so I removed the "inline". Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- include/net/caif/cfpkt.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h index 1c1ad46250d5..fe328c52c46b 100644 --- a/include/net/caif/cfpkt.h +++ b/include/net/caif/cfpkt.h @@ -171,7 +171,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos); * @return Checksum of buffer. */ -u16 cfpkt_iterate(struct cfpkt *pkt, +int cfpkt_iterate(struct cfpkt *pkt, u16 (*iter_func)(u16 chks, void *buf, u16 len), u16 data); -- cgit v1.2.3 From b9ebafbe8cfeeddec881504c446cccd0d87a51b6 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 20 Feb 2015 06:48:57 -0800 Subject: rhashtable: ensure cache line alignment on bucket_table struct bucket_table contains mostly read fields : size, locks_mask, locks. Make sure these are not sharing a cache line with buckets[] Signed-off-by: Eric Dumazet Acked-by: Daniel Borkmann Acked-by: Thomas Graf Signed-off-by: David S. Miller --- include/linux/rhashtable.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 58851275fed9..cb2104be2135 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -54,10 +54,11 @@ struct rhash_head { * @buckets: size * hash buckets */ struct bucket_table { - size_t size; - unsigned int locks_mask; - spinlock_t *locks; - struct rhash_head __rcu *buckets[]; + size_t size; + unsigned int locks_mask; + spinlock_t *locks; + + struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); -- cgit v1.2.3 From 30ff54765976e132674e3eae2071ed8ed494665c Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Mon, 23 Feb 2015 08:17:12 -0500 Subject: net: sched: export tc_connmark.h so it is uapi accessible Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- include/uapi/linux/tc_act/Kbuild | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild index 19d5219b0b99..242cf0c6e33d 100644 --- a/include/uapi/linux/tc_act/Kbuild +++ b/include/uapi/linux/tc_act/Kbuild @@ -9,3 +9,4 @@ header-y += tc_pedit.h header-y += tc_skbedit.h header-y += tc_vlan.h header-y += tc_bpf.h +header-y += tc_connmark.h -- cgit v1.2.3 From 737eb0301f296d55c22350c6968ff1ef51bacb5f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 20 Feb 2015 14:53:46 +0000 Subject: genirq / PM: better describe IRQF_NO_SUSPEND semantics The IRQF_NO_SUSPEND flag is intended to be used for interrupts required to be enabled during the suspend-resume cycle. This mostly consists of IPIs and timer interrupts, potentially including chained irqchip interrupts if these are necessary to handle timers or IPIs. If an interrupt does not fall into one of the aforementioned categories, requesting it with IRQF_NO_SUSPEND is likely incorrect. Using IRQF_NO_SUSPEND does not guarantee that the interrupt can wake the system from a suspended state. For an interrupt to be able to trigger a wakeup, it may be necessary to program various components of the system. In these cases it is necessary to use {enable,disabled}_irq_wake. Unfortunately, several drivers assume that IRQF_NO_SUSPEND ensures that an IRQ can wake up the system, and the documentation can be read ambiguously w.r.t. this property. This patch updates the documentation regarding IRQF_NO_SUSPEND to make this caveat explicit, hopefully making future misuse rarer. Cleanup of existing misuse will occur as part of later patch series. Signed-off-by: Mark Rutland Acked-by: Peter Zijlstra (Intel) Signed-off-by: Rafael J. Wysocki --- include/linux/interrupt.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index d9b05b5bf8c7..606771c7cac2 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -52,7 +52,9 @@ * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. * Used by threaded interrupts which need to keep the * irq line disabled until the threaded handler has been run. - * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend + * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee + * that this interrupt will wake the system from a suspended + * state. See Documentation/power/suspend-and-interrupts.txt * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set * IRQF_NO_THREAD - Interrupt cannot be threaded * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device -- cgit v1.2.3 From a38bb793eaebe1178fbd8ef6ab66ccc062bad505 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Wed, 25 Feb 2015 10:23:58 +0200 Subject: OMAPDSS: fix regression with display sysfs files omapdss's sysfs directories for displays used to have 'name' file, giving the name for the display. This file was later renamed to 'display_name' to avoid conflicts with i2c sysfs 'name' file. Looks like at least xserver-xorg-video-omap3 requires the 'name' file to be present. To fix the regression, this patch creates new kobjects for each display, allowing us to create sysfs directories for the displays. This way we have the whole directory for omapdss, and there will be no sysfs file clashes with the underlying display device's sysfs files. We can thus add the 'name' sysfs file back. 
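As an illustration of the mechanism described above -- and not code from this patch -- the following is a minimal module-style sketch of exposing a per-display 'name' file through a dedicated kobject. The directory name, the attribute value and the parenting under kernel_kobj are assumptions made for the example; omapdss parents its display kobjects differently and fills in the real display name.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static struct kobject *display_kobj;

/* Contents of the 'name' file; a fixed string here, the real driver
 * would report the display's name. */
static ssize_t name_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%s\n", "display0");
}

static struct kobj_attribute name_attr = __ATTR_RO(name);

static int __init example_init(void)
{
	int ret;

	/* Creates /sys/kernel/display0/name purely for demonstration. */
	display_kobj = kobject_create_and_add("display0", kernel_kobj);
	if (!display_kobj)
		return -ENOMEM;

	ret = sysfs_create_file(display_kobj, &name_attr.attr);
	if (ret)
		kobject_put(display_kobj);
	return ret;
}

static void __exit example_exit(void)
{
	kobject_put(display_kobj);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
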
Signed-off-by: Tomi Valkeinen Tested-by: NeilBrown --- include/video/omapdss.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/video/omapdss.h b/include/video/omapdss.h index 60de61fea8e3..c8ed15daad02 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h @@ -689,6 +689,7 @@ struct omapdss_dsi_ops { }; struct omap_dss_device { + struct kobject kobj; struct device *dev; struct module *owner; -- cgit v1.2.3 From bc4b1f486fe69b86769e07c8edce472327a8462b Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Sun, 15 Feb 2015 11:57:53 +0700 Subject: Revert "USB: serial: make bulk_out_size a lower limit" This reverts commit 5083fd7bdfe6760577235a724cf6dccae13652c2. A bulk-out size smaller than the end-point size is indeed valid. The offending commit broke the usb-debug driver for EHCI debug devices, which use 8-byte buffers. Fixes: 5083fd7bdfe6 ("USB: serial: make bulk_out_size a lower limit") Reported-by: "Li, Elvin" Cc: stable # v3.15 Signed-off-by: Johan Hovold --- include/linux/usb/serial.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 9bb547c7bce7..704a1ab8240c 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -190,8 +190,7 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data) * @num_ports: the number of different ports this device will have. * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer * (0 = end-point size) - * @bulk_out_size: minimum number of bytes to allocate for bulk-out buffer - * (0 = end-point size) + * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size) * @calc_num_ports: pointer to a function to determine how many ports this * device has dynamically. It will be called after the probe() * callback is called, but before attach() -- cgit v1.2.3 From 09ee96b21456883e108c3b00597bb37ec512151b Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Thu, 26 Feb 2015 11:41:28 -0500 Subject: dm snapshot: suspend merging snapshot when doing exception handover The "dm snapshot: suspend origin when doing exception handover" commit fixed a exception store handover bug associated with pending exceptions to the "snapshot-origin" target. However, a similar problem exists in snapshot merging. When snapshot merging is in progress, we use the target "snapshot-merge" instead of "snapshot-origin". Consequently, during exception store handover, we must find the snapshot-merge target and suspend its associated mapped_device. To avoid lockdep warnings, the target must be suspended and resumed without holding _origins_lock. Introduce a dm_hold() function that grabs a reference on a mapped_device, but unlike dm_get(), it doesn't crash if the device has the DMF_FREEING flag set, it returns an error in this case. In snapshot_resume() we grab the reference to the origin device using dm_hold() while holding _origins_lock (_origins_lock guarantees that the device won't disappear). Then we release _origins_lock, suspend the device and grab _origins_lock again. NOTE to stable@ people: When backporting to kernels 3.18 and older, use dm_internal_suspend and dm_internal_resume instead of dm_internal_suspend_fast and dm_internal_resume_fast. 
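The following user-space sketch -- not part of this patch -- illustrates the "conditional hold" pattern the commit message describes: unlike an unconditional get, the hold helper refuses to take a reference once the freeing flag is set and reports an error instead of crashing. All names and the error value are invented for the example.

#include <stdio.h>

#define FLAG_FREEING (1 << 0)	/* stand-in for DMF_FREEING */

struct object {
	unsigned int flags;
	int refcount;
};

/* dm_get()-like: only legal when the object is known to be alive. */
static void obj_get(struct object *o)
{
	o->refcount++;
}

/* dm_hold()-like: refuse to take a reference on a dying object. */
static int obj_hold(struct object *o)
{
	if (o->flags & FLAG_FREEING)
		return -1;	/* the kernel helper reports an error here */
	o->refcount++;
	return 0;
}

int main(void)
{
	struct object o = { .flags = 0, .refcount = 1 };

	obj_get(&o);					/* fine: object is alive */
	printf("hold on live object:  %d\n", obj_hold(&o));
	o.flags |= FLAG_FREEING;			/* teardown has started */
	printf("hold on dying object: %d\n", obj_hold(&o));
	return 0;
}
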
Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org --- include/linux/device-mapper.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 2646aed1d3fe..fd23978d93fe 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -375,6 +375,7 @@ int dm_create(int minor, struct mapped_device **md); */ struct mapped_device *dm_get_md(dev_t dev); void dm_get(struct mapped_device *md); +int dm_hold(struct mapped_device *md); void dm_put(struct mapped_device *md); /* -- cgit v1.2.3 From 4c4b52d9b2df45e8216d3e30b5452e4a364d2cac Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 25 Feb 2015 16:31:54 +0100 Subject: rhashtable: remove indirection for grow/shrink decision functions Currently, all real users of rhashtable default their grow and shrink decision functions to rht_grow_above_75() and rht_shrink_below_30(), so that there's currently no need to have this explicitly selectable. It can/should be generic and private inside rhashtable until a real use case pops up. Since we can make this private, we'll save us this additional indirection layer and can improve insertion/deletion time as well. Reference: http://patchwork.ozlabs.org/patch/443040/ Suggested-by: David S. Miller Signed-off-by: Daniel Borkmann Acked-by: Thomas Graf Signed-off-by: David S. Miller --- include/linux/rhashtable.h | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'include') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index cb2104be2135..d438eeb08bff 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -79,12 +79,6 @@ struct rhashtable; * @locks_mul: Number of bucket locks to allocate per cpu (default: 128) * @hashfn: Function to hash key * @obj_hashfn: Function to hash object - * @grow_decision: If defined, may return true if table should expand - * @shrink_decision: If defined, may return true if table should shrink - * - * Note: when implementing the grow and shrink decision function, min/max - * shift must be enforced, otherwise, resizing watermarks they set may be - * useless. 
*/ struct rhashtable_params { size_t nelem_hint; @@ -98,10 +92,6 @@ struct rhashtable_params { size_t locks_mul; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; - bool (*grow_decision)(const struct rhashtable *ht, - size_t new_size); - bool (*shrink_decision)(const struct rhashtable *ht, - size_t new_size); }; /** @@ -193,9 +183,6 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); -bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); -bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); - int rhashtable_expand(struct rhashtable *ht); int rhashtable_shrink(struct rhashtable *ht); -- cgit v1.2.3 From 140e049c64ce848392adbf4678983ecc76888dde Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 26 Feb 2015 17:42:42 -0500 Subject: NFS: Add a helper to set attribute barriers Signed-off-by: Trond Myklebust Tested-by: Chuck Lever --- include/linux/nfs_fs.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 2f77e0c651c8..3a4ffb5856cd 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -369,6 +369,7 @@ extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ct extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); extern u64 nfs_compat_user_ino64(u64 fileid); extern void nfs_fattr_init(struct nfs_fattr *fattr); +extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr); extern unsigned long nfs_inc_attr_generation_counter(void); extern struct nfs_fattr *nfs_alloc_fattr(void); -- cgit v1.2.3 From f044636d972246d451e06226cc1675d5da389762 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 26 Feb 2015 16:09:04 -0500 Subject: NFS: Add attribute update barriers to nfs_setattr_update_inode() Ensure that other operations which raced with our setattr RPC call cannot revert the file attribute changes that were made on the server. To do so, we artificially bump the attribute generation counter on the inode so that all calls to nfs_fattr_init() that precede ours will be dropped. The motivation for the patch came from Chuck Lever's reports of readaheads racing with truncate operations and causing the file size to be reverted. 
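Purely as an illustration of the barrier idea described above -- this code is not from the patch and does not use the NFS data structures -- the sketch below shows a monotonic generation counter: updates stamped with a generation older than the barrier are dropped, while updates sampled after the barrier are applied.

#include <stdio.h>

static unsigned long attr_generation;	/* global generation counter */

struct cached_attrs {
	unsigned long gencount;	/* generation the cached data was sampled at */
	long size;
};

/* Stamp a reply with the generation in effect when it was sampled. */
static unsigned long sample_generation(void)
{
	return attr_generation;
}

/* Raise the barrier: make all previously sampled replies look stale. */
static void set_barrier(void)
{
	attr_generation++;
}

static void apply_update(struct cached_attrs *c, unsigned long gen, long size)
{
	if (gen < attr_generation) {		/* sampled before the barrier */
		printf("dropping stale update (size %ld)\n", size);
		return;
	}
	c->gencount = gen;
	c->size = size;
	printf("applied update (size %ld)\n", size);
}

int main(void)
{
	struct cached_attrs c = { 0, 0 };
	unsigned long old = sample_generation();	/* e.g. a readahead reply */

	set_barrier();					/* setattr raised the barrier */
	apply_update(&c, sample_generation(), 100);	/* post-barrier: applied */
	apply_update(&c, old, 4096);			/* pre-barrier: dropped */
	return 0;
}
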
Reported-by: Chuck Lever Signed-off-by: Trond Myklebust Tested-by: Chuck Lever --- include/linux/nfs_fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 3a4ffb5856cd..f26e64e0aff8 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -356,7 +356,7 @@ extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *ino extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); extern int nfs_setattr(struct dentry *, struct iattr *); -extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); +extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *); extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, struct nfs4_label *label); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); -- cgit v1.2.3 From a08a8cd375db9769588257e7782f6b6b68561b88 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 26 Feb 2015 17:36:09 -0500 Subject: NFS: Add attribute update barriers to NFS writebacks Ensure that other operations that race with our write RPC calls cannot revert the file size updates that were made on the server. Signed-off-by: Trond Myklebust Tested-by: Chuck Lever --- include/linux/nfs_fs.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index f26e64e0aff8..59b1516b9fd4 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -343,6 +343,7 @@ extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *, extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); +extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); extern void nfs_access_set_mask(struct nfs_access_entry *, u32); -- cgit v1.2.3 From 604b91fee4fc53ddc83c221c9bbce771898ec872 Mon Sep 17 00:00:00 2001 From: Yuval Shaia Date: Sun, 1 Mar 2015 02:57:38 -0800 Subject: xen: Remove trailing semicolon from xenbus_register_frontend() definition Signed-off-by: Yuval Shaia Signed-off-by: David Vrabel --- include/xen/xenbus.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index b78f21caf55a..b0f1c9e5d687 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -114,9 +114,9 @@ int __must_check __xenbus_register_backend(struct xenbus_driver *drv, const char *mod_name); #define xenbus_register_frontend(drv) \ - __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME); + __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME) #define xenbus_register_backend(drv) \ - __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME); + __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME) void xenbus_unregister_driver(struct xenbus_driver *drv); -- cgit v1.2.3 From f5956fafb00afab474c3886b6297f9b5e7aff722 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Mon, 2 Mar 2015 18:22:15 +0200 Subject: net/mlx4_core: Fix wrong mask and error flow for the 
update-qp command The bit mask for currently supported driver features (MLX4_UPDATE_QP_SUPPORTED_ATTRS) of the update-qp command was defined twice (using enum value and pre-processor define directive) and wrong. The return value of the call to mlx4_update_qp() from within the SRIOV resource-tracker was wrongly voided down. Fix both issues. issue: none Fixes: 09e05c3f78e9 ('net/mlx4: Set vlan stripping policy by the right command') Fixes: ce8d9e0d6746 ('net/mlx4_core: Add UPDATE_QP SRIOV wrapper support') Signed-off-by: Matan Barak Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- include/linux/mlx4/qp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 2bbc62aa818a..551f85456c11 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -427,7 +427,7 @@ struct mlx4_wqe_inline_seg { enum mlx4_update_qp_attr { MLX4_UPDATE_QP_SMAC = 1 << 0, - MLX4_UPDATE_QP_VSD = 1 << 2, + MLX4_UPDATE_QP_VSD = 1 << 1, MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1 }; -- cgit v1.2.3 From c6331ba3d2d68758f36dbc3e09e648d312c24d97 Mon Sep 17 00:00:00 2001 From: Marcin Bis Date: Sun, 1 Mar 2015 13:49:32 +0100 Subject: spi: fix a typo in comment. alway -> always Signed-off-by: Marcin Bis Signed-off-by: Mark Brown --- include/linux/spi/spi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index ed9489d893a4..856d34dde79b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -649,7 +649,7 @@ struct spi_transfer { * sequence completes. On some systems, many such sequences can execute as * as single programmed DMA transfer. On all systems, these messages are * queued, and might complete after transactions to other devices. Messages - * sent to a given spi_device are alway executed in FIFO order. + * sent to a given spi_device are always executed in FIFO order. * * The code that submits an spi_message (and its spi_transfers) * to the lower layers is responsible for managing its memory. -- cgit v1.2.3 From 874f946376de57c8d6230b30ad71f742883fee3a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 2 Mar 2015 23:32:08 -0500 Subject: NFS: Fix a regression in the read() syscall When invalidating the page cache for a regular file, we want to first sync all dirty data to disk and then call invalidate_inode_pages2(). The latter relies on nfs_launder_page() and nfs_release_page() to deal respectively with dirty pages, and unstable written pages. When commit 9590544694bec ("NFS: avoid deadlocks with loop-back mounted NFS filesystems.") changed the behaviour of nfs_release_page(), then it made it possible for invalidate_inode_pages2() to fail with an EBUSY. Unfortunately, that error is then propagated back to read(). Let's therefore work around the problem for now by protecting the call to sync the data and invalidate_inode_pages2() so that they are atomic w.r.t. the addition of new writes. Later on, we can revisit whether or not we still need nfs_launder_page() and nfs_release_page(). 
Signed-off-by: Trond Myklebust --- include/linux/nfs_fs.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 59b1516b9fd4..b01ccf371fdc 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -356,6 +356,7 @@ extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); +extern int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping); extern int nfs_setattr(struct dentry *, struct iattr *); extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *); extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, -- cgit v1.2.3 From 02f513a0970d97e4fc5f262f5a6c814014af524e Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Thu, 5 Feb 2015 14:02:09 +0800 Subject: pm: at91: Workaround DDRSDRC self-refresh bug with LPDDR1 memories. The DDRSDR controller fails miserably to put LPDDR1 memories in self-refresh. Force the controller to think it has DDR2 memories during the self-refresh period, as the DDR2 self-refresh spec is equivalent to LPDDR1, and is correctly implemented in the controller. Assume that the second controller has the same fault, but that is untested. Signed-off-by: Peter Rosin Acked-by: Nicolas Ferre Signed-off-by: Nicolas Ferre --- include/soc/at91/at91sam9_ddrsdr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/soc/at91/at91sam9_ddrsdr.h b/include/soc/at91/at91sam9_ddrsdr.h index 0210797abf2e..dc10c52e0e91 100644 --- a/include/soc/at91/at91sam9_ddrsdr.h +++ b/include/soc/at91/at91sam9_ddrsdr.h @@ -92,7 +92,7 @@ #define AT91_DDRSDRC_UPD_MR (3 << 20) /* Update load mode register and extended mode register */ #define AT91_DDRSDRC_MDR 0x20 /* Memory Device Register */ -#define AT91_DDRSDRC_MD (3 << 0) /* Memory Device Type */ +#define AT91_DDRSDRC_MD (7 << 0) /* Memory Device Type */ #define AT91_DDRSDRC_MD_SDR 0 #define AT91_DDRSDRC_MD_LOW_POWER_SDR 1 #define AT91_DDRSDRC_MD_LOW_POWER_DDR 3 -- cgit v1.2.3 From 86f1ec32318159a24de349f0a38e79b9d2b3131a Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 3 Mar 2015 20:04:20 +0000 Subject: netfilter: nf_tables: fix userdata length overflow The NFT_USERDATA_MAXLEN is defined to 256, however we only have a u8 to store its size. Introduce a struct nft_userdata which contains a length field and indicate its presence using a single bit in the rule. The length field of struct nft_userdata is also a u8, however we don't store zero sized data, so the actual length is udata->len + 1. 
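A self-contained user-space sketch of the length encoding described above -- not the kernel structure itself: because zero-sized user data is never stored, a u8 length field holding size - 1 can describe 1 to 256 bytes.

#include <stdio.h>
#include <string.h>

struct userdata {
	unsigned char len;		/* stores actual_size - 1 */
	unsigned char data[256];	/* worst case, fixed for this sketch */
};

static void store(struct userdata *u, const void *buf, size_t size)
{
	/* size must be 1..256; zero-sized data is simply not stored */
	u->len = (unsigned char)(size - 1);
	memcpy(u->data, buf, size);
}

static size_t stored_size(const struct userdata *u)
{
	return (size_t)u->len + 1;
}

int main(void)
{
	struct userdata u;
	char comment[256];

	memset(comment, 'x', sizeof(comment));
	store(&u, comment, 256);	/* 256 bytes fit even though len is a u8 */
	printf("stored %zu bytes, len field holds %u\n",
	       stored_size(&u), (unsigned int)u.len);
	return 0;
}
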
Signed-off-by: Patrick McHardy Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 9eaaa7884586..decb9a095ae7 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -119,6 +119,22 @@ int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg, const struct nft_data *data, enum nft_data_types type); + +/** + * struct nft_userdata - user defined data associated with an object + * + * @len: length of the data + * @data: content + * + * The presence of user data is indicated in an object specific fashion, + * so a length of zero can't occur and the value "len" indicates data + * of length len + 1. + */ +struct nft_userdata { + u8 len; + unsigned char data[0]; +}; + /** * struct nft_set_elem - generic representation of set elements * @@ -380,7 +396,7 @@ static inline void *nft_expr_priv(const struct nft_expr *expr) * @handle: rule handle * @genmask: generation mask * @dlen: length of expression data - * @ulen: length of user data (used for comments) + * @udata: user data is appended to the rule * @data: expression data */ struct nft_rule { @@ -388,7 +404,7 @@ struct nft_rule { u64 handle:42, genmask:2, dlen:12, - ulen:8; + udata:1; unsigned char data[] __attribute__((aligned(__alignof__(struct nft_expr)))); }; @@ -476,7 +492,7 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule) return (struct nft_expr *)&rule->data[rule->dlen]; } -static inline void *nft_userdata(const struct nft_rule *rule) +static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule) { return (void *)&rule->data[rule->dlen]; } -- cgit v1.2.3 From 17f480342026e54000731acaa69bf32787ce46cb Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 27 Feb 2015 00:07:55 +0100 Subject: genirq / PM: Add flag for shared NO_SUSPEND interrupt lines It currently is required that all users of NO_SUSPEND interrupt lines pass the IRQF_NO_SUSPEND flag when requesting the IRQ or the WARN_ON_ONCE() in irq_pm_install_action() will trigger. That is done to warn about situations in which unprepared interrupt handlers may be run unnecessarily for suspended devices and may attempt to access those devices by mistake. However, it may cause drivers that have no technical reasons for using IRQF_NO_SUSPEND to set that flag just because they happen to share the interrupt line with something like a timer. Moreover, the generic handling of wakeup interrupts introduced by commit 9ce7a25849e8 (genirq: Simplify wakeup mechanism) only works for IRQs without any NO_SUSPEND users, so the drivers of wakeup devices needing to use shared NO_SUSPEND interrupt lines for signaling system wakeup generally have to detect wakeup in their interrupt handlers. Thus if they happen to share an interrupt line with a NO_SUSPEND user, they also need to request that their interrupt handlers be run after suspend_device_irqs(). In both cases the reason for using IRQF_NO_SUSPEND is not because the driver in question has a genuine need to run its interrupt handler after suspend_device_irqs(), but because it happens to share the line with some other NO_SUSPEND user. Otherwise, the driver would do without IRQF_NO_SUSPEND just fine. 
To make it possible to specify that condition explicitly, introduce a new IRQ action handler flag for shared IRQs, IRQF_COND_SUSPEND, that, when set, will indicate to the IRQ core that the interrupt user is generally fine with suspending the IRQ, but it also can tolerate handler invocations after suspend_device_irqs() and, in particular, it is capable of detecting system wakeup and triggering it as appropriate from its interrupt handler. That will allow us to work around a problem with a shared timer interrupt line on at91 platforms. Link: http://marc.info/?l=linux-kernel&m=142252777602084&w=2 Link: http://marc.info/?t=142252775300011&r=1&w=2 Link: https://lkml.org/lkml/2014/12/15/552 Reported-by: Boris Brezillon Signed-off-by: Rafael J. Wysocki Acked-by: Peter Zijlstra (Intel) Acked-by: Mark Rutland --- include/linux/interrupt.h | 5 +++++ include/linux/irqdesc.h | 1 + 2 files changed, 6 insertions(+) (limited to 'include') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 606771c7cac2..2e88580194f0 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -59,6 +59,10 @@ * IRQF_NO_THREAD - Interrupt cannot be threaded * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device * resume time. + * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this + * interrupt handler after suspending interrupts. For system + * wakeup devices users need to implement wakeup detection in + * their interrupt handlers. */ #define IRQF_DISABLED 0x00000020 #define IRQF_SHARED 0x00000080 @@ -72,6 +76,7 @@ #define IRQF_FORCE_RESUME 0x00008000 #define IRQF_NO_THREAD 0x00010000 #define IRQF_EARLY_RESUME 0x00020000 +#define IRQF_COND_SUSPEND 0x00040000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index faf433af425e..dd1109fb241e 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -78,6 +78,7 @@ struct irq_desc { #ifdef CONFIG_PM_SLEEP unsigned int nr_actions; unsigned int no_suspend_depth; + unsigned int cond_suspend_depth; unsigned int force_resume_depth; #endif #ifdef CONFIG_PROC_FS -- cgit v1.2.3 From 440fd5283a87345cdd4237bdf45fb01130ea0056 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Fri, 23 Jan 2015 09:05:06 +0100 Subject: drm/mm: Support 4 GiB and larger ranges The current implementation is limited by the number of addresses that fit into an unsigned long. This causes problems on 32-bit Tegra where unsigned long is 32-bit but drm_mm is used to manage an IOVA space of 4 GiB. Given the 32-bit limitation, the range is limited to 4 GiB - 1 (or 4 GiB - 4 KiB for page granularity). This commit changes the start and size of the range to be an unsigned 64-bit integer, thus allowing much larger ranges to be supported. 
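To see why the unsigned long range was the limiting factor, here is a stand-alone illustration -- not code from the patch -- of what happens when a 4 GiB range is kept in a 32-bit variable versus the 64-bit type the patch switches to.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t size = 4ULL * 1024 * 1024 * 1024;	/* 4 GiB IOVA space */

	uint32_t as_u32 = (uint32_t)size;	/* what a 32-bit unsigned long holds */
	uint64_t as_u64 = size;			/* what the patch switches to */

	printf("4 GiB stored in 32 bits: %u\n", as_u32);	/* 0: wrapped */
	printf("4 GiB stored in 64 bits: %llu\n",
	       (unsigned long long)as_u64);			/* 4294967296 */
	return 0;
}
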
[airlied: fix i915 warnings and coloring callback] Signed-off-by: Thierry Reding Reviewed-by: Alex Deucher Reviewed-by: Chris Wilson Signed-off-by: Dave Airlie fixupo --- include/drm/drm_mm.h | 52 ++++++++++++++++++++++++++-------------------------- 1 file changed, 26 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index a24addfdfcec..0de6290df4da 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -68,8 +68,8 @@ struct drm_mm_node { unsigned scanned_preceeds_hole : 1; unsigned allocated : 1; unsigned long color; - unsigned long start; - unsigned long size; + u64 start; + u64 size; struct drm_mm *mm; }; @@ -82,16 +82,16 @@ struct drm_mm { unsigned int scan_check_range : 1; unsigned scan_alignment; unsigned long scan_color; - unsigned long scan_size; - unsigned long scan_hit_start; - unsigned long scan_hit_end; + u64 scan_size; + u64 scan_hit_start; + u64 scan_hit_end; unsigned scanned_blocks; - unsigned long scan_start; - unsigned long scan_end; + u64 scan_start; + u64 scan_end; struct drm_mm_node *prev_scanned_node; void (*color_adjust)(struct drm_mm_node *node, unsigned long color, - unsigned long *start, unsigned long *end); + u64 *start, u64 *end); }; /** @@ -124,7 +124,7 @@ static inline bool drm_mm_initialized(struct drm_mm *mm) return mm->hole_stack.next; } -static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node) +static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node) { return hole_node->start + hole_node->size; } @@ -140,13 +140,13 @@ static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_no * Returns: * Start of the subsequent hole. */ -static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node) +static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node) { BUG_ON(!hole_node->hole_follows); return __drm_mm_hole_node_start(hole_node); } -static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node) +static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) { return list_entry(hole_node->node_list.next, struct drm_mm_node, node_list)->start; @@ -163,7 +163,7 @@ static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node * Returns: * End of the subsequent hole. 
*/ -static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) +static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) { return __drm_mm_hole_node_end(hole_node); } @@ -222,7 +222,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, - unsigned long size, + u64 size, unsigned alignment, unsigned long color, enum drm_mm_search_flags sflags, @@ -245,7 +245,7 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, */ static inline int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node, - unsigned long size, + u64 size, unsigned alignment, enum drm_mm_search_flags flags) { @@ -255,11 +255,11 @@ static inline int drm_mm_insert_node(struct drm_mm *mm, int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, - unsigned long size, + u64 size, unsigned alignment, unsigned long color, - unsigned long start, - unsigned long end, + u64 start, + u64 end, enum drm_mm_search_flags sflags, enum drm_mm_allocator_flags aflags); /** @@ -282,10 +282,10 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, */ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node, - unsigned long size, + u64 size, unsigned alignment, - unsigned long start, - unsigned long end, + u64 start, + u64 end, enum drm_mm_search_flags flags) { return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, @@ -296,21 +296,21 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, void drm_mm_remove_node(struct drm_mm_node *node); void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); void drm_mm_init(struct drm_mm *mm, - unsigned long start, - unsigned long size); + u64 start, + u64 size); void drm_mm_takedown(struct drm_mm *mm); bool drm_mm_clean(struct drm_mm *mm); void drm_mm_init_scan(struct drm_mm *mm, - unsigned long size, + u64 size, unsigned alignment, unsigned long color); void drm_mm_init_scan_with_range(struct drm_mm *mm, - unsigned long size, + u64 size, unsigned alignment, unsigned long color, - unsigned long start, - unsigned long end); + u64 start, + u64 end); bool drm_mm_scan_add_block(struct drm_mm_node *node); bool drm_mm_scan_remove_block(struct drm_mm_node *node); -- cgit v1.2.3 From 54c4cd68ed7abd9f245722bee39464d04ddb4cfd Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 4 Mar 2015 00:18:38 -0500 Subject: drm/ttm: device address space != CPU address space We need to store device offsets in 64 bit as the device address space may be larger than the CPU's. Fixes GPU init failures on radeons with 4GB or more of vram on 32 bit kernels. We put vram at the start of the GPU's address space so the gart aperture starts at 4 GB causing all GPU addresses in the gart aperture to get truncated. bug: https://bugs.freedesktop.org/show_bug.cgi?id=89072 [airlied: fix warning on nouveau build] Signed-off-by: Alex Deucher Cc: thellstrom@vmware.com Acked-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- include/drm/ttm/ttm_bo_api.h | 2 +- include/drm/ttm/ttm_bo_driver.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 0ccf7f267ff9..c768ddfbe53c 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -249,7 +249,7 @@ struct ttm_buffer_object { * either of these locks held. 
*/ - unsigned long offset; + uint64_t offset; /* GPU address space is independent of CPU word size */ uint32_t cur_placement; struct sg_table *sg; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 142d752fc450..813042cede57 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -277,7 +277,7 @@ struct ttm_mem_type_manager { bool has_type; bool use_type; uint32_t flags; - unsigned long gpu_offset; + uint64_t gpu_offset; /* GPU address space is independent of CPU word size */ uint64_t size; uint32_t available_caching; uint32_t default_caching; -- cgit v1.2.3 From 40eeb111d7c88bfbc38e1dfe330bc4cec05e0806 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 5 Mar 2015 10:08:14 +0100 Subject: Revert "pinctrl: consumer: use correct retval for placeholder functions" This reverts commit 5a7d2efdd93f6c4bb6cd3d5df3d2f5611c9b87ac. As per discussion on the mailing list, this is not the right thing to do. NULL cookies are valid in the stubs. Reported-by: Wolfram Sang Signed-off-by: Linus Walleij --- include/linux/pinctrl/consumer.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h index 72c0415d6c21..18eccefea06e 100644 --- a/include/linux/pinctrl/consumer.h +++ b/include/linux/pinctrl/consumer.h @@ -82,7 +82,7 @@ static inline int pinctrl_gpio_direction_output(unsigned gpio) static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) { - return ERR_PTR(-ENOSYS); + return NULL; } static inline void pinctrl_put(struct pinctrl *p) @@ -93,7 +93,7 @@ static inline struct pinctrl_state * __must_check pinctrl_lookup_state( struct pinctrl *p, const char *name) { - return ERR_PTR(-ENOSYS); + return NULL; } static inline int pinctrl_select_state(struct pinctrl *p, @@ -104,7 +104,7 @@ static inline int pinctrl_select_state(struct pinctrl *p, static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev) { - return ERR_PTR(-ENOSYS); + return NULL; } static inline void devm_pinctrl_put(struct pinctrl *p) -- cgit v1.2.3 From 8603e1b30027f943cc9c1eef2b291d42c3347af1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 5 Mar 2015 08:04:13 -0500 Subject: workqueue: fix hang involving racing cancel[_delayed]_work_sync()'s for PREEMPT_NONE cancel[_delayed]_work_sync() are implemented using __cancel_work_timer() which grabs the PENDING bit using try_to_grab_pending() and then flushes the work item with PENDING set to prevent the on-going execution of the work item from requeueing itself. try_to_grab_pending() can always grab PENDING bit without blocking except when someone else is doing the above flushing during cancelation. In that case, try_to_grab_pending() returns -ENOENT. In this case, __cancel_work_timer() currently invokes flush_work(). The assumption is that the completion of the work item is what the other canceling task would be waiting for too and thus waiting for the same condition and retrying should allow forward progress without excessive busy looping Unfortunately, this doesn't work if preemption is disabled or the latter task has real time priority. Let's say task A just got woken up from flush_work() by the completion of the target work item. 
If, before task A starts executing, task B gets scheduled and invokes __cancel_work_timer() on the same work item, its try_to_grab_pending() will return -ENOENT as the work item is still being canceled by task A and flush_work() will also immediately return false as the work item is no longer executing. This puts task B in a busy loop possibly preventing task A from executing and clearing the canceling state on the work item leading to a hang. task A task B worker executing work __cancel_work_timer() try_to_grab_pending() set work CANCELING flush_work() block for work completion completion, wakes up A __cancel_work_timer() while (forever) { try_to_grab_pending() -ENOENT as work is being canceled flush_work() false as work is no longer executing } This patch removes the possible hang by updating __cancel_work_timer() to explicitly wait for clearing of CANCELING rather than invoking flush_work() after try_to_grab_pending() fails with -ENOENT. Link: http://lkml.kernel.org/g/20150206171156.GA8942@axis.com v3: bit_waitqueue() can't be used for work items defined in vmalloc area. Switched to custom wake function which matches the target work item and exclusive wait and wakeup. v2: v1 used wake_up() on bit_waitqueue() which leads to NULL deref if the target bit waitqueue has wait_bit_queue's on it. Use DEFINE_WAIT_BIT() and __wake_up_bit() instead. Reported by Tomeu Vizoso. Signed-off-by: Tejun Heo Reported-by: Rabin Vincent Cc: Tomeu Vizoso Cc: stable@vger.kernel.org Tested-by: Jesper Nilsson Tested-by: Rabin Vincent --- include/linux/workqueue.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 74db135f9957..f597846ff605 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -70,7 +70,8 @@ enum { /* data contains off-queue information when !WORK_STRUCT_PWQ */ WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, - WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE), + __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE, + WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING), /* * When a work item is off queue, its high bits point to the last -- cgit v1.2.3 From ef2b22ac540c018bd574d1846ab95b9bfcf38702 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 2 Mar 2015 22:26:55 +0100 Subject: cpuidle / sleep: Use broadcast timer for states that stop local timer Commit 381063133246 (PM / sleep: Re-implement suspend-to-idle handling) overlooked the fact that entering some sufficiently deep idle states by CPUs may cause their local timers to stop and in those cases it is necessary to switch over to a broadcast timer prior to entering the idle state. If the cpuidle driver in use does not provide the new ->enter_freeze callback for any of the idle states, that problem affects suspend-to-idle too, but it is not taken into account after the changes made by commit 381063133246. Fix that by changing the definition of cpuidle_enter_freeze() and re-arranging of the code in cpuidle_idle_call(), so the former does not call cpuidle_enter() any more and the fallback case is handled by cpuidle_idle_call() directly. Fixes: 381063133246 (PM / sleep: Re-implement suspend-to-idle handling) Reported-and-tested-by: Lorenzo Pieralisi Signed-off-by: Rafael J. 
Wysocki Acked-by: Peter Zijlstra (Intel) --- include/linux/cpuidle.h | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index f551a9299ac9..306178d7309f 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -126,6 +126,8 @@ struct cpuidle_driver { #ifdef CONFIG_CPU_IDLE extern void disable_cpuidle(void); +extern bool cpuidle_not_available(struct cpuidle_driver *drv, + struct cpuidle_device *dev); extern int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev); @@ -150,11 +152,17 @@ extern void cpuidle_resume(void); extern int cpuidle_enable_device(struct cpuidle_device *dev); extern void cpuidle_disable_device(struct cpuidle_device *dev); extern int cpuidle_play_dead(void); -extern void cpuidle_enter_freeze(void); +extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, + struct cpuidle_device *dev); +extern int cpuidle_enter_freeze(struct cpuidle_driver *drv, + struct cpuidle_device *dev); extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); #else static inline void disable_cpuidle(void) { } +static inline bool cpuidle_not_available(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{return true; } static inline int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) {return -ENODEV; } @@ -183,7 +191,12 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev) {return -ENODEV; } static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } static inline int cpuidle_play_dead(void) {return -ENODEV; } -static inline void cpuidle_enter_freeze(void) { } +static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{return -ENODEV; } +static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{return -ENODEV; } static inline struct cpuidle_driver *cpuidle_get_cpu_driver( struct cpuidle_device *dev) {return NULL; } #endif -- cgit v1.2.3 From 424e0f039bfa8a51fb5c5178b6ece8baa4996469 Mon Sep 17 00:00:00 2001 From: Dave Gerlach Date: Fri, 27 Feb 2015 19:10:26 -0600 Subject: ARM: dts: am33xx: fix SLEWCTRL_FAST pinctrl binding According to AM335x TRM, Document spruh73l, Revised February 2015, Section 9.2.2 Pad Control Registers, setting bit 6 of the pad control registers actually sets the SLEWCTRL value to slow rather than fast as the current macro indicates. Introduce a new macro, SLEWCTRL_SLOW, that sets the bit, and modify SLEWCTRL_FAST to 0 but keep it for completeness. Current users of the macro (i2c and mdio) are left unmodified as SLEWCTRL_FAST was the macro used and actual desired state. Tested on am335x-gp-evm with no difference in software performance seen. 
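A small stand-alone sketch -- not part of the patch -- showing the effect of the corrected macros: selecting "fast" now leaves bit 6 clear while "slow" sets it, matching the TRM wording quoted above. The extra pad flags combined below are only for illustration.

#include <stdio.h>

#define PULL_DISABLE	(1 << 3)
#define INPUT_EN	(1 << 5)
#define SLEWCTRL_SLOW	(1 << 6)
#define SLEWCTRL_FAST	0

int main(void)
{
	unsigned int fast = INPUT_EN | PULL_DISABLE | SLEWCTRL_FAST;
	unsigned int slow = INPUT_EN | PULL_DISABLE | SLEWCTRL_SLOW;

	printf("fast pad conf: 0x%02x (bit 6 %s)\n", fast,
	       (fast & (1 << 6)) ? "set" : "clear");
	printf("slow pad conf: 0x%02x (bit 6 %s)\n", slow,
	       (slow & (1 << 6)) ? "set" : "clear");
	return 0;
}
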
Signed-off-by: Dave Gerlach Signed-off-by: Tony Lindgren --- include/dt-bindings/pinctrl/am33xx.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h index 2fbc804e1a45..226f77246a70 100644 --- a/include/dt-bindings/pinctrl/am33xx.h +++ b/include/dt-bindings/pinctrl/am33xx.h @@ -13,7 +13,8 @@ #define PULL_DISABLE (1 << 3) #define INPUT_EN (1 << 5) -#define SLEWCTRL_FAST (1 << 6) +#define SLEWCTRL_SLOW (1 << 6) +#define SLEWCTRL_FAST 0 /* update macro depending on INPUT_EN and PULL_ENA */ #undef PIN_OUTPUT -- cgit v1.2.3 From 10b218551444934b3ed864ec9eb81332d68d21ed Mon Sep 17 00:00:00 2001 From: Dave Gerlach Date: Fri, 27 Feb 2015 19:10:27 -0600 Subject: ARM: dts: am43xx: fix SLEWCTRL_FAST pinctrl binding According to AM437x TRM, Document SPRUHL7B, Revised December 2014, Section 7.2.1 Pad Control Registers, setting bit 19 of the pad control registers actually sets the SLEWCTRL value to slow rather than fast as the current macro indicates. Introduce a new macro, SLEWCTRL_SLOW, that sets the bit, and modify SLEWCTRL_FAST to 0 but keep it for completeness. Current users of the macro (i2c, mdio, and uart) are left unmodified as SLEWCTRL_FAST was the macro used and actual desired state. Tested on am437x-gp-evm with no difference in software performance seen. Signed-off-by: Dave Gerlach Signed-off-by: Tony Lindgren --- include/dt-bindings/pinctrl/am43xx.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h index 9c2e4f82381e..5f4d01898c9c 100644 --- a/include/dt-bindings/pinctrl/am43xx.h +++ b/include/dt-bindings/pinctrl/am43xx.h @@ -18,7 +18,8 @@ #define PULL_DISABLE (1 << 16) #define PULL_UP (1 << 17) #define INPUT_EN (1 << 18) -#define SLEWCTRL_FAST (1 << 19) +#define SLEWCTRL_SLOW (1 << 19) +#define SLEWCTRL_FAST 0 #define DS0_PULL_UP_DOWN_EN (1 << 27) #define PIN_OUTPUT (PULL_DISABLE) -- cgit v1.2.3 From 2bb785169e9709d41220e5c18b0270883a82f85c Mon Sep 17 00:00:00 2001 From: Peter Hurley Date: Sun, 1 Mar 2015 10:18:16 -0500 Subject: serial: core: Fix iotype userspace breakage commit 3ffb1a8193bea ("serial: core: Add big-endian iotype") re-numbered userspace-dependent values; ioctl(TIOCSSERIAL) can assign the port iotype (which is expected to match the selected i/o accessors), so iotype values must not be changed. 
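A toy illustration of the ABI breakage described above -- not from the patch: inserting a value in the middle of a numbered list shifts every later value, so an old binary passing 4 for the Au1x00 iotype is suddenly interpreted as the new big-endian type. The enum names are invented for the sketch.

#include <stdio.h>

/* Numbering userspace was built against, before the offending commit. */
enum old_iotype { OLD_PORT, OLD_HUB6, OLD_MEM, OLD_MEM32, OLD_AU, OLD_TSI };

/* Numbering after inserting MEM32BE in the middle of the list. */
enum new_iotype { NEW_PORT, NEW_HUB6, NEW_MEM, NEW_MEM32, NEW_MEM32BE,
		  NEW_AU, NEW_TSI };

int main(void)
{
	int from_userspace = OLD_AU;	/* old binary asks for Au1x00-style I/O */

	printf("userspace sends %d: old kernel reads AU (%d), "
	       "renumbered kernel reads MEM32BE (%d)\n",
	       from_userspace, (int)OLD_AU, (int)NEW_MEM32BE);
	return 0;
}
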
Cc: Kevin Cernekee Cc: # 3.19+ Signed-off-by: Peter Hurley Reviewed-by: Kevin Cernekee Signed-off-by: Greg Kroah-Hartman --- include/linux/serial_core.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index baf3e1d08416..1094f2d9cadb 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -147,9 +147,9 @@ struct uart_port { #define UPIO_HUB6 (1) /* Hub6 ISA card */ #define UPIO_MEM (2) /* 8b MMIO access */ #define UPIO_MEM32 (3) /* 32b little endian */ -#define UPIO_MEM32BE (4) /* 32b big endian */ -#define UPIO_AU (5) /* Au1x00 and RT288x type IO */ -#define UPIO_TSI (6) /* Tsi108/109 type IO */ +#define UPIO_AU (4) /* Au1x00 and RT288x type IO */ +#define UPIO_TSI (5) /* Tsi108/109 type IO */ +#define UPIO_MEM32BE (6) /* 32b big endian */ unsigned int read_status_mask; /* driver specific */ unsigned int ignore_status_mask; /* driver specific */ -- cgit v1.2.3 From 647f162b8e7e446c4bade031eb8a1a0a83d3de82 Mon Sep 17 00:00:00 2001 From: Peter Hurley Date: Sun, 1 Mar 2015 10:24:28 -0500 Subject: serial: uapi: Declare all userspace-visible io types ioctl(TIOCGSERIAL|TIOCSSERIAL) report and can change the port->iotype. UART drivers use the UPIO_* definitions, but the uapi header defines parallel values and userspace uses these parallel values for ioctls; thus the userspace values are definitive. Define UPIO_* iotypes in terms of the uapi defines, SERIAL_IO_*; extend the uapi defines to include all values in use by the serial core. Signed-off-by: Peter Hurley Signed-off-by: Greg Kroah-Hartman --- include/linux/serial_core.h | 14 +++++++------- include/uapi/linux/serial.h | 4 ++++ 2 files changed, 11 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 1094f2d9cadb..d10965f0d8a4 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -143,13 +143,13 @@ struct uart_port { unsigned char iotype; /* io access style */ unsigned char unused1; -#define UPIO_PORT (0) /* 8b I/O port access */ -#define UPIO_HUB6 (1) /* Hub6 ISA card */ -#define UPIO_MEM (2) /* 8b MMIO access */ -#define UPIO_MEM32 (3) /* 32b little endian */ -#define UPIO_AU (4) /* Au1x00 and RT288x type IO */ -#define UPIO_TSI (5) /* Tsi108/109 type IO */ -#define UPIO_MEM32BE (6) /* 32b big endian */ +#define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */ +#define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */ +#define UPIO_MEM (SERIAL_IO_MEM) /* 8b MMIO access */ +#define UPIO_MEM32 (SERIAL_IO_MEM32) /* 32b little endian */ +#define UPIO_AU (SERIAL_IO_AU) /* Au1x00 and RT288x type IO */ +#define UPIO_TSI (SERIAL_IO_TSI) /* Tsi108/109 type IO */ +#define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */ unsigned int read_status_mask; /* driver specific */ unsigned int ignore_status_mask; /* driver specific */ diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h index 5e0d0ed61cf3..25331f9faa76 100644 --- a/include/uapi/linux/serial.h +++ b/include/uapi/linux/serial.h @@ -65,6 +65,10 @@ struct serial_struct { #define SERIAL_IO_PORT 0 #define SERIAL_IO_HUB6 1 #define SERIAL_IO_MEM 2 +#define SERIAL_IO_MEM32 3 +#define SERIAL_IO_AU 4 +#define SERIAL_IO_TSI 5 +#define SERIAL_IO_MEM32BE 6 #define UART_CLEAR_FIFO 0x01 #define UART_USE_FIFO 0x02 -- cgit v1.2.3 From f54b97ed0b17d3da5f98ba8188cd5646415a922d Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 6 Mar 2015 16:37:41 +0000 Subject: 
irqchip: gicv3-its: Allocate enough memory for the full range of DeviceID The ITS table allocator is only allocating a single page per table. This works fine for most things, but leads to silent lack of interrupt delivery if we end-up with a device that has an ID that is out of the range defined by a single page of memory. Even worse, depending on the page size, behaviour changes, which is not a very good experience. A solution is actually to allocate memory for the full range of ID that the ITS supports. A massive waste memory wise, but at least a safe bet. Tested on a Phytium SoC. Tested-by: Chen Baozi Acked-by: Chen Baozi Signed-off-by: Marc Zyngier Link: https://lkml.kernel.org/r/1425659870-11832-3-git-send-email-marc.zyngier@arm.com Signed-off-by: Jason Cooper --- include/linux/irqchip/arm-gic-v3.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 800544bc7bfd..cbdd440d486d 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -166,6 +166,8 @@ #define GITS_TRANSLATER 0x10040 +#define GITS_TYPER_DEVBITS_SHIFT 13 +#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) #define GITS_TYPER_PTA (1UL << 19) #define GITS_CBASER_VALID (1UL << 63) -- cgit v1.2.3 From 7cb991164a46992a499ecdc77b17f8ac94bdb75f Mon Sep 17 00:00:00 2001 From: Yun Wu Date: Fri, 6 Mar 2015 16:37:49 +0000 Subject: irqchip: gicv3-its: Define macros for GITS_CTLR fields Define macros for GITS_CTLR fields to avoid using magic numbers. Acked-by: Marc Zyngier Signed-off-by: Yun Wu Signed-off-by: Marc Zyngier Link: https://lkml.kernel.org/r/1425659870-11832-11-git-send-email-marc.zyngier@arm.com Signed-off-by: Jason Cooper --- include/linux/irqchip/arm-gic-v3.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index cbdd440d486d..781974afff9f 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -166,6 +166,9 @@ #define GITS_TRANSLATER 0x10040 +#define GITS_CTLR_ENABLE (1U << 0) +#define GITS_CTLR_QUIESCENT (1U << 31) + #define GITS_TYPER_DEVBITS_SHIFT 13 #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) #define GITS_TYPER_PTA (1UL << 19) -- cgit v1.2.3 From 7e41a9def062167b5405711a42c9ecfd163e31a9 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 6 Mar 2015 12:50:03 +1030 Subject: virtio_blk: typo fix Now that QEmu reuses linux virtio headers, we noticed a typo in the exported virtio block header. Fix it up. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell --- include/uapi/linux/virtio_blk.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h index 3c53eec4ae22..b695ba959186 100644 --- a/include/uapi/linux/virtio_blk.h +++ b/include/uapi/linux/virtio_blk.h @@ -60,7 +60,7 @@ struct virtio_blk_config { __u32 size_max; /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */ __u32 seg_max; - /* geometry the device (if VIRTIO_BLK_F_GEOMETRY) */ + /* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */ struct virtio_blk_geometry { __u16 cylinders; __u8 heads; -- cgit v1.2.3 From 0fa2a56437d0b7ef5d86eef2778ad3469ca72d5a Mon Sep 17 00:00:00 2001 From: "Michael S. 
Tsirkin" Date: Fri, 6 Mar 2015 12:50:03 +1030 Subject: virtio_blk: fix comment for virtio 1.0 Fix up comment to match virtio 1.0 logic: virtio_blk_outhdr isn't the first elements anymore, the only requirement is that it comes first in the s/g list. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell --- include/uapi/linux/virtio_blk.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h index b695ba959186..19c66fcbab8a 100644 --- a/include/uapi/linux/virtio_blk.h +++ b/include/uapi/linux/virtio_blk.h @@ -119,7 +119,11 @@ struct virtio_blk_config { #define VIRTIO_BLK_T_BARRIER 0x80000000 #endif /* !VIRTIO_BLK_NO_LEGACY */ -/* This is the first element of the read scatter-gather list. */ +/* + * This comes first in the read scatter-gather list. + * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, + * this is the first element of the read scatter-gather list. + */ struct virtio_blk_outhdr { /* VIRTIO_BLK_T* */ __virtio32 type; -- cgit v1.2.3 From 3d3801effda19b21012b5d1981e96cc277df85fd Mon Sep 17 00:00:00 2001 From: Michael Turquette Date: Wed, 25 Feb 2015 09:11:01 -0800 Subject: clk: introduce clk_is_match Some drivers compare struct clk pointers as a means of knowing if the two pointers reference the same clock hardware. This behavior is dubious (drivers must not dereference struct clk), but did not cause any regressions until the per-user struct clk patch was merged. Now the test for matching clk's will always fail with per-user struct clk's. clk_is_match is introduced to fix the regression and prevent drivers from comparing the pointers manually. Fixes: 035a61c314eb ("clk: Make clk API return per-user struct clk instances") Cc: Russell King Cc: Shawn Guo Cc: Tomeu Vizoso Signed-off-by: Michael Turquette [arnd@arndb.de: Fix COMMON_CLK=N && HAS_CLK=Y config] Signed-off-by: Arnd Bergmann [sboyd@codeaurora.org: const arguments to clk_is_match() and remove unnecessary ternary operation] Signed-off-by: Stephen Boyd --- include/linux/clk.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'include') diff --git a/include/linux/clk.h b/include/linux/clk.h index 8381bbfbc308..68c16a6bedb3 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -125,6 +125,19 @@ int clk_set_phase(struct clk *clk, int degrees); */ int clk_get_phase(struct clk *clk); +/** + * clk_is_match - check if two clk's point to the same hardware clock + * @p: clk compared against q + * @q: clk compared against p + * + * Returns true if the two struct clk pointers both point to the same hardware + * clock node. Put differently, returns true if struct clk *p and struct clk *q + * share the same struct clk_core object. + * + * Returns false otherwise. Note that two NULL clks are treated as matching. + */ +bool clk_is_match(const struct clk *p, const struct clk *q); + #else static inline long clk_get_accuracy(struct clk *clk) @@ -142,6 +155,11 @@ static inline long clk_get_phase(struct clk *clk) return -ENOTSUPP; } +static inline bool clk_is_match(const struct clk *p, const struct clk *q) +{ + return p == q; +} + #endif /** -- cgit v1.2.3 From c29390c6dfeee0944ac6b5610ebbe403944378fc Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 11 Mar 2015 18:42:02 -0700 Subject: xps: must clear sender_cpu before forwarding John reported that my previous commit added a regression on his router. 
This is because sender_cpu & napi_id share a common location, so get_xps_queue() can see garbage and perform an out of bound access. We need to make sure sender_cpu is cleared before doing the transmit, otherwise any NIC busy poll enabled (skb_mark_napi_id()) can trigger this bug. Signed-off-by: Eric Dumazet Reported-by: John Bisected-by: John Fixes: 2bd82484bb4c ("xps: fix xps for stacked devices") Signed-off-by: David S. Miller --- include/linux/skbuff.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 30007afe70b3..f54d6659713a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -948,6 +948,13 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) to->l4_hash = from->l4_hash; }; +static inline void skb_sender_cpu_clear(struct sk_buff *skb) +{ +#ifdef CONFIG_XPS + skb->sender_cpu = 0; +#endif +} + #ifdef NET_SKBUFF_DATA_USES_OFFSET static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { -- cgit v1.2.3 From a5af5aa8b67dfdba36c853b70564fd2dfe73d478 Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Thu, 12 Mar 2015 16:26:11 -0700 Subject: kasan, module, vmalloc: rework shadow allocation for modules Current approach in handling shadow memory for modules is broken. Shadow memory could be freed only after memory shadow corresponds it is no longer used. vfree() called from interrupt context could use memory its freeing to store 'struct llist_node' in it: void vfree(const void *addr) { ... if (unlikely(in_interrupt())) { struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred); if (llist_add((struct llist_node *)addr, &p->list)) schedule_work(&p->wq); Later this list node used in free_work() which actually frees memory. Currently module_memfree() called in interrupt context will free shadow before freeing module's memory which could provoke kernel crash. So shadow memory should be freed after module's memory. However, such deallocation order could race with kasan_module_alloc() in module_alloc(). Free shadow right before releasing vm area. At this point vfree()'d memory is not used anymore and yet not available for other allocations. New VM_KASAN flag used to indicate that vm area has dynamically allocated shadow memory so kasan frees shadow only if it was previously allocated. 
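To make the intended ordering concrete, here is a minimal sketch of a release path using the new VM_KASAN flag and kasan_free_shadow() helper; the function name and the surrounding steps are illustrative assumptions, not the actual mm/vmalloc.c code:

#include <linux/kasan.h>
#include <linux/vmalloc.h>

/* Illustrative sketch: free the shadow right before the vm area itself is
 * released, and only if shadow was actually allocated for it. */
static void example_release_vm_area(struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                kasan_free_shadow(vm);  /* shadow no longer referenced */

        /* ... the area's pages and the vm_struct are freed after this ... */
}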
Signed-off-by: Andrey Ryabinin Acked-by: Rusty Russell Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 5 +++-- include/linux/vmalloc.h | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 72ba725ddf9c..5fa48a21d73e 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -5,6 +5,7 @@ struct kmem_cache; struct page; +struct vm_struct; #ifdef CONFIG_KASAN @@ -52,7 +53,7 @@ void kasan_slab_free(struct kmem_cache *s, void *object); #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) int kasan_module_alloc(void *addr, size_t size); -void kasan_module_free(void *addr); +void kasan_free_shadow(const struct vm_struct *vm); #else /* CONFIG_KASAN */ @@ -82,7 +83,7 @@ static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {} static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } -static inline void kasan_module_free(void *addr) {} +static inline void kasan_free_shadow(const struct vm_struct *vm) {} #endif /* CONFIG_KASAN */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 7d7acb35603d..0ec598381f97 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -17,6 +17,7 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ #define VM_NO_GUARD 0x00000040 /* don't add guard page */ +#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ /* bits [20..32] reserved for arch specific ioremap internals */ /* -- cgit v1.2.3 From d3733e5c98e952d419e77fa721912f09d15a2806 Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Thu, 12 Mar 2015 16:26:14 -0700 Subject: kasan, module: move MODULE_ALIGN macro into include/linux/moduleloader.h is more suitable place for this macro. Also change alignment to PAGE_SIZE for CONFIG_KASAN=n as such alignment already assumed in several places. 
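As a small illustration of what the PAGE_SIZE fallback guarantees on CONFIG_KASAN=n builds (the helper below is made up for illustration and is not part of the patch):

#include <linux/kernel.h>
#include <linux/moduleloader.h>

/* Module allocations are expected to start on a MODULE_ALIGN boundary;
 * without KASAN that now explicitly means page alignment. */
static bool example_module_base_aligned(const void *base)
{
        return IS_ALIGNED((unsigned long)base, MODULE_ALIGN);
}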
Signed-off-by: Andrey Ryabinin Cc: Dmitry Vyukov Acked-by: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 4 ---- include/linux/moduleloader.h | 8 ++++++++ 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 5fa48a21d73e..5bb074431eb0 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -50,15 +50,11 @@ void kasan_krealloc(const void *object, size_t new_size); void kasan_slab_alloc(struct kmem_cache *s, void *object); void kasan_slab_free(struct kmem_cache *s, void *object); -#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) - int kasan_module_alloc(void *addr, size_t size); void kasan_free_shadow(const struct vm_struct *vm); #else /* CONFIG_KASAN */ -#define MODULE_ALIGN 1 - static inline void kasan_unpoison_shadow(const void *address, size_t size) {} static inline void kasan_enable_current(void) {} diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index f7556261fe3c..4d0cb9bba93e 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -84,4 +84,12 @@ void module_arch_cleanup(struct module *mod); /* Any cleanup before freeing mod->module_init */ void module_arch_freeing_init(struct module *mod); + +#ifdef CONFIG_KASAN +#include +#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) +#else +#define MODULE_ALIGN PAGE_SIZE +#endif + #endif -- cgit v1.2.3 From a4994b810d52ccb26de922c8d231fe05d14610d4 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 13 Mar 2015 11:59:11 +1030 Subject: uapi/virtio_scsi: allow overriding CDB/SENSE size QEMU wants to use virtio scsi structures with a different VIRTIO_SCSI_CDB_SIZE/VIRTIO_SCSI_SENSE_SIZE, let's add ifdefs to allow overriding them. Keep the old defines under new names: VIRTIO_SCSI_CDB_DEFAULT_SIZE/VIRTIO_SCSI_SENSE_DEFAULT_SIZE, since that's what these values really are: defaults for cdb/sense size fields. Suggested-by: Paolo Bonzini Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell --- include/uapi/linux/virtio_scsi.h | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h index 42b9370771b0..cc18ef8825c0 100644 --- a/include/uapi/linux/virtio_scsi.h +++ b/include/uapi/linux/virtio_scsi.h @@ -29,8 +29,16 @@ #include -#define VIRTIO_SCSI_CDB_SIZE 32 -#define VIRTIO_SCSI_SENSE_SIZE 96 +/* Default values of the CDB and sense data size configuration fields */ +#define VIRTIO_SCSI_CDB_DEFAULT_SIZE 32 +#define VIRTIO_SCSI_SENSE_DEFAULT_SIZE 96 + +#ifndef VIRTIO_SCSI_CDB_SIZE +#define VIRTIO_SCSI_CDB_SIZE VIRTIO_SCSI_CDB_DEFAULT_SIZE +#endif +#ifndef VIRTIO_SCSI_SENSE_SIZE +#define VIRTIO_SCSI_SENSE_SIZE VIRTIO_SCSI_SENSE_DEFAULT_SIZE +#endif /* SCSI command request, followed by data-out */ struct virtio_scsi_cmd_req { -- cgit v1.2.3 From a697c2efba03ac7bfdbffbba7f0f1aa294f7dee0 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Tue, 10 Mar 2015 20:31:04 -0700 Subject: of/platform: Fix sparc:allmodconfig build sparc:allmodconfig fails to build with: drivers/built-in.o: In function `platform_bus_init': (.init.text+0x3684): undefined reference to `of_platform_register_reconfig_notifier' of_platform_register_reconfig_notifier is only declared if both OF_ADDRESS and OF_DYNAMIC are configured. Yet, the include file only declares a dummy function if OF_DYNAMIC is not configured. 
The sparc architecture does not configure OF_ADDRESS, but does configure OF_DYNAMIC, causing above error. Fixes: 801d728c10db ("of/reconfig: Add OF_DYNAMIC notifier for platform_bus_type") Cc: Pantelis Antoniou Signed-off-by: Guenter Roeck Signed-off-by: Rob Herring --- include/linux/of_platform.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 8a860f096c35..611a691145c4 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -84,7 +84,7 @@ static inline int of_platform_populate(struct device_node *root, static inline void of_platform_depopulate(struct device *parent) { } #endif -#ifdef CONFIG_OF_DYNAMIC +#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS) extern void of_platform_register_reconfig_notifier(void); #else static inline void of_platform_register_reconfig_notifier(void) { } -- cgit v1.2.3 From 40fb70f3aa0a67d28a30c854d4e7aa10b0511db9 Mon Sep 17 00:00:00 2001 From: Alexey Kodanev Date: Fri, 13 Mar 2015 19:13:53 +0300 Subject: vxlan: fix wrong usage of VXLAN_VID_MASK commit dfd8645ea1bd9127 wrongly assumes that VXLAN_VDI_MASK includes eight lower order reserved bits of VNI field that are using for remote checksum offload. Right now, when VNI number greater then 0xffff, vxlan_udp_encap_recv() will always return with 'bad_flag' error, reducing the usable vni range from 0..16777215 to 0..65535. Also, it doesn't really check whether RCO bits processed or not. Fix it by adding new VNI mask which has all 32 bits of VNI field: 24 bits for id and 8 bits for other usage. Signed-off-by: Alexey Kodanev Signed-off-by: David S. Miller --- include/net/vxlan.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/net/vxlan.h b/include/net/vxlan.h index eabd3a038674..c73e7abbbaa5 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -91,6 +91,7 @@ struct vxlanhdr { #define VXLAN_N_VID (1u << 24) #define VXLAN_VID_MASK (VXLAN_N_VID - 1) +#define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8) #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) struct vxlan_metadata { -- cgit v1.2.3 From ae705930fca6322600690df9dc1c7d0516145a93 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Fri, 13 Mar 2015 17:02:56 +0000 Subject: arm/arm64: KVM: Keep elrsr/aisr in sync with software model MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is an interesting bug in the vgic code, which manifests itself when the KVM run loop has a signal pending or needs a vmid generation rollover after having disabled interrupts but before actually switching to the guest. In this case, we flush the vgic as usual, but we sync back the vgic state and exit to userspace before entering the guest. The consequence is that we will be syncing the list registers back to the software model using the GICH_ELRSR and GICH_EISR from the last execution of the guest, potentially overwriting a list register containing an interrupt. This showed up during migration testing where we would capture a state where the VM has masked the arch timer but there were no interrupts, resulting in a hung test. 
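For illustration, the kind of implementation the new hook expects looks roughly like the sketch below; the field path is an assumption based on the GICv2 CPU interface state, not code quoted from this patch:

#include <linux/kvm_host.h>

/* Illustrative sketch: after syncing list-register state back into the
 * software model, drop the cached EISR bits so a later flush that never
 * entered the guest cannot fold stale status back in. */
static void example_vgic_v2_clear_eisr(struct kvm_vcpu *vcpu)
{
        vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0;
}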
Cc: Marc Zyngier Reported-by: Alex Bennee Signed-off-by: Christoffer Dall Signed-off-by: Alex Bennée Acked-by: Marc Zyngier Signed-off-by: Christoffer Dall --- include/kvm/arm_vgic.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 7c55dd5dd2c9..66203b268984 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -114,6 +114,7 @@ struct vgic_ops { void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr); u64 (*get_elrsr)(const struct kvm_vcpu *vcpu); u64 (*get_eisr)(const struct kvm_vcpu *vcpu); + void (*clear_eisr)(struct kvm_vcpu *vcpu); u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu); void (*enable_underflow)(struct kvm_vcpu *vcpu); void (*disable_underflow)(struct kvm_vcpu *vcpu); -- cgit v1.2.3 From 8cb2c2dc472775479a1a7e78180955f6f1cb0b0a Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Thu, 12 Mar 2015 12:55:13 +0100 Subject: livepatch: Fix subtle race with coming and going modules There is a notifier that handles live patches for coming and going modules. It takes klp_mutex lock to avoid races with coming and going patches but it does not keep the lock all the time. Therefore the following races are possible: 1. The notifier is called sometime in STATE_MODULE_COMING. The module is visible by find_module() in this state all the time. It means that new patch can be registered and enabled even before the notifier is called. It might create wrong order of stacked patches, see below for an example. 2. New patch could still see the module in the GOING state even after the notifier has been called. It will try to initialize the related object structures but the module could disappear at any time. There will stay mess in the structures. It might even cause an invalid memory access. This patch solves the problem by adding a boolean variable into struct module. The value is true after the coming and before the going handler is called. New patches need to be applied when the value is true and they need to ignore the module when the value is false. Note that we need to know state of all modules on the system. The races are related to new patches. Therefore we do not know what modules will get patched. Also note that we could not simply ignore going modules. The code from the module could be called even in the GOING state until mod->exit() finishes. If we start supporting patches with semantic changes between function calls, we need to apply new patches to any still usable code. See below for an example. Finally note that the patch solves only the situation when a new patch is registered. There are no such problems when the patch is being removed. It does not matter who disable the patch first, whether the normal disable_patch() or the module notifier. There is nothing to do once the patch is disabled. 
Alternative solutions: ====================== + reject new patches when a patched module is coming or going; this is ugly + wait with adding new patch until the module leaves the COMING and GOING states; this might be dangerous and complicated; we would need to release kgr_lock in the middle of the patch registration to avoid a deadlock with the coming and going handlers; also we might need a waitqueue for each module which seems to be even bigger overhead than the boolean + stop modules from entering COMING and GOING states; wait until modules leave these states when they are already there; looks complicated; we would need to ignore the module that asked to stop the others to avoid a deadlock; also it is unclear what to do when two modules asked to stop others and both are in COMING state (situation when two new patches are applied) + always register/enable new patches and fix up the potential mess (registered patches order) in klp_module_init(); this is nasty and prone to regressions in the future development + add another MODULE_STATE where the kallsyms are visible but the module is not used yet; this looks too complex; the module states are checked on "many" locations Example of patch stacking breakage: =================================== The notifier could _not_ _simply_ ignore already initialized module objects. For example, let's have three patches (P1, P2, P3) for functions a() and b() where a() is from vmcore and b() is from a module M. Something like: a() b() P1 a1() b1() P2 a2() b2() P3 a3() b3(3) If you load the module M after all patches are registered and enabled. The ftrace ops for function a() and b() has listed the functions in this order: ops_a->func_stack -> list(a3,a2,a1) ops_b->func_stack -> list(b3,b2,b1) , so the pointer to b3() is the first and will be used. Then you might have the following scenario. Let's start with state when patches P1 and P2 are registered and enabled but the module M is not loaded. Then ftrace ops for b() does not exist. Then we get into the following race: CPU0 CPU1 load_module(M) complete_formation() mod->state = MODULE_STATE_COMING; mutex_unlock(&module_mutex); klp_register_patch(P3); klp_enable_patch(P3); # STATE 1 klp_module_notify(M) klp_module_notify_coming(P1); klp_module_notify_coming(P2); klp_module_notify_coming(P3); # STATE 2 The ftrace ops for a() and b() then looks: STATE1: ops_a->func_stack -> list(a3,a2,a1); ops_b->func_stack -> list(b3); STATE2: ops_a->func_stack -> list(a3,a2,a1); ops_b->func_stack -> list(b2,b1,b3); therefore, b2() is used for the module but a3() is used for vmcore because they were the last added. Example of the race with going modules: ======================================= CPU0 CPU1 delete_module() #SYSCALL try_stop_module() mod->state = MODULE_STATE_GOING; mutex_unlock(&module_mutex); klp_register_patch() klp_enable_patch() #save place to switch universe b() # from module that is going a() # from core (patched) mod->exit(); Note that the function b() can be called until we call mod->exit(). If we do not apply patch against b() because it is in MODULE_STATE_GOING, it will call patched a() with modified semantic and things might get wrong. 
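To make the intended use of the new flag concrete, a minimal sketch (the helper name is made up; the real checks live in the livepatch core, and klp_alive only exists on CONFIG_LIVEPATCH builds):

#include <linux/module.h>

/* Only attach a patch to a module whose coming notifier has already run
 * and whose going notifier has not -- exactly the window in which
 * klp_alive is true. */
static bool example_module_is_patchable(struct module *mod)
{
        return mod && mod->klp_alive;
}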
[jpoimboe@redhat.com: use one boolean instead of two] Signed-off-by: Petr Mladek Acked-by: Josh Poimboeuf Acked-by: Rusty Russell Signed-off-by: Jiri Kosina --- include/linux/module.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/linux/module.h b/include/linux/module.h index b653d7c0a05a..7232fde6a991 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -344,6 +344,10 @@ struct module { unsigned long *ftrace_callsites; #endif +#ifdef CONFIG_LIVEPATCH + bool klp_alive; +#endif + #ifdef CONFIG_MODULE_UNLOAD /* What modules depend on me? */ struct list_head source_list; -- cgit v1.2.3 From e03826d5045e81a66a4fad7be9a8ecdaeb7911cf Mon Sep 17 00:00:00 2001 From: Keerthy Date: Tue, 17 Mar 2015 15:56:04 +0530 Subject: regulator: palmas: Correct TPS659038 register definition for REGEN2 The register offset for REGEN2_CTRL in different for TPS659038 chip as when compared with other Palmas family PMICs. In the case of TPS659038 the wrong offset pointed to PLLEN_CTRL and was causing a hang. Correcting the same. Signed-off-by: Keerthy Signed-off-by: Mark Brown Cc: stable@vger.kernel.org --- include/linux/mfd/palmas.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index fb0390a1a498..ee7b1ce7a6f8 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -2999,6 +2999,9 @@ enum usb_irq_events { #define PALMAS_GPADC_TRIM15 0x0E #define PALMAS_GPADC_TRIM16 0x0F +/* TPS659038 regen2_ctrl offset iss different from palmas */ +#define TPS659038_REGEN2_CTRL 0x12 + /* TPS65917 Interrupt registers */ /* Registers for function INTERRUPT */ -- cgit v1.2.3 From ad41faa88e39af451427c921a0f8b441e104b6fa Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 17 Mar 2015 11:16:00 +0100 Subject: netdevice.h: fix ndo_bridge_* comments The argument 'flags' was missing in ndo_bridge_setlink(). ndo_bridge_dellink() was missing. Fixes: 407af3299ef1 ("bridge: Add netlink interface to configure vlans on bridge ports") Fixes: add511b38266 ("bridge: add flags argument to ndo_bridge_setlink and ndo_bridge_dellink") CC: Vlad Yasevich CC: Roopa Prabhu Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/linux/netdevice.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 429d1790a27e..dcf6ec27739b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -965,9 +965,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * Used to add FDB entries to dump requests. Implementers should add * entries to skb and update idx with the number of entries. * - * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) + * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, + * u16 flags) * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, * struct net_device *dev, u32 filter_mask) + * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, + * u16 flags); * * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); * Called to change device carrier. Soft-devices (like dummy, team, etc) -- cgit v1.2.3 From cf39284d41f67964cf42b21bb386c012cf5b7f65 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Wed, 18 Mar 2015 08:57:41 +0800 Subject: regulator: Fix documentation for regmap in the config dev_get_regulator() does not exist, fix the typo. 
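For reference, dev_get_regmap() is the helper the comment should have named; a minimal sketch of how a driver typically hands the regmap to the regulator core (the driver code below is illustrative, not taken from the patch):

#include <linux/regmap.h>
#include <linux/regulator/driver.h>

static void example_fill_config(struct device *dev,
                                struct regulator_config *cfg)
{
        /* The core can find a regmap registered against the device via
         * dev_get_regmap(); setting cfg->regmap explicitly is only needed
         * when that lookup is insufficient. */
        cfg->regmap = dev_get_regmap(dev, NULL);
}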
Signed-off-by: Axel Lin Signed-off-by: Mark Brown --- include/linux/regulator/driver.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index d4ad5b5a02bb..045f709cb89b 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -316,7 +316,7 @@ struct regulator_desc { * @driver_data: private regulator data * @of_node: OpenFirmware node to parse for device tree bindings (may be * NULL). - * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is + * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is * insufficient. * @ena_gpio_initialized: GPIO controlling regulator enable was properly * initialized, meaning that >= 0 is a valid gpio -- cgit v1.2.3 From 4017a7ee693d1cae6735c0dac21594a7c6416c4c Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 2 Mar 2015 01:10:28 +0100 Subject: netfilter: restore rule tracing via nfnetlink_log MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since fab4085 ("netfilter: log: nf_log_packet() as real unified interface"), the loginfo structure that is passed to nf_log_packet() is used to explicitly indicate the logger type you want to use. This is a problem for people tracing rules through nfnetlink_log since packets are always routed to the NF_LOG_TYPE logger after the aforementioned patch. We can fix this by removing the trace loginfo structures, but that still changes the log level from 4 to 5 for tracing messages and there may be someone relying on this outthere. So let's just introduce a new nf_log_trace() function that restores the former behaviour. Reported-by: Markus Kötter Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_log.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include') diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h index 534e1f2ac4fc..57639fca223a 100644 --- a/include/net/netfilter/nf_log.h +++ b/include/net/netfilter/nf_log.h @@ -79,6 +79,16 @@ void nf_log_packet(struct net *net, const struct nf_loginfo *li, const char *fmt, ...); +__printf(8, 9) +void nf_log_trace(struct net *net, + u_int8_t pf, + unsigned int hooknum, + const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const struct nf_loginfo *li, + const char *fmt, ...); + struct nf_log_buf; struct nf_log_buf *nf_log_buf_open(void); -- cgit v1.2.3 From 5067c0469c643512f24786990e315f9c15cc7d24 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 12 Mar 2015 10:32:18 -0700 Subject: ata: Add a new flag to destinguish sas controller SAS controller has its own tag allocation, which doesn't directly match to ATA tag, so SAS and SATA have different code path for ata tags. Originally we use port->scsi_host (98bd4be1) to destinguish SAS controller, but libsas set ->scsi_host too, so we can't use it for the destinguish, we add a new flag for this purpose. Without this patch, the following oops can happen because scsi-mq uses a host-wide tag map shared among all devices with some integer tag values >= ATA_MAX_QUEUE. These unexpectedly high tag values cause __ata_qc_from_tag() to return NULL, which is then dereferenced in ata_qc_new_init(). 
BUG: unable to handle kernel NULL pointer dereference at 0000000000000058 IP: [] ata_qc_new_init+0x3e/0x120 PGD 32adf0067 PUD 32adf1067 PMD 0 Oops: 0002 [#1] SMP DEBUG_PAGEALLOC Modules linked in: iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi igb i2c_algo_bit ptp pps_core pm80xx libsas scsi_transport_sas sg coretemp eeprom w83795 i2c_i801 CPU: 4 PID: 1450 Comm: cydiskbench Not tainted 4.0.0-rc3 #1 Hardware name: Supermicro X8DTH-i/6/iF/6F/X8DTH, BIOS 2.1b 05/04/12 task: ffff8800ba86d500 ti: ffff88032a064000 task.ti: ffff88032a064000 RIP: 0010:[] [] ata_qc_new_init+0x3e/0x120 RSP: 0018:ffff88032a067858 EFLAGS: 00010046 RAX: 0000000000000000 RBX: ffff8800ba0d2230 RCX: 000000000000002a RDX: ffffffff80505ae0 RSI: 0000000000000020 RDI: ffff8800ba0d2230 RBP: ffff88032a067868 R08: 0000000000000201 R09: 0000000000000001 R10: 0000000000000000 R11: 0000000000000000 R12: ffff8800ba0d0000 R13: ffff8800ba0d2230 R14: ffffffff80505ae0 R15: ffff8800ba0d0000 FS: 0000000041223950(0063) GS:ffff88033e480000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 0000000000000058 CR3: 000000032a0a3000 CR4: 00000000000006e0 Stack: ffff880329eee758 ffff880329eee758 ffff88032a0678a8 ffffffff80502dad ffff8800ba167978 ffff880329eee758 ffff88032bf9c520 ffff8800ba167978 ffff88032bf9c520 ffff88032bf9a290 ffff88032a0678b8 ffffffff80506909 Call Trace: [] ata_scsi_translate+0x3d/0x1b0 [] ata_sas_queuecmd+0x149/0x2a0 [] sas_queuecommand+0xa0/0x1f0 [libsas] [] scsi_dispatch_cmd+0xd4/0x1a0 [] scsi_queue_rq+0x66f/0x7f0 [] __blk_mq_run_hw_queue+0x208/0x3f0 [] blk_mq_run_hw_queue+0x88/0xc0 [] blk_mq_insert_request+0xc4/0x130 [] blk_execute_rq_nowait+0x73/0x160 [] sg_common_write+0x3da/0x720 [sg] [] sg_new_write+0x250/0x360 [sg] [] sg_write+0x13b/0x450 [sg] [] vfs_write+0xd1/0x1b0 [] SyS_write+0x54/0xc0 [] system_call_fastpath+0x12/0x17 tj: updated description. Fixes: 12cb5ce101ab ("libata: use blk taging") Reported-and-tested-by: Tony Battersby Signed-off-by: Shaohua Li Signed-off-by: Tejun Heo --- include/linux/libata.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/libata.h b/include/linux/libata.h index fc03efa64ffe..6b08cc106c21 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -232,6 +232,7 @@ enum { * led */ ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */ + ATA_FLAG_SAS_HOST = (1 << 25), /* SAS host */ /* bits 24:31 of ap->flags are reserved for LLD specific flags */ -- cgit v1.2.3 From c6b570d97c0e77f570bb6b2ed30d372b2b1e9aae Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Mon, 9 Mar 2015 12:20:13 +0100 Subject: regmap: introduce regmap_name to fix syscon regmap trace events This patch fixes a NULL pointer dereference when enabling regmap event tracing in the presence of a syscon regmap, introduced by commit bdb0066df96e ("mfd: syscon: Decouple syscon interface from platform devices"). That patch introduced syscon regmaps that have their dev field set to NULL. 
The regmap trace events expect it to point to a valid struct device and feed it to dev_name(): $ echo 1 > /sys/kernel/debug/tracing/events/regmap/enable Unable to handle kernel NULL pointer dereference at virtual address 0000002c pgd = 80004000 [0000002c] *pgd=00000000 Internal error: Oops: 17 [#1] SMP ARM Modules linked in: coda videobuf2_vmalloc CPU: 0 PID: 304 Comm: kworker/0:2 Not tainted 4.0.0-rc2+ #9197 Hardware name: Freescale i.MX6 Quad/DualLite (Device Tree) Workqueue: events_freezable thermal_zone_device_check task: 9f25a200 ti: 9f1ee000 task.ti: 9f1ee000 PC is at ftrace_raw_event_regmap_block+0x3c/0xe4 LR is at _regmap_raw_read+0x1bc/0x1cc pc : [<803636e8>] lr : [<80365f2c>] psr: 600f0093 sp : 9f1efd78 ip : 9f1efdb8 fp : 9f1efdb4 r10: 00000004 r9 : 00000001 r8 : 00000001 r7 : 00000180 r6 : 00000000 r5 : 9f00e3c0 r4 : 00000003 r3 : 00000001 r2 : 00000180 r1 : 00000000 r0 : 9f00e3c0 Flags: nZCv IRQs off FIQs on Mode SVC_32 ISA ARM Segment kernel Control: 10c5387d Table: 2d91004a DAC: 00000015 Process kworker/0:2 (pid: 304, stack limit = 0x9f1ee210) Stack: (0x9f1efd78 to 0x9f1f0000) fd60: 9f1efda4 9f1efd88 fd80: 800708c0 805f9510 80927140 800f0013 9f1fc800 9eb2f490 00000000 00000180 fda0: 808e3840 00000001 9f1efdfc 9f1efdb8 80365f2c 803636b8 805f8958 800708e0 fdc0: a00f0013 803636ac 9f16de00 00000180 80927140 9f1fc800 9f1fc800 9f1efe6c fde0: 9f1efe6c 9f732400 00000000 00000000 9f1efe1c 9f1efe00 80365f70 80365d7c fe00: 80365f3c 9f1fc800 9f1fc800 00000180 9f1efe44 9f1efe20 803656a4 80365f48 fe20: 9f1fc800 00000180 9f1efe6c 9f1efe6c 9f732400 00000000 9f1efe64 9f1efe48 fe40: 803657bc 80365634 00000001 9e95f910 9f1fc800 9f1efeb4 9f1efe8c 9f1efe68 fe60: 80452ac0 80365778 9f1efe8c 9f1efe78 9e93d400 9e93d5e8 9f1efeb4 9f72ef40 fe80: 9f1efeac 9f1efe90 8044e11c 80452998 8045298c 9e93d608 9e93d400 808e1978 fea0: 9f1efecc 9f1efeb0 8044fd14 8044e0d0 ffffffff 9f25a200 9e93d608 9e481380 fec0: 9f1efedc 9f1efed0 8044fde8 8044fcec 9f1eff1c 9f1efee0 80038d50 8044fdd8 fee0: 9f1ee020 9f72ef40 9e481398 00000000 00000008 9f72ef54 9f1ee020 9f72ef40 ff00: 9e481398 9e481380 00000008 9f72ef40 9f1eff5c 9f1eff20 80039754 80038bfc ff20: 00000000 9e481380 80894100 808e1662 00000000 9e4f2ec0 00000000 9e481380 ff40: 800396f8 00000000 00000000 00000000 9f1effac 9f1eff60 8003e020 80039704 ff60: ffffffff 00000000 ffffffff 9e481380 00000000 00000000 9f1eff78 9f1eff78 ff80: 00000000 00000000 9f1eff88 9f1eff88 9e4f2ec0 8003df30 00000000 00000000 ffa0: 00000000 9f1effb0 8000eb60 8003df3c 00000000 00000000 00000000 00000000 ffc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ffe0: 00000000 00000000 00000000 00000000 00000013 00000000 ffffffff ffffffff Backtrace: [<803636ac>] (ftrace_raw_event_regmap_block) from [<80365f2c>] (_regmap_raw_read+0x1bc/0x1cc) r9:00000001 r8:808e3840 r7:00000180 r6:00000000 r5:9eb2f490 r4:9f1fc800 [<80365d70>] (_regmap_raw_read) from [<80365f70>] (_regmap_bus_read+0x34/0x6c) r10:00000000 r9:00000000 r8:9f732400 r7:9f1efe6c r6:9f1efe6c r5:9f1fc800 r4:9f1fc800 [<80365f3c>] (_regmap_bus_read) from [<803656a4>] (_regmap_read+0x7c/0x144) r6:00000180 r5:9f1fc800 r4:9f1fc800 r3:80365f3c [<80365628>] (_regmap_read) from [<803657bc>] (regmap_read+0x50/0x70) r9:00000000 r8:9f732400 r7:9f1efe6c r6:9f1efe6c r5:00000180 r4:9f1fc800 [<8036576c>] (regmap_read) from [<80452ac0>] (imx_get_temp+0x134/0x1a4) r6:9f1efeb4 r5:9f1fc800 r4:9e95f910 r3:00000001 [<8045298c>] (imx_get_temp) from [<8044e11c>] (thermal_zone_get_temp+0x58/0x74) r7:9f72ef40 r6:9f1efeb4 r5:9e93d5e8 r4:9e93d400 
[<8044e0c4>] (thermal_zone_get_temp) from [<8044fd14>] (thermal_zone_device_update+0x34/0xec) r6:808e1978 r5:9e93d400 r4:9e93d608 r3:8045298c [<8044fce0>] (thermal_zone_device_update) from [<8044fde8>] (thermal_zone_device_check+0x1c/0x20) r5:9e481380 r4:9e93d608 [<8044fdcc>] (thermal_zone_device_check) from [<80038d50>] (process_one_work+0x160/0x3d4) [<80038bf0>] (process_one_work) from [<80039754>] (worker_thread+0x5c/0x4f4) r10:9f72ef40 r9:00000008 r8:9e481380 r7:9e481398 r6:9f72ef40 r5:9f1ee020 r4:9f72ef54 [<800396f8>] (worker_thread) from [<8003e020>] (kthread+0xf0/0x108) r10:00000000 r9:00000000 r8:00000000 r7:800396f8 r6:9e481380 r5:00000000 r4:9e4f2ec0 [<8003df30>] (kthread) from [<8000eb60>] (ret_from_fork+0x14/0x34) r7:00000000 r6:00000000 r5:8003df30 r4:9e4f2ec0 Code: e3140040 1a00001a e3140020 1a000016 (e596002c) ---[ end trace 193c15c2494ec960 ]--- Fixes: bdb0066df96e (mfd: syscon: Decouple syscon interface from platform devices) Signed-off-by: Philipp Zabel Signed-off-by: Mark Brown Cc: stable@vger.kernel.org --- include/trace/events/regmap.h | 123 +++++++++++++++++++++--------------------- 1 file changed, 61 insertions(+), 62 deletions(-) (limited to 'include') diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h index 23d561512f64..22317d2b52ab 100644 --- a/include/trace/events/regmap.h +++ b/include/trace/events/regmap.h @@ -7,27 +7,26 @@ #include #include -struct device; -struct regmap; +#include "../../../drivers/base/regmap/internal.h" /* * Log register events */ DECLARE_EVENT_CLASS(regmap_reg, - TP_PROTO(struct device *dev, unsigned int reg, + TP_PROTO(struct regmap *map, unsigned int reg, unsigned int val), - TP_ARGS(dev, reg, val), + TP_ARGS(map, reg, val), TP_STRUCT__entry( - __string( name, dev_name(dev) ) - __field( unsigned int, reg ) - __field( unsigned int, val ) + __string( name, regmap_name(map) ) + __field( unsigned int, reg ) + __field( unsigned int, val ) ), TP_fast_assign( - __assign_str(name, dev_name(dev)); + __assign_str(name, regmap_name(map)); __entry->reg = reg; __entry->val = val; ), @@ -39,45 +38,45 @@ DECLARE_EVENT_CLASS(regmap_reg, DEFINE_EVENT(regmap_reg, regmap_reg_write, - TP_PROTO(struct device *dev, unsigned int reg, + TP_PROTO(struct regmap *map, unsigned int reg, unsigned int val), - TP_ARGS(dev, reg, val) + TP_ARGS(map, reg, val) ); DEFINE_EVENT(regmap_reg, regmap_reg_read, - TP_PROTO(struct device *dev, unsigned int reg, + TP_PROTO(struct regmap *map, unsigned int reg, unsigned int val), - TP_ARGS(dev, reg, val) + TP_ARGS(map, reg, val) ); DEFINE_EVENT(regmap_reg, regmap_reg_read_cache, - TP_PROTO(struct device *dev, unsigned int reg, + TP_PROTO(struct regmap *map, unsigned int reg, unsigned int val), - TP_ARGS(dev, reg, val) + TP_ARGS(map, reg, val) ); DECLARE_EVENT_CLASS(regmap_block, - TP_PROTO(struct device *dev, unsigned int reg, int count), + TP_PROTO(struct regmap *map, unsigned int reg, int count), - TP_ARGS(dev, reg, count), + TP_ARGS(map, reg, count), TP_STRUCT__entry( - __string( name, dev_name(dev) ) - __field( unsigned int, reg ) - __field( int, count ) + __string( name, regmap_name(map) ) + __field( unsigned int, reg ) + __field( int, count ) ), TP_fast_assign( - __assign_str(name, dev_name(dev)); + __assign_str(name, regmap_name(map)); __entry->reg = reg; __entry->count = count; ), @@ -89,48 +88,48 @@ DECLARE_EVENT_CLASS(regmap_block, DEFINE_EVENT(regmap_block, regmap_hw_read_start, - TP_PROTO(struct device *dev, unsigned int reg, int count), + TP_PROTO(struct regmap *map, unsigned int reg, int 
count), - TP_ARGS(dev, reg, count) + TP_ARGS(map, reg, count) ); DEFINE_EVENT(regmap_block, regmap_hw_read_done, - TP_PROTO(struct device *dev, unsigned int reg, int count), + TP_PROTO(struct regmap *map, unsigned int reg, int count), - TP_ARGS(dev, reg, count) + TP_ARGS(map, reg, count) ); DEFINE_EVENT(regmap_block, regmap_hw_write_start, - TP_PROTO(struct device *dev, unsigned int reg, int count), + TP_PROTO(struct regmap *map, unsigned int reg, int count), - TP_ARGS(dev, reg, count) + TP_ARGS(map, reg, count) ); DEFINE_EVENT(regmap_block, regmap_hw_write_done, - TP_PROTO(struct device *dev, unsigned int reg, int count), + TP_PROTO(struct regmap *map, unsigned int reg, int count), - TP_ARGS(dev, reg, count) + TP_ARGS(map, reg, count) ); TRACE_EVENT(regcache_sync, - TP_PROTO(struct device *dev, const char *type, + TP_PROTO(struct regmap *map, const char *type, const char *status), - TP_ARGS(dev, type, status), + TP_ARGS(map, type, status), TP_STRUCT__entry( - __string( name, dev_name(dev) ) - __string( status, status ) - __string( type, type ) - __field( int, type ) + __string( name, regmap_name(map) ) + __string( status, status ) + __string( type, type ) + __field( int, type ) ), TP_fast_assign( - __assign_str(name, dev_name(dev)); + __assign_str(name, regmap_name(map)); __assign_str(status, status); __assign_str(type, type); ), @@ -141,17 +140,17 @@ TRACE_EVENT(regcache_sync, DECLARE_EVENT_CLASS(regmap_bool, - TP_PROTO(struct device *dev, bool flag), + TP_PROTO(struct regmap *map, bool flag), - TP_ARGS(dev, flag), + TP_ARGS(map, flag), TP_STRUCT__entry( - __string( name, dev_name(dev) ) - __field( int, flag ) + __string( name, regmap_name(map) ) + __field( int, flag ) ), TP_fast_assign( - __assign_str(name, dev_name(dev)); + __assign_str(name, regmap_name(map)); __entry->flag = flag; ), @@ -161,32 +160,32 @@ DECLARE_EVENT_CLASS(regmap_bool, DEFINE_EVENT(regmap_bool, regmap_cache_only, - TP_PROTO(struct device *dev, bool flag), + TP_PROTO(struct regmap *map, bool flag), - TP_ARGS(dev, flag) + TP_ARGS(map, flag) ); DEFINE_EVENT(regmap_bool, regmap_cache_bypass, - TP_PROTO(struct device *dev, bool flag), + TP_PROTO(struct regmap *map, bool flag), - TP_ARGS(dev, flag) + TP_ARGS(map, flag) ); DECLARE_EVENT_CLASS(regmap_async, - TP_PROTO(struct device *dev), + TP_PROTO(struct regmap *map), - TP_ARGS(dev), + TP_ARGS(map), TP_STRUCT__entry( - __string( name, dev_name(dev) ) + __string( name, regmap_name(map) ) ), TP_fast_assign( - __assign_str(name, dev_name(dev)); + __assign_str(name, regmap_name(map)); ), TP_printk("%s", __get_str(name)) @@ -194,50 +193,50 @@ DECLARE_EVENT_CLASS(regmap_async, DEFINE_EVENT(regmap_block, regmap_async_write_start, - TP_PROTO(struct device *dev, unsigned int reg, int count), + TP_PROTO(struct regmap *map, unsigned int reg, int count), - TP_ARGS(dev, reg, count) + TP_ARGS(map, reg, count) ); DEFINE_EVENT(regmap_async, regmap_async_io_complete, - TP_PROTO(struct device *dev), + TP_PROTO(struct regmap *map), - TP_ARGS(dev) + TP_ARGS(map) ); DEFINE_EVENT(regmap_async, regmap_async_complete_start, - TP_PROTO(struct device *dev), + TP_PROTO(struct regmap *map), - TP_ARGS(dev) + TP_ARGS(map) ); DEFINE_EVENT(regmap_async, regmap_async_complete_done, - TP_PROTO(struct device *dev), + TP_PROTO(struct regmap *map), - TP_ARGS(dev) + TP_ARGS(map) ); TRACE_EVENT(regcache_drop_region, - TP_PROTO(struct device *dev, unsigned int from, + TP_PROTO(struct regmap *map, unsigned int from, unsigned int to), - TP_ARGS(dev, from, to), + TP_ARGS(map, from, to), TP_STRUCT__entry( - 
__string( name, dev_name(dev) ) - __field( unsigned int, from ) - __field( unsigned int, to ) + __string( name, regmap_name(map) ) + __field( unsigned int, from ) + __field( unsigned int, to ) ), TP_fast_assign( - __assign_str(name, dev_name(dev)); + __assign_str(name, regmap_name(map)); __entry->from = from; __entry->to = to; ), -- cgit v1.2.3 From 9bc6548f372d8c829235095d91de99d8df79db6e Mon Sep 17 00:00:00 2001 From: Christophe Vu-Brugier Date: Thu, 19 Mar 2015 14:30:13 +0100 Subject: target: do not reject FUA CDBs when write cache is enabled but emulate_write_cache is 0 A check that rejects a CDB with FUA bit set if no write cache is emulated was added by the following commit: fde9f50 target: Add sanity checks for DPO/FUA bit usage The condition is as follows: if (!dev->dev_attrib.emulate_fua_write || !dev->dev_attrib.emulate_write_cache) However, this check is wrong if the backend device supports WCE but "emulate_write_cache" is disabled. This patch uses se_dev_check_wce() (previously named spc_check_dev_wce) to invoke transport->get_write_cache() if the device has a write cache or check the "emulate_write_cache" attribute otherwise. Reported-by: Christoph Hellwig Signed-off-by: Christophe Vu-Brugier Signed-off-by: Nicholas Bellinger --- include/target/target_core_backend.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index db81c65b8f48..d61be7297b2c 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -111,6 +111,7 @@ void array_free(void *array, int n); void target_core_setup_sub_cits(struct se_subsystem_api *); /* attribute helpers from target_core_device.c for backend drivers */ +bool se_dev_check_wce(struct se_device *); int se_dev_set_max_unmap_lba_count(struct se_device *, u32); int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); int se_dev_set_unmap_granularity(struct se_device *, u32); -- cgit v1.2.3 From 074c238177a75f5e79af3b2cb6a84e54823ef950 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 25 Mar 2015 15:55:42 -0700 Subject: mm: numa: slow PTE scan rate if migration failures occur Dave Chinner reported the following on https://lkml.org/lkml/2015/3/1/226 Across the board the 4.0-rc1 numbers are much slower, and the degradation is far worse when using the large memory footprint configs. Perf points straight at the cause - this is from 4.0-rc1 on the "-o bhash=101073" config: - 56.07% 56.07% [kernel] [k] default_send_IPI_mask_sequence_phys - default_send_IPI_mask_sequence_phys - 99.99% physflat_send_IPI_mask - 99.37% native_send_call_func_ipi smp_call_function_many - native_flush_tlb_others - 99.85% flush_tlb_page ptep_clear_flush try_to_unmap_one rmap_walk try_to_unmap migrate_pages migrate_misplaced_page - handle_mm_fault - 99.73% __do_page_fault trace_do_page_fault do_async_page_fault + async_page_fault 0.63% native_send_call_func_single_ipi generic_exec_single smp_call_function_single This is showing excessive migration activity even though excessive migrations are meant to get throttled. Normally, the scan rate is tuned on a per-task basis depending on the locality of faults. However, if migrations fail for any reason then the PTE scanner may scan faster if the faults continue to be remote. This means there is higher system CPU overhead and fault trapping at exactly the time we know that migrations cannot happen. This patch tracks when migration failures occur and slows the PTE scanner. 
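An illustrative sketch of the throttling idea (this is not the scheduler code from the patch; the helper and the threshold are assumptions, and numa_faults_locality[] only exists on CONFIG_NUMA_BALANCING builds). The third numa_faults_locality[] slot added below is used as a count of failed migrations:

#include <linux/sched.h>

/* If most faults recorded in the last scan window failed to migrate,
 * back the scan period off instead of speeding the scanner up. */
static unsigned long example_adjust_scan_period(struct task_struct *p,
                                                unsigned long period)
{
        unsigned long failed = p->numa_faults_locality[2];
        unsigned long total  = p->numa_faults_locality[0] +
                               p->numa_faults_locality[1] + failed;

        if (total && failed > total / 2)
                period *= 2;    /* slow the PTE scanner */

        return period;
}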
Signed-off-by: Mel Gorman Reported-by: Dave Chinner Tested-by: Dave Chinner Cc: Ingo Molnar Cc: Aneesh Kumar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 6d77432e14ff..a419b65770d6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1625,11 +1625,11 @@ struct task_struct { /* * numa_faults_locality tracks if faults recorded during the last - * scan window were remote/local. The task scan period is adapted - * based on the locality of the faults with different weights - * depending on whether they were shared or private faults + * scan window were remote/local or failed to migrate. The task scan + * period is adapted based on the locality of the faults with different + * weights depending on whether they were shared or private faults */ - unsigned long numa_faults_locality[2]; + unsigned long numa_faults_locality[3]; unsigned long numa_pages_migrated; #endif /* CONFIG_NUMA_BALANCING */ @@ -1719,6 +1719,7 @@ struct task_struct { #define TNF_NO_GROUP 0x02 #define TNF_SHARED 0x04 #define TNF_FAULT_LOCAL 0x08 +#define TNF_MIGRATE_FAIL 0x10 #ifdef CONFIG_NUMA_BALANCING extern void task_numa_fault(int last_node, int node, int pages, int flags); -- cgit v1.2.3